From 869a1ab41a7c817e3f5f9bb5c74a93b68e5d2af4 Mon Sep 17 00:00:00 2001 From: sdeka Date: Sat, 18 May 2019 16:16:33 +0530 Subject: [PATCH 0001/1308] HDDS-1535. Space tracking for Open Containers : Handle Node Startup. Contributed by Supratim Deka --- .../container/ozoneimpl/ContainerReader.java | 31 +++++++ .../ozoneimpl/TestOzoneContainer.java | 91 ++++++++++++++++++- 2 files changed, 117 insertions(+), 5 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 0192fd5dd1b57..d704bb7b5b667 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -27,11 +27,14 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Storage; +import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; @@ -45,6 +48,7 @@ import java.io.File; import java.io.FileFilter; import java.io.IOException; +import java.util.List; /** * Class used to read .container files from Volume and build container map. @@ -201,6 +205,11 @@ public void verifyContainerData(ContainerData containerData) kvContainerData .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId)); } + if (kvContainer.getContainerState() + == ContainerProtos.ContainerDataProto.State.OPEN) { + // commitSpace for Open Containers relies on usedBytes + initializeUsedBytes(kvContainer); + } containerSet.addContainer(kvContainer); } else { throw new StorageContainerException("Container File is corrupted. 
" + @@ -215,4 +224,26 @@ public void verifyContainerData(ContainerData containerData) ContainerProtos.Result.UNKNOWN_CONTAINER_TYPE); } } + + private void initializeUsedBytes(KeyValueContainer container) throws IOException { + KeyValueBlockIterator blockIter = new KeyValueBlockIterator( + container.getContainerData().getContainerID(), + new File(container.getContainerData().getContainerPath())); + long usedBytes = 0; + + while (blockIter.hasNext()) { + BlockData block = blockIter.nextBlock(); + long blockLen = 0; + + List chunkInfoList = block.getChunks(); + for (ContainerProtos.ChunkInfo chunk : chunkInfoList) { + ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk); + blockLen += info.getLen(); + } + + usedBytes += blockLen; + } + + container.getContainerData().setBytesUsed(usedBytes); + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 003f26e7a4902..198885d0e6dcb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -19,11 +19,17 @@ package org.apache.hadoop.ozone.container.ozoneimpl; +import com.google.common.base.Preconditions; +import com.google.common.primitives.Longs; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -32,6 +38,8 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.utils.MetadataStore; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -40,7 +48,9 @@ import java.util.Random; import java.util.UUID; - +import java.util.HashMap; +import java.util.List; +import java.util.ArrayList; import static org.junit.Assert.assertEquals; @@ -52,7 +62,6 @@ public class TestOzoneContainer { @Rule public TemporaryFolder folder = new TemporaryFolder(); - private OzoneConfiguration conf; private String scmId = UUID.randomUUID().toString(); private VolumeSet volumeSet; @@ -60,6 +69,8 @@ public class TestOzoneContainer { private KeyValueContainerData keyValueContainerData; private KeyValueContainer keyValueContainer; private final DatanodeDetails datanodeDetails = createDatanodeDetails(); + private HashMap commitSpaceMap; //RootDir -> committed space + private final int NUM_TEST_CONTAINERS = 10; @Before public void setUp() throws Exception { @@ -68,6 +79,7 @@ public void setUp() throws Exception { .getAbsolutePath()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, 
folder.newFolder().getAbsolutePath()); + commitSpaceMap = new HashMap(); } @Test @@ -78,16 +90,32 @@ public void testBuildContainerMap() throws Exception { // Format the volumes for (HddsVolume volume : volumeSet.getVolumesList()) { volume.format(UUID.randomUUID().toString()); + commitSpaceMap.put(getVolumeKey(volume), Long.valueOf(0)); } // Add containers to disk - for (int i=0; i<10; i++) { + for (int i=0; i < NUM_TEST_CONTAINERS; i++) { + long freeBytes = 0; + long volCommitBytes; + long maxCap = (long) StorageUnit.GB.toBytes(1); + + HddsVolume myVolume; + keyValueContainerData = new KeyValueContainerData(i, - (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), + maxCap, UUID.randomUUID().toString(), datanodeDetails.getUuidString()); keyValueContainer = new KeyValueContainer( keyValueContainerData, conf); keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); + myVolume = keyValueContainer.getContainerData().getVolume(); + + freeBytes = addBlocks(keyValueContainer, 2, 3); + + // update our expectation of volume committed space in the map + volCommitBytes = commitSpaceMap.get(getVolumeKey(myVolume)).longValue(); + Preconditions.checkState(freeBytes >= 0); + commitSpaceMap.put(getVolumeKey(myVolume), + Long.valueOf(volCommitBytes + freeBytes)); } DatanodeStateMachine stateMachine = Mockito.mock( @@ -97,12 +125,65 @@ public void testBuildContainerMap() throws Exception { Mockito.when(context.getParent()).thenReturn(stateMachine); // When OzoneContainer is started, the containers from disk should be // loaded into the containerSet. + // Also expected to initialize committed space for each volume. OzoneContainer ozoneContainer = new OzoneContainer(datanodeDetails, conf, context, null); + ContainerSet containerset = ozoneContainer.getContainerSet(); - assertEquals(10, containerset.containerCount()); + assertEquals(NUM_TEST_CONTAINERS, containerset.containerCount()); + + verifyCommittedSpace(ozoneContainer); } + //verify committed space on each volume + private void verifyCommittedSpace(OzoneContainer oc) { + for (HddsVolume dnVol : oc.getVolumeSet().getVolumesList()) { + String key = getVolumeKey(dnVol); + long expectedCommit = commitSpaceMap.get(key).longValue(); + long volumeCommitted = dnVol.getCommittedBytes(); + assertEquals("Volume committed space not initialized correctly", + expectedCommit, volumeCommitted); + } + } + + private long addBlocks(KeyValueContainer container, + int blocks, int chunksPerBlock) throws Exception { + String strBlock = "block"; + String strChunk = "-chunkFile"; + int datalen = 65536; + long usedBytes = 0; + + long freeBytes = container.getContainerData().getMaxSize(); + long containerId = container.getContainerData().getContainerID(); + MetadataStore metadataStore = BlockUtils.getDB(container + .getContainerData(), conf); + + for (int bi = 0; bi < blocks; bi++) { + // Creating BlockData + BlockID blockID = new BlockID(containerId, bi); + BlockData blockData = new BlockData(blockID); + List chunkList = new ArrayList<>(); + + chunkList.clear(); + for (int ci = 0; ci < chunksPerBlock; ci++) { + String chunkName = strBlock + bi + strChunk + ci; + long offset = ci * datalen; + ChunkInfo info = new ChunkInfo(chunkName, offset, datalen); + usedBytes += datalen; + chunkList.add(info.getProtoBufMessage()); + } + blockData.setChunks(chunkList); + metadataStore.put(Longs.toByteArray(blockID.getLocalID()), + blockData.getProtoBufMessage().toByteArray()); + } + + // remaining available capacity of the container + return (freeBytes - usedBytes); + } + 
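
(Illustration, not part of the diff: addBlocks() above returns maxSize minus usedBytes for the container it filled, the test accumulates that value per volume in commitSpaceMap, and verifyCommittedSpace() compares the sums against HddsVolume#getCommittedBytes(). The same invariant can be written as a short sketch. It only uses getters that already appear in this patch (getContainerData, getMaxSize, getVolume, getHddsRootDir, getContainerState), plus getBytesUsed, assumed here to be the read side of the setBytesUsed call made by ContainerReader#initializeUsedBytes.)

import java.util.List;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

// Sketch only: expected committed space on one volume after datanode restart.
final class CommittedSpaceSketch {
  static long expectedCommittedBytes(HddsVolume volume,
      List<KeyValueContainer> containers) {
    long committed = 0;
    for (KeyValueContainer c : containers) {
      KeyValueContainerData data = c.getContainerData();
      boolean sameVolume = data.getVolume().getHddsRootDir().getPath()
          .equals(volume.getHddsRootDir().getPath());
      // Only OPEN containers keep their remaining capacity committed on the volume.
      if (sameVolume && c.getContainerState()
          == ContainerProtos.ContainerDataProto.State.OPEN) {
        committed += data.getMaxSize() - data.getBytesUsed();
      }
    }
    return committed;
  }
}
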
+ private String getVolumeKey(HddsVolume volume) { + return volume.getHddsRootDir().getPath(); + } private DatanodeDetails createDatanodeDetails() { Random random = new Random(); From 64c39856d2d3af762ab508c68057ce6eb654bd75 Mon Sep 17 00:00:00 2001 From: sdeka Date: Mon, 20 May 2019 08:13:37 +0530 Subject: [PATCH 0002/1308] Fixed checkstyle issues. --- .../hadoop/ozone/container/ozoneimpl/ContainerReader.java | 3 ++- .../ozone/container/ozoneimpl/TestOzoneContainer.java | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index d704bb7b5b667..08a8f5d47f683 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -225,7 +225,8 @@ public void verifyContainerData(ContainerData containerData) } } - private void initializeUsedBytes(KeyValueContainer container) throws IOException { + private void initializeUsedBytes(KeyValueContainer container) + throws IOException { KeyValueBlockIterator blockIter = new KeyValueBlockIterator( container.getContainerData().getContainerID(), new File(container.getContainerData().getContainerPath())); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 198885d0e6dcb..7cdb692597e3b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -70,7 +70,7 @@ public class TestOzoneContainer { private KeyValueContainer keyValueContainer; private final DatanodeDetails datanodeDetails = createDatanodeDetails(); private HashMap commitSpaceMap; //RootDir -> committed space - private final int NUM_TEST_CONTAINERS = 10; + private final int numTestContainers = 10; @Before public void setUp() throws Exception { @@ -94,7 +94,7 @@ public void testBuildContainerMap() throws Exception { } // Add containers to disk - for (int i=0; i < NUM_TEST_CONTAINERS; i++) { + for (int i = 0; i < numTestContainers; i++) { long freeBytes = 0; long volCommitBytes; long maxCap = (long) StorageUnit.GB.toBytes(1); @@ -130,7 +130,7 @@ public void testBuildContainerMap() throws Exception { OzoneContainer(datanodeDetails, conf, context, null); ContainerSet containerset = ozoneContainer.getContainerSet(); - assertEquals(NUM_TEST_CONTAINERS, containerset.containerCount()); + assertEquals(numTestContainers, containerset.containerCount()); verifyCommittedSpace(ozoneContainer); } From 24c53e057a237f78e1433d724df5ffe7961579a5 Mon Sep 17 00:00:00 2001 From: Sunil G Date: Mon, 20 May 2019 10:53:01 -0400 Subject: [PATCH 0003/1308] YARN-9546. Add configuration option for YARN Native services AM classpath. Contributed by Gergely Pollak. 
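
(The diff below adds a yarn.service.classpath option: ServiceUtils#buildClasspath now receives the configured value, splits it on ',', and appends each element after the default AM class path entries, and yarn-default.xml documents the new property. A small usage sketch follows, mirroring TestServiceClient#testAMEnvCustomClasspath; the wrapper class and jar paths are invented for illustration, and it lives in the ServiceClient package only because addAMEnv() becomes protected/@VisibleForTesting in this patch.)

package org.apache.hadoop.yarn.service.client;

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.service.conf.YarnServiceConf;

// Sketch only: pass extra jars to the service AM through the new option.
final class CustomAmClasspathSketch {
  static Map<String, String> amEnvWithExtraClasspath(ServiceClient client)
      throws IOException {
    Configuration conf = client.getConfig();
    // Comma-separated entries; buildClasspath() appends them at the end.
    conf.set(YarnServiceConf.YARN_SERVICE_CLASSPATH,
        "/opt/extra-libs/metrics.jar,/opt/extra-libs/audit.jar");
    return client.addAMEnv();   // returned CLASSPATH now ends with the two jars
  }
}

Operators can set the same property in yarn-site.xml; the yarn-default.xml stanza at the end of this patch describes it.
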
--- .../yarn/service/client/ServiceClient.java | 12 ++++++---- .../yarn/service/conf/YarnServiceConf.java | 2 ++ .../yarn/service/utils/ServiceUtils.java | 7 ++++++ .../service/client/TestServiceClient.java | 23 +++++++++++++++++++ .../src/main/resources/yarn-default.xml | 10 ++++++++ 5 files changed, 50 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index 08352a88c798e..030e0dbf848da 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -1234,11 +1234,15 @@ private String buildCommandLine(Service app, Configuration conf, return cmdStr; } - private Map addAMEnv() throws IOException { + @VisibleForTesting + protected Map addAMEnv() throws IOException { Map env = new HashMap<>(); - ClasspathConstructor classpath = - buildClasspath(YarnServiceConstants.SUBMITTED_CONF_DIR, "lib", fs, getConfig() - .getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)); + ClasspathConstructor classpath = buildClasspath( + YarnServiceConstants.SUBMITTED_CONF_DIR, + "lib", + fs, + getConfig().get(YarnServiceConf.YARN_SERVICE_CLASSPATH, ""), + getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)); env.put("CLASSPATH", classpath.buildClasspath()); env.put("LANG", "en_US.UTF-8"); env.put("LC_ALL", "en_US.UTF-8"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java index b9568f23fd18c..58fe70b4f9bd7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java @@ -60,6 +60,8 @@ public class YarnServiceConf { public static final String ROLLING_LOG_INCLUSION_PATTERN = "yarn.service.rolling-log.include-pattern"; public static final String ROLLING_LOG_EXCLUSION_PATTERN = "yarn.service.rolling-log.exclude-pattern"; + public static final String YARN_SERVICE_CLASSPATH = "yarn.service.classpath"; + public static final String YARN_SERVICES_SYSTEM_SERVICE_DIRECTORY = YARN_SERVICE_PREFIX + "system-service.dir"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java index a76b64d3cf036..34d2ba3bb830a 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java @@ -451,6 +451,7 @@ public static Path createLocalPath(File file) { * @param sliderConfDir relative path to the dir containing slider config * options to put on the classpath -or null * @param libdir directory containing the JAR files + * @param configClassPath extra class path configured in yarn-site.xml * @param usingMiniMRCluster flag to indicate the MiniMR cluster is in use * (and hence the current classpath should be used, not anything built up) * @return a classpath @@ -458,6 +459,7 @@ public static Path createLocalPath(File file) { public static ClasspathConstructor buildClasspath(String sliderConfDir, String libdir, SliderFileSystem sliderFileSystem, + String configClassPath, boolean usingMiniMRCluster) { ClasspathConstructor classpath = new ClasspathConstructor(); @@ -479,6 +481,11 @@ public static ClasspathConstructor buildClasspath(String sliderConfDir, classpath.addRemoteClasspathEnvVar(); classpath.append(ApplicationConstants.Environment.HADOOP_CONF_DIR.$$()); } + + if (!configClassPath.isEmpty()) { + classpath.appendAll(Arrays.asList(configClassPath.split(","))); + } + return classpath; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java index 4527da433ce2d..c66c4aedf89f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java @@ -76,6 +76,29 @@ public class TestServiceClient { public ServiceTestUtils.ServiceFSWatcher rule = new ServiceTestUtils.ServiceFSWatcher(); + @Test + public void testAMEnvCustomClasspath() throws Exception { + Service service = createService(); + service.getComponents().forEach(comp -> + comp.setRestartPolicy(Component.RestartPolicyEnum.NEVER)); + ServiceClient client = MockServiceClient.create(rule, service, true); + //saving the original value of the param, for restoration purposes + String oldParam = client.getConfig().get("yarn.service.classpath", ""); + String originalPath = client.addAMEnv().get("CLASSPATH"); + + client.getConfig().set("yarn.service.classpath", "{{VAR_1}},{{VAR_2}}"); + String newPath = client.addAMEnv().get("CLASSPATH"); + + Assert.assertEquals(originalPath + "{{VAR_1}}{{VAR_2}}", newPath); + //restoring the original value for service classpath + client.getConfig().set("yarn.service.classpath", oldParam); + + newPath = client.addAMEnv().get("CLASSPATH"); + Assert.assertEquals(originalPath, newPath); + + client.stop(); + } + @Test public void testUpgradeDisabledByDefault() throws Exception { Service service = createService(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 9741f6c36b1da..87c2f132ea722 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -4211,4 +4211,14 @@ yarn.resourcemanager.activities-manager.app-activities.max-queue-length 1000 + + + + Comma separated extra class path parameters for yarn services AM. + These path elements will be appended to the end of the YARN service AM + classpath. + + yarn.service.classpath + + From f63300228e27d1b8170e4d85538d8011484f1448 Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Mon, 20 May 2019 13:02:56 -0700 Subject: [PATCH 0004/1308] HDDS-1451 : SCMBlockManager findPipeline and createPipeline are not lock protected. (#799) * HDDS-1451 : SCMBlockManager findPipeline and createPipeline are not lock protected. * HDDS-1451 : Address review comments. --- .../hdds/scm/block/BlockManagerImpl.java | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index 31d82f633b6ee..1ffd01d30c559 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -182,18 +182,27 @@ public AllocatedBlock allocateBlock(final long size, ReplicationType type, pipelineManager .getPipelines(type, factor, Pipeline.PipelineState.OPEN, excludeList.getDatanodes(), excludeList.getPipelineIds()); - Pipeline pipeline; + Pipeline pipeline = null; if (availablePipelines.size() == 0) { try { // TODO: #CLUTIL Remove creation logic when all replication types and // factors are handled by pipeline creator pipeline = pipelineManager.createPipeline(type, factor); } catch (IOException e) { - LOG.error("Pipeline creation failed for type:{} factor:{}", - type, factor, e); - break; + LOG.warn("Pipeline creation failed for type:{} factor:{}. Retrying " + + "get pipelines call once.", type, factor, e); + availablePipelines = pipelineManager + .getPipelines(type, factor, Pipeline.PipelineState.OPEN, + excludeList.getDatanodes(), excludeList.getPipelineIds()); + if (availablePipelines.size() == 0) { + LOG.info("Could not find available pipeline of type:{} and " + + "factor:{} even after retrying", type, factor); + break; + } } - } else { + } + + if (null == pipeline) { // TODO: #CLUTIL Make the selection policy driven. pipeline = availablePipelines .get((int) (Math.random() * availablePipelines.size())); From 05db2a598e7c2d30899c3e0d69a0d2ade7ec512f Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Mon, 20 May 2019 13:09:58 -0700 Subject: [PATCH 0005/1308] HDDS-1487. Bootstrap React framework for Recon UI (#831) HDDS-1487. Bootstrap React framework for Recon UI. Contributed by Vivek Ratnavel Subramanian. 
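
(Looking back at the HDDS-1451 change above before moving on to the Recon UI patch: allocateBlock() now treats a failed createPipeline() as non-fatal. It repeats the getPipelines() lookup once, gives up only if that is still empty, and otherwise picks one of the available pipelines at random. A compact sketch of that control flow follows; the functional interfaces are stand-ins for the PipelineManager calls in the diff, not real Ozone APIs.)

import java.io.IOException;
import java.util.List;
import java.util.Random;
import java.util.function.Supplier;

// Sketch only: create, retry the lookup once on failure, then pick at random.
final class PipelineSelectionSketch {
  interface PipelineCreator<P> {
    P create() throws IOException;
  }

  static <P> P selectPipeline(Supplier<List<P>> lookup,
      PipelineCreator<P> creator) {
    List<P> available = lookup.get();
    P pipeline = null;
    if (available.isEmpty()) {
      try {
        pipeline = creator.create();   // a freshly created pipeline is used directly
      } catch (IOException e) {
        available = lookup.get();      // retry the lookup once, as the new code does
        if (available.isEmpty()) {
          return null;                 // caller breaks out of its allocation loop
        }
      }
    }
    if (pipeline == null) {
      // Random choice among open pipelines; the diff keeps a TODO to make this policy driven.
      pipeline = available.get(new Random().nextInt(available.size()));
    }
    return pipeline;
  }
}
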
--- hadoop-ozone/ozone-recon/pom.xml | 90 +- .../main/resources/webapps/recon/index.html | 59 - .../webapps/recon/ozone-recon-web/.gitignore | 23 + .../webapps/recon/ozone-recon-web/README.md | 45 + .../recon/ozone-recon-web/config-overrides.js | 33 + .../recon/ozone-recon-web/package.json | 45 + .../recon/ozone-recon-web/public/favicon.ico | Bin 0 -> 17470 bytes .../recon/ozone-recon-web/public/index.html | 56 + .../ozone-recon-web/public/manifest.json | 15 + .../recon/ozone-recon-web/src/App.less | 49 + .../recon/ozone-recon-web/src/App.test.tsx | 27 + .../webapps/recon/ozone-recon-web/src/App.tsx | 88 + .../components/Breadcrumbs/Breadcrumbs.tsx | 58 + .../src/components/NavBar/NavBar.less | 41 + .../src/components/NavBar/NavBar.tsx | 67 + .../src/constants/breadcrumbs.constants.tsx | 26 + .../recon/ozone-recon-web/src/index.less | 32 + .../recon/ozone-recon-web/src/index.tsx | 30 + .../recon/ozone-recon-web/src/logo.png | Bin 0 -> 22480 bytes .../src/makeRouteWithSubRoutes.tsx | 32 + .../ozone-recon-web/src/react-app-env.d.ts | 18 + .../recon/ozone-recon-web/src/routes.tsx | 37 + .../ozone-recon-web/src/routes.types.tsx | 23 + .../ozone-recon-web/src/serviceWorker.ts | 161 + .../ContainerBrowser/ContainerBrowser.tsx | 33 + .../src/views/Dashboard/Dashboard.tsx | 32 + .../src/views/NotFound/NotFound.tsx | 29 + .../recon/ozone-recon-web/tsconfig.json | 27 + .../webapps/recon/ozone-recon-web/yarn.lock | 9638 +++++++++++++++++ hadoop-ozone/pom.xml | 3 + 30 files changed, 10744 insertions(+), 73 deletions(-) delete mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/index.html create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/.gitignore create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/README.md create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/config-overrides.js create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/package.json create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/public/favicon.ico create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/public/index.html create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/public/manifest.json create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.test.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/Breadcrumbs/Breadcrumbs.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.less create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.less create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/logo.png create 
mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/makeRouteWithSubRoutes.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/react-app-env.d.ts create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.types.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/serviceWorker.ts create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/ContainerBrowser/ContainerBrowser.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Dashboard/Dashboard.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/NotFound/NotFound.tsx create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/tsconfig.json create mode 100644 hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/yarn.lock diff --git a/hadoop-ozone/ozone-recon/pom.xml b/hadoop-ozone/ozone-recon/pom.xml index 03a4c48cc7f73..e84c2dc2e06f5 100644 --- a/hadoop-ozone/ozone-recon/pom.xml +++ b/hadoop-ozone/ozone-recon/pom.xml @@ -75,27 +75,84 @@ ${basedir}/dev-support/findbugsExcludeFile.xml + + + com.github.eirslett + frontend-maven-plugin + 1.6 + + target + ${basedir}/src/main/resources/webapps/recon/ozone-recon-web + + + + Install node and yarn locally to the project + + install-node-and-yarn + + + v12.1.0 + v1.9.2 + + + + yarn install + + yarn + + + install + + + + Build frontend + + yarn + + + run build + + + + org.apache.maven.plugins - maven-dependency-plugin + maven-resources-plugin - copy-common-html - prepare-package + Copy frontend build to target + process-resources - unpack + copy-resources - - - org.apache.hadoop - hadoop-hdds-server-framework - ${project.build.outputDirectory} - - webapps/static/**/*.* - - - true + ${project.build.outputDirectory}/webapps/recon + + + ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build + true + + + + + + Copy frontend static files to target + process-resources + + copy-resources + + + ${project.build.outputDirectory}/webapps/static + + + ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build/static + true + + @@ -261,5 +318,10 @@ spring-jdbc ${spring.version} + + javax.activation + activation + 1.1.1 + \ No newline at end of file diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/index.html b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/index.html deleted file mode 100644 index 800071661d1e0..0000000000000 --- a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/index.html +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - - - - - Ozone Recon - - - - - - - - - - - -
-
-
-    Container Id Mapping
-
-
- - - - - diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/.gitignore b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/.gitignore new file mode 100644 index 0000000000000..4d29575de8048 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/.gitignore @@ -0,0 +1,23 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. + +# dependencies +/node_modules +/.pnp +.pnp.js + +# testing +/coverage + +# production +/build + +# misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/README.md b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/README.md new file mode 100644 index 0000000000000..d555ccd4bdf1f --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/README.md @@ -0,0 +1,45 @@ + + +This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app). + +## Available Scripts + +In the project directory, you can run: + +### `yarn start` + +Runs the app in the development mode.
+Open [http://localhost:3000](http://localhost:3000) to view it in the browser. + +The page will reload if you make edits.
+You will also see any lint errors in the console. + +### `yarn test` + +Launches the test runner in the interactive watch mode.
+See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information. + +### `yarn run build` + +Builds the app for production to the `build` folder.
+It correctly bundles React in production mode and optimizes the build for the best performance. + +The build is minified and the filenames include the hashes.
+Your app is ready to be deployed! + +See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information. diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/config-overrides.js b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/config-overrides.js new file mode 100644 index 0000000000000..d29b530256801 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/config-overrides.js @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +const { override, fixBabelImports, addLessLoader} = require('customize-cra'); + +module.exports = override( + fixBabelImports('import', { + libraryName: 'antd', + libraryDirectory: 'es', + style: true + }), + addLessLoader({ + javascriptEnabled: true, + modifyVars: { + '@primary-color': '#1DA57A' + } + }) +); diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/package.json new file mode 100644 index 0000000000000..0a6dc1a29cd9c --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -0,0 +1,45 @@ +{ + "name": "ozone-recon", + "version": "0.1.0", + "private": true, + "dependencies": { + "@types/jest": "24.0.12", + "@types/node": "11.13.9", + "@types/react": "16.8.15", + "@types/react-dom": "16.8.4", + "@types/react-router-dom": "^4.3.3", + "antd": "^3.16.6", + "babel-plugin-import": "^1.11.0", + "classnames": "^2.2.6", + "customize-cra": "^0.2.12", + "less": "^3.9.0", + "less-loader": "^5.0.0", + "react": "^16.8.6", + "react-app-rewired": "^2.1.3", + "react-dom": "^16.8.6", + "react-router-dom": "^5.0.0", + "react-scripts": "3.0.0", + "typescript": "3.4.5" + }, + "scripts": { + "start": "react-app-rewired start", + "build": "react-app-rewired build", + "test": "react-app-rewired test", + "eject": "react-scripts eject" + }, + "eslintConfig": { + "extends": "react-app" + }, + "browserslist": { + "production": [ + ">0.2%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + "last 1 firefox version", + "last 1 safari version" + ] + } +} diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/public/favicon.ico b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/public/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..df12210781df8596dffad422ee5513a9df8807e2 GIT binary patch literal 17470 zcmeHN2YgmVw*S!8RZ&-cmPHWjvX)guMU+lDq>uyxXy}9{MF9l_q)L@0(qu)t^pZ|O z3Pn;Nfj|(P(w&w}#vW+E3zb^k(-DVT0ssN}~P*CuQ^{iTUcJ`fYSF7sX-&&v0 
zE;O-jL>M}D?AY7ncX)XCJ#7Dh&EGs9NVlSy_4|Z$!fV0^N-haVaW@lZ~VK`0>i5-fOJI5fmD34v3`q^inDNibP zAt)&5cJUeVjleI(+8RgJ;8*1b@eto9Jg7@MP~34#H`rF%?1!!$hH08WxQ!R6w`i#26D z=feJ9RW3OH9fA*Q-)m+0@K_fB+n2WTFAprn)sky=_J7F7ckJA`vj*E891h1~(wBMS z#EH`V`}ZR`IT>MLVfgB+uP|%YEKHd)1=FTY!@PO(uxiyR#KpxSEiDaMSy{NmQd(M? zz;VVA-sZSU53TusH9im*EeAbwOY_iRl?!ZN+3am1JK^w^qkN2{i+LAM?Af~)n>TO9 zz<~qt`s=TQcpC(}1rF9Xg7Y)e&(9Cjr%%VX-+l}7&!Bvya_sjRi~gD4s>0vDSikgG zKd0urng?SJUHA^RCAExaRyH$j%WX#3@hIdM7hvA(c|xE4PY*p{v}n=7B*4wh4N^C6 z-W<)EHMP*`GZXpw`DZ!*ILerc&)HV?_2=OK zOWw8SQ}iFpIaZ6mbCPrRhzuAW>pH-;)MoTc7-pU;JddpXnP}Xkk*q!3-QCfoNfR_| z*bt2xHB!*HapTIi^vN>~@>}@e>guZA;TTGO`u6RMjEoFWuTD|FdTLsT$A4TFIES+i zzC*i}yJpR59RL2f8GR%Mwna90Z|ICatZal@8(fieW)En`Fn9JBiU;-IsIT}SbPF#u z$Zy@J+fqC8MDyaQr=AkIu#X%$5;-|JAP=IB9zFVu@WJ9irR~+c`FGS#{1x9e_uRR2 z_~@gLlwNzc^2X)Mm$7XBH?S?PhL#(gd z2tOPm4_}D-W=p852VtC+*rM z2@YB|MSf-4wX4=3uke!5WJ5b!_#?6YIYw%e|LgeYwr_Hr%*W&TVA2&#&A$-13 z76c!ezgDeUCUG&u7L}Kmhsc;5<6rU%`a9P1WGy(9TK5}i7y8APz4+pb<~{e^0q4{4O*_gW z?!W(jhm0?>uGd}9ZOOe65)uOO2`ozqk6H6>`u21h<-oJNb?9JtY#sp5E$_o8^h2}` z9SYmV?wFaj(ve?QDBq#5s3?tV*I46hT7`ZD@33JA3Lk}+HuXohglXo{%O_?3AodH?iKzz; z98h~oYyYIz<4=4gRQ%6F4?X0NwXcc?Vkd}$o&E~kZ+&6W^H1pk$5-#MUa))jZZ(eZ zRIk6xDfC;qZb*zyoXhyWr!~*r7S56<6h1!2yurz@4s5_(>$*7FgpW|P`-XiC-|&%W z7daLlp&#R|h+#-LmF6%EQ|UxOVZkAe+d{^W!g17iR(s*`v&qPiO8qm`0A^# zn)D6g6HKm6L-|hf3R2&ya1OopuTt*qGEV*a^-V4Fns!Ui%mMue;M%oodAV0|U%l0s z(oZ^SUoECJ-~FKt0VS#gCdl$L@r z`1uX8pyZgiF0|~DUF?Cji6WmOrxuP5*@MxiL}IiB?d!lwv| znuw0kQ}Np7q4;3ue4NTZZ@^I@3*^BOg1e@HV`)5T4wO%}i!N#z&~{32eeSvEOtDK` z16S+&DrJCU>b0-Kt#^Uw&}*-~X3AIsdhLbR9LGF*3L(jP|JBZTfrgeion3Oa8644z?Q;1)Rs z-=01|4{6#>!$4`7l7+eS@7hPmde#&hHEh^0JoC&mhOCw5fzYmHK-Q9FjYWTzF#(@z zo{rqJhx<9&59hU)afK%jKm4#EISa{aaPF4*0SF@le^eo zO6g($TWT+R-IPA|dMW+wjrI&OUDAf3{r>Ujo;D4^J0>G2dJ@`3OhLP-&(LA(3pJ|2F28*H0g z%Y)*FAZX`D2u>M^Zu`a}Bx3@49hgLzg5C$Fp=ZW4gdCiX-dVHQo{ervvk)9N3-3f# zfbgLm^+4p}T|%9W?_(JC;Y@xms1NqC@-o@;!EU!tKY#wb@)4Xr`O7cAtmJ{LmC!GG zEB=9aIwYsW(%-`QRyt6ZaYhMa8Tq!-?#Xx}3$pf-v*H?LN$#KHX_=9|LHhP{Ddd4m zxz|?u`K|B6YujM>Z~qwWlSiXd`UG^#m`vKIq9^I^d2l9rAN(ABv*+Ty%z5a&cRspo zpMwt3Gbj^N(Kd1t?ZX7}J9Y@XVm?Bv*x~St8-+GI#^K%MN$8qRIXEyAeKKbgzQB7~bI~t* zKKfc&I_gpEV%u+ikj82B(=sJ^~C)=(eH>}4n`;s5e-kpuCU6R8tN zWj|i8UOiLRO6!2sb?VeHB|fA4i{Mt~jf4j0!e?KuU%y__FMJdF<$KWs0pSH@&Q8A9 zmGAhzwwYR-xZ&_6{eE#{;2%f3moN$KcTPj6-C(0OxKbmE*C!;D@3!cs07Wqgl*AGa!yJ55X^fGy)RFAu!=n@?bLB zCQL)S#Odg;V$<=* zbO18WX45~wBo7S6OM)A=Y}tYbAAAtBduneiHbCou#0%7c?iTKP&E%)hau?&yAA0xh z4d$2(Ek7drI;P&RVS~if+K*iKy){N-#L{uPeJFslW9XIq5Hu}=yu>!1gDNh>$nf$8~Onn zuM1&r#Ty&5B4IZiC@m?)wft*J7LL$2ck9OU36do~OWSSP!?KLEaJ>$H_Sc|nU-9+z zRXQYVe}nGZC4bK{(fVHLy|kSRpI2MAuF@_J&Mm!+;Kb3W7v0}z%k}r69#rr^8aoWW8Zk8Z=59$6BIv0Hbh7K6UI_sK2dfv zfIRcx_Az-f41tM^A$AW($Gs!aDQ!49Ck;ov&<^lpjwLfk`3KNHC|wY{kdl&8X}iTf zuzYh1`u`&Ii=A_(U-aMx+uygU@1^g?;>3eCmrJfWMyAfgqmdzwb_pT}vW!l!+ z@b|Rk>ij|Uf%+`;s@!0oK7AYu7cP{a8w5|*|LXd`Z+(Bu^>^k2=efgr-?ON^qJD1J&!Lp1C**K7U z2>Z`v5i*gH%ios{Vf*RbIDGXVLZ`9?q6<twR4DB1o&j9JZMW%hjnXmTt@QtH%FBsf zy?QBM=WPGA{gk~V`M1_u&q{x%>HM*pIBFjJ&2Bn!mK@$pIc{qYWNhFiaT1||-XZWL z&7QG?*f$8SlmXi^e?%ROS9Xmy-mvyUUpmjn@oVOsJ#&eDV)xC*tN&TqfBDTlptK#) z(b0Sj6UMl4&Dfc5=x-(nwZOpk!W?m$stMf^)&zD;t|GB3vt zS*V>)*y2db9y0stI{XMvB?F*Nv-tb=23xR9i;~Ap| zzWR10WDitQTwE^1P#!awbG?tv$?@^=g1gFx2)?4j68}ie^CDBu%UI+rUHEkq?(FNU zb1D);>)6rR2g+Fmi_Wj##6uZd7rrNcuqx2H@Gzh6jXRq}9(dSW#S9?Leb8cSZ?uT+ zh33&c(L6E)Ex+l3maDoWV09nzzztJk=b)&h2xX;Zng>T{-;T>!yW}h+Ue)?5_4Cg^ zZ+7X@MdJTcoKLQW3jH_bg4p*O_tK@zB?#>|?f>q&>n@e=pYdlB(e({M|DQiu+F5N=V&WC6FR|dWiWinkH38FiFelYLC&SK689PT1^K4% z;49uO5fUZt6}i{@fAX1AV1TAmYUjddN|srVrF~O+R8{`x_SOvWfbF8Z^B0OPUOZovo0D5~@jL;& 
z6Gq^r@HS`~*%=KZgV7{37#=G+!F^>%xUC2#ybG5V9pJsH6Y4GXM#H84$j?F_?w6QH&S`7b ztSNkO$ekmWpY+>&N6Wx!i-&fRA8kXm50KmteTq77r7dV@@j%%$X{$o|IiBL_WfXSI zE;(in$Q+GVcek~--P6zXPU&g3N(wPOzwKeV?dWQ{CU!Pm;yR&eOfVWnk^YDvyg@m5 zdqW$zEESq7pap4fPTHF{Q{{qcics&erz!d-}sawKt&;0`~N+1o`ck(i^@>l#kur(K422$Ll)7Wo3Ib zTiS*Yh-Rd_>9PO?O}PF|86UZ?;C+kv_hA0^MJQGNQQ1TX@snI_sDX+sZ z>#t?M%uc)566f2i`Hg;1%8#t~@_(zB~|3mb6CW zCBA69l(5VPjh6brQ}Lh%J_#R(Yeo66GhVi0#pA@`4GX^?y^Gv8sl2De zgU)&&JZRObmC~;nGiJznYbkw#imj}9#Qs;>#WbX;d6~t3?hJAdFVEY6hj+M@2c+^` zJf#PGQfLR9z;8EgL~L)`2+F}W3&dt@3t>*N8$7}~qQ%-EG+E|{hF`ZLv_!)tt;mBQ z+JnELm0eenzoyJ1MYM0;IKT@BQ0MR4=@%W?Ow56nOm-?J)=9rE%aS9U(<%X@WtC-^WMzT06h0qBfP%E z0|!nXRQ^%+!jcc5UHI4;H{HIm7QCg8^1Ne9W}$N0QT1ibHmeo z|Ak0zJQm^ZcqH82cyP0e@z~m?#)He=G;A|$#=N)%MqzQGqpY;Fj5vuL>|mvH;YvQ8 z@sc*5YZ-Wy?W2Z|g(06tuAHQqYF7T{c?zv=9|5k9Ep@blUJE0+= zUMcJ`SvZDGr(=!q6Nw1r{rWHg?u~v5HCVe?TW|J)hjV`{uh`re-36X zoQuyF&By0o3d~b5o3(~nY|mOa4|5kU!lyH)84H&!HtjG_4&(DYl=@ypS$jf|hGO#k zWZ9*lgSp3oNVD+YhtS@tEA6y1?Yhl(G!H}$QUrfKv-3`pDRN(xJ_sLV?;-NQ+T>lK+_59hRP1dB@!5Z*XdYO1 zJC?{E$jY&+dlI7i&V11FATlAcBD|1u1mTCshP25$;h)^ofQOd{+&tVX@)pcpxCn3$ z-on+T0$f_4S&QaqLD_2Q-3oPH_^aZl#I_}6C6|bww-BC)MDe>~v;JAAQunK3BFC(J zm+fw}Z&yVhZoBO^!NsBJ)V@+`XFh0Kd8=CcRC%ty)pkan$$m`knQ<<=-0_k#PnKB1 zvw|<{Ds8zu`*#)2A#uIdhbP(oM)In1-->gnHIcjPVgp1bgcePMrs-C-eoxy~O|$Uz zzWeS|V+RKZBQ-Tue9IZm-AB{K_J5pqJ(twZ^ReB65G(mNIX`XPy0!2^-RG6FSJJD_ zWM%z?C*mix{nWMeiH?Xpk@}4{-Y{j4!EwZ1m?K7vfSf&Zk9(eTjVC + + + + + + + + + + + + + React App + + + +
+ + + diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/public/manifest.json b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/public/manifest.json new file mode 100644 index 0000000000000..1f2f141fafdeb --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/public/manifest.json @@ -0,0 +1,15 @@ +{ + "short_name": "React App", + "name": "Create React App Sample", + "icons": [ + { + "src": "favicon.ico", + "sizes": "64x64 32x32 24x24 16x16", + "type": "image/x-icon" + } + ], + "start_url": ".", + "display": "standalone", + "theme_color": "#000000", + "background_color": "#ffffff" +} diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less new file mode 100644 index 0000000000000..1d6ee7c3f06cd --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +@import "./components/NavBar/NavBar.less"; + +.ant-layout-header { + padding: 0 20px; + height: 50px; + line-height: 50px; + background: #FFF; +} + +.content-layout { + margin-left: 200px; + &.sidebar-collapsed { + margin-left: @sidebar-collapsed-width; + } +} + +.page-header { + padding: 10px 0; + font-size: 20px; + font-weight: 500; +} + +.content-div { + padding: 24px; + background-color: #FFF; + min-height: 80vh; +} + +body { + font-family: 'Roboto', sans-serif; +} diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.test.tsx b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.test.tsx new file mode 100644 index 0000000000000..0205e7473ad54 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.test.tsx @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React from 'react'; +import ReactDOM from 'react-dom'; +import App from './App'; + +it('renders without crashing', () => { + const div = document.createElement('div'); + ReactDOM.render(, div); + ReactDOM.unmountComponentAtNode(div); +}); diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.tsx b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.tsx new file mode 100644 index 0000000000000..8c1e7c01d7ebb --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.tsx @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; + +import { Layout } from 'antd'; +import './App.less'; +import NavBar from './components/NavBar/NavBar'; +import Breadcrumbs from './components/Breadcrumbs/Breadcrumbs'; +import { BrowserRouter as Router, Switch, Route, Redirect } from 'react-router-dom'; +import { routes } from './routes'; +import { MakeRouteWithSubRoutes } from './makeRouteWithSubRoutes'; + +const classNames = require('classnames'); +const { + Header, Content, Footer +} = Layout; + +interface Props { +} + +interface State { + collapsed: boolean; +} + +class App extends React.Component { + + constructor(props: Props) { + super(props); + + this.state = {collapsed: false}; + } + + onCollapse = (collapsed: boolean) => { + this.setState({ collapsed }); + }; + + render() { + const { collapsed } = this.state; + const layoutClass = classNames('content-layout', {'sidebar-collapsed': collapsed}); + + return ( + + + + +
+
+ +
+
+ + + + + + { + routes.map( + (route, index) => + ) + } + + +
+
+
+
+
+ ); + } +} + +export default App; diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/Breadcrumbs/Breadcrumbs.tsx b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/Breadcrumbs/Breadcrumbs.tsx new file mode 100644 index 0000000000000..41e5db1267944 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/Breadcrumbs/Breadcrumbs.tsx @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { Breadcrumb } from 'antd'; +import { withRouter, Link } from 'react-router-dom'; +import { RouteComponentProps } from 'react-router'; +import { breadcrumbNameMap } from '../../constants/breadcrumbs.constants'; + +interface Props extends RouteComponentProps { + collapsed: boolean; + onCollapse: (arg: boolean) => void; +} + +class Breadcrumbs extends React.Component { + + render() { + const { location } = this.props; + const pathSnippets = location.pathname.split('/').filter(i => i); + const extraBreadcrumbItems = pathSnippets.map((_, index) => { + const url = `/${pathSnippets.slice(0, index + 1).join('/')}`; + return ( + + + {breadcrumbNameMap[url]} + + + ); + }); + const breadcrumbItems = [( + + Home + + )].concat(extraBreadcrumbItems); + return ( + + {breadcrumbItems} + + ); + } +} + +export default withRouter(Breadcrumbs); diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.less b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.less new file mode 100644 index 0000000000000..cd3ab1fc6a5e8 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.less @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +@sidebar-collapsed-width: 50px; + +.logo { + color: #FFF; + font-size: 20px; + font-weight: 500; + padding: 10px; + background-color: #002040; + .logo-text { + margin-left: 10px; + } +} + +.ant-layout-sider-collapsed .logo-text { + display: none; +} + +.ant-menu-inline-collapsed { + width: @sidebar-collapsed-width; + .ant-menu-item { + padding-left: 17px !important; + } +} diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.tsx b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.tsx new file mode 100644 index 0000000000000..0e632d3881b66 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.tsx @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import logo from '../../logo.png'; +import { Layout, Menu, Icon } from 'antd'; +import './NavBar.less'; +import { withRouter, Link } from 'react-router-dom'; +import { RouteComponentProps } from 'react-router'; +const { Sider } = Layout; + +interface NavBarProps extends RouteComponentProps { + collapsed: boolean; + onCollapse: (arg: boolean) => void; +} + +class NavBar extends React.Component { + render() { + const {location} = this.props; + return ( + +
+ Ozone Recon Logo + Ozone Recon +
+ + + + Dashboard + + + + + Container Browser + + + +
+ ); + } +} + +export default withRouter(NavBar); diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx new file mode 100644 index 0000000000000..5af64580ce1be --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +interface IBreadcrumbNameMap { + [path: string]: string; +} + +export const breadcrumbNameMap: IBreadcrumbNameMap = { + '/Dashboard': 'Dashboard', + '/ContainerBrowser': 'Container Browser' +}; diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.less b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.less new file mode 100644 index 0000000000000..1b94f4e6a4bc1 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.less @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +body { + margin: 0; + padding: 0; + font-family: -apple-system, BlinkMacSystemFont, 'Roboto', 'Segoe UI', + 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', + 'Helvetica Neue', sans-serif; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +code { + font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', + monospace; +} diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.tsx b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.tsx new file mode 100644 index 0000000000000..a3e450c34f303 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.tsx @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import ReactDOM from 'react-dom'; +import './index.less'; +import App from './App'; +import * as serviceWorker from './serviceWorker'; + +ReactDOM.render(, document.getElementById('root')); + +// If you want your app to work offline and load faster, you can change +// unregister() to register() below. Note this comes with some pitfalls. 
+// Learn more about service workers: https://bit.ly/CRA-PWA +serviceWorker.unregister(); diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/logo.png b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..0438317fa5a0bbce14aa879ad6845b94d3813e45 GIT binary patch literal 22480 zcmbq)1yCH@*6uL42X_JlcXyZIZi59K+})iJoDkgI-CYC03GR~M?rx8BuATee`&Yes zZ>nZ`diVOiC3|h_-W{f-Ac>5Cj{pDwkfo)>Ro;#sZ(n~nn73ahvw`Hd1B8=`q$r?r zoN)i`1m0ds%LxEL#Q5V20Z2{91pr_TEYv{GAbB}nV>=s0LlZkAFr&MT{hKrZ!0*oc z_SXjNY)Im6V{Pli>n=d{rv&fY|38YE$VmPaakdg51Ia6qh}k)UNjMnU8JWoh5lBc# z_#I75c~!(e{vG~yCO~HH>}=1=#N_7Y#^}bzXy<6g#KObF!^F(W#LCL>Cc)t3Ve4$@ z&S2~G;cq1W;1LHq89Q3oJ6qVx~hoXOe{${7Wxt+6}lewM!Kh@bf z{oS`WFPZ*%nTdswnd!d~I9r(h%hZ3v{&D4hg}p`VziV_iwEtg0CdU87*tCPCSXCfzry}i{1?<8wZNbOsBuGP7_qFmp4ovZ=AK@v`&qva>QUbMZ1W^E3U2?El~qa|FLJ{5KYEUbcT@ z`HSTL;COS>!qnrxar}oR|C0P49DlOgIjY&&SqqB%(QZkU{*m@1VqzqJct}DmZ)j{` z`$t=#`I}>Z1^tE3#F*FA&e6uuSbB z`Ts`qAF8A*oSf|(J^t)Z{@Kj`>{wL64*y>IM_&HXGgw>v*+JMFIy!;>$YTMreYBi|Bs?{>y=X>iD|?H*>J<|4h8Urp({i{*?OP zNi1xgoDFS_!GDjq#s780|Hl3o`2ViM(D=_bW&CHNoA5LJ4f^-e{|Vz~`di5Vy(<1z z`tLRLk6QoF8s-06R!RTA`t~32e;N6A>i^F~{R{l>)PHmEFJsc*2E0Fm@iyWKBD{I~ zkI_>Qq5oAg1pp8RNQ;Z8xkDUhz@}^TU*GOp=vW{IB8M~>i>Mvt`U`|{z(fr|dFAFR z=dmaoN7eKbM^W6^TGtr&tyLRnzRQ<7we~r9e9rLNU2H>btER@k^vif&$!KvZdfiFq zaVq2=e@0Cc&3U&OcIfdd1Sf96Q{~s@dYMRlcYB{~7t5DB8%ywGJ<)-pW)tN-wlR zFMi@vQGfuLAEAmJ`+Nq>nsOhUkukDeSTwzFq3`#6E%$rooMA+jJs53o*saB-(CGJi z-6kxoBV4&MR#eqzCY8fCTQ$lmD~V%e3SfXHew`H~b?A!7ilImj5x;2Z=<4*^Tr)&6 zznwh=qnz2Qs!n^U7AZV`X`iJT4F;>|+(%>k-mh2tG9RUW#%!Q4Ux~D9oy?Qev}ykJ zc^NQT>E8_;ywP3VB+Vy?HT>-!q@F^@4W`dhpc}f?u5(lMd3SAT5z70M5J9~~B|dt^A1v75WM{wnY!nJP-ow7aYbjMc z8HLA2`8q&gx0UI0vv@c7^+SaJ&GGU)b#-oTF9rH1Qx$p`L;u+i2sa-wE2v~@9>$xG zvVCvwceD7EJCVdhU%DYtM^W+F2W>jOM|2ac-e6BD8sZs zn(EFHv^}6ck;F`uTY!MWZ}Cv2LKDQk&&VUEH9v^&j~ka63&ych6%Dsv-Cyo^?Vg4d zeW&P!hQD52Uj+EV-P|J7dh$mZ7&9WbFh+S&Mm@@Md>>6=TXmWcNP<|n`=%&pk>9eN zs1shAE)4c&*YQVy6DTNT=$c}F>8Llpb6W)J`O8X8^93g7)zMh~+WMwAeq1*pXnR=E zbR&8w-rczsh{$+HB^E`!+KddQL5LKbcunkZm-T_A$oUPFS4D%D0uHC z4`bdAeuyy{7koV0D%Ywrg40F`7zAL^T^n0sb{F$$>)s>A-*nQIzcRMX#6Rro#dI^$ z(XpMJKO9!2`E*0TZlMTV{p?lXJ0?F+S1~?3I2ad3Y1U-={+3ATHSQk~&ID3V3Ad_i zKX&-L;v&0}ULTSU`)mZjvcBb1JvA8&m^-S)z4)2pc)d+!^iydyf4-^*pzu4E+cS26dx7Cf+@X-^<@m!$opxW_w1w$HD$>V9S< zsx4&8qDmza+MNNzC)C5j%FI-8fb z`{L^z%gaW)2k}PRS-uY?kE^G?NqjrW#^gC=(>rM76DGwxiWX=^+;aEzU5bx<5GgWP z(6O ztWx&Kh)&xyttRq3o%PA{>N$;Z5N*z@^h5>H17y~hFTcIk;s#)G$a=>3UfMcPaVJRc z<$a8D2VNU- z0qgTX%=@h85&akRuFG-0%^+A*&KwC~0$}>XbSBZWDZ~fEXD=PY-1Wqr8)1n)iRWC~ zkn>ZU#S5`wXSi5};Z(opL;Vyn6drM#-?+7m;8J&$sTZ_+}{=?NL-_b ziiR9~Mj<+Q>s+caMYV{3Iw8qZP2IoW?MT0JtWR3Fy){Ziq$umgcn z+sdA*I%_^4=%IV^AIiA2@RwMlyT*t|vTKh!keSFw@_G zf@90XOK|j@l~1mw6&1gD7O<-2B_e15`ylg&J(`;Gd^;~53I|lR(cQg^lrwWl3K>z5 zQ|Hz0%_aEI6q{!s%6Dk_)SFLMTie1fWTrsrDKs%;Pt6@Hns>yY9iZ6AxA_?P2&MjO2L&!v7(9Lsj9rv*zNSDlpNjQ`> z*PnP})BK7qHXgAk!&Up&#RyZ(Y5V2)+#Ya1NM{VULk{^tUp?@%UP9j-0`~d ztH}&zV9~~~UPwpIvnh+Vf73$Gh4VW`5cDkO0+Umd_QAk<8UkU!r>7j)_X?*S6Q95# z?P)w_OSvKfUT!Svsi-adHcRr?uRA*WQr#frs9QcjCk#C6YpBpu&#HYNvO~QvOtruA zhr_R7t#4V__+*0l=~6j>&VXXHy6{NJZ6_27FHX|a#j2t~qt@+oO<}|JCuSjsT=eXi zo;f1Pw3(dKK2l(I4}N|ut!#s5QkC+q5uUjBhw&+cgZw!db}?h)pk?p63f2y{C-<-DjnAGL7qmo~slQ59?5 zKe~fb8UY3q@&5UCT_^^SG!y7&UI@99MvebT%<6`5y3XPCX;;YXykZTOx)(0?A6fb4~=|FF2^fl!IBL^Hng4T1;?}&eK_`r8y_K)c|0r* zWa$OITKmF=ln=*mhymwGOu(;qKZGckjV4V@_9bji=U`87I4`A)FX5?W$E;(9VhtQZMJ1Hj z=Ftlsc5sJK$^YrZ=vr&RrOtu)_hY_j+jd94I=Tr@XK710gFi(QWAjcS5~}BuZVG|c 
z$BbzK@U!u7eqnwM?kNKFf>+PZ+&Si!BK0C%>0%lEnrn>Zw{9HA)Pb^!HrO-XKbjv@ z3xH>F48#4h03J2rxuy@NkCF5$;cCY0d9(Kk3ti1_L&?oCVa74{GgLjT4>wMyOYD)k%sSMPYw5KcE%>M-#NLe=krm0lFw7Fo%al z{ZY930b#aGMFCI`@Gb^#_zxA}b8PWhONmLi;?X$#qj0rMt@{HA44z(Pxd!g}A-+vW z&nJe(i_rw1HphW=* z_lp7;>OH}ioNcz9QBT^U0)C~`Ta4?&|7iaJTfl>i{VyIJ%<^Nege%Q^rL_{c8m}ce zd(|Y)AI%TG1pw5u>4Tx(^L&9{V^ z;|g~I7A;`){7AiF { + return ( + ( + + )} + /> + ); +}; diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/react-app-env.d.ts b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/react-app-env.d.ts new file mode 100644 index 0000000000000..15f01c4b10046 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/react-app-env.d.ts @@ -0,0 +1,18 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/// diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.tsx b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.tsx new file mode 100644 index 0000000000000..4ea0a39f687d7 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.tsx @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { Dashboard } from './views/Dashboard/Dashboard'; +import { ContainerBrowser } from './views/ContainerBrowser/ContainerBrowser'; +import { NotFound } from './views/NotFound/NotFound'; +import { IRoute } from "./routes.types"; + +export const routes:IRoute[] = [ + { + path: "/Dashboard", + component: Dashboard + }, + { + path: "/ContainerBrowser", + component: ContainerBrowser + }, + { + path: "/:NotFound", + component: NotFound, + } +]; diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.types.tsx b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.types.tsx new file mode 100644 index 0000000000000..7e12d80f4d92c --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.types.tsx @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +export interface IRoute { + path: string; + component: any; + routes?: IRoute[]; +} diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/serviceWorker.ts b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/serviceWorker.ts new file mode 100644 index 0000000000000..47bb33ba7cdb9 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/serviceWorker.ts @@ -0,0 +1,161 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This optional code is used to register a service worker. +// register() is not called by default. + +// This lets the app load faster on subsequent visits in production, and gives +// it offline capabilities. However, it also means that developers (and users) +// will only see deployed updates on subsequent visits to a page, after all the +// existing tabs open on the page have been closed, since previously cached +// resources are updated in the background. 
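Stepping back to routes.tsx and routes.types.tsx above: the routes array is a plain route table, and each IRoute entry is meant to be mapped onto a router component by the application shell. A minimal sketch of such a consumer is shown below; it assumes react-router-dom (not shown in this excerpt) and a hypothetical AppRoutes component name, and is not part of the patch itself:

    import React from 'react';
    import { Route, Switch } from 'react-router-dom'; // assumed dependency, not shown in this excerpt
    import { routes } from './routes';
    import { IRoute } from './routes.types';

    // Render one <Route> per IRoute entry; inside a <Switch>, the "/:NotFound"
    // catch-all in routes.tsx only matches when neither earlier path does.
    export const AppRoutes: React.FC = () => (
      <Switch>
        {routes.map((route: IRoute) => (
          <Route key={route.path} path={route.path} component={route.component} />
        ))}
      </Switch>
    );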
+ +// To learn more about the benefits of this model and instructions on how to +// opt-in, read https://bit.ly/CRA-PWA + +const isLocalhost = Boolean( + window.location.hostname === 'localhost' || + // [::1] is the IPv6 localhost address. + window.location.hostname === '[::1]' || + // 127.0.0.1/8 is considered localhost for IPv4. + window.location.hostname.match( + /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/ + ) +); + +type Config = { + onSuccess?: (registration: ServiceWorkerRegistration) => void; + onUpdate?: (registration: ServiceWorkerRegistration) => void; +}; + +export function register(config?: Config) { + if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) { + // The URL constructor is available in all browsers that support SW. + const publicUrl = new URL( + (process as { env: { [key: string]: string } }).env.PUBLIC_URL, + window.location.href + ); + if (publicUrl.origin !== window.location.origin) { + // Our service worker won't work if PUBLIC_URL is on a different origin + // from what our page is served on. This might happen if a CDN is used to + // serve assets; see https://github.com/facebook/create-react-app/issues/2374 + return; + } + + window.addEventListener('load', () => { + const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`; + + if (isLocalhost) { + // This is running on localhost. Let's check if a service worker still exists or not. + checkValidServiceWorker(swUrl, config); + + // Add some additional logging to localhost, pointing developers to the + // service worker/PWA documentation. + navigator.serviceWorker.ready.then(() => { + console.log( + 'This web app is being served cache-first by a service ' + + 'worker. To learn more, visit https://bit.ly/CRA-PWA' + ); + }); + } else { + // Is not localhost. Just register service worker + registerValidSW(swUrl, config); + } + }); + } +} + +function registerValidSW(swUrl: string, config?: Config) { + navigator.serviceWorker + .register(swUrl) + .then(registration => { + registration.onupdatefound = () => { + const installingWorker = registration.installing; + if (installingWorker == null) { + return; + } + installingWorker.onstatechange = () => { + if (installingWorker.state === 'installed') { + if (navigator.serviceWorker.controller) { + // At this point, the updated precached content has been fetched, + // but the previous service worker will still serve the older + // content until all client tabs are closed. + console.log( + 'New content is available and will be used when all ' + + 'tabs for this page are closed. See https://bit.ly/CRA-PWA.' + ); + + // Execute callback + if (config && config.onUpdate) { + config.onUpdate(registration); + } + } else { + // At this point, everything has been precached. + // It's the perfect time to display a + // "Content is cached for offline use." message. + console.log('Content is cached for offline use.'); + + // Execute callback + if (config && config.onSuccess) { + config.onSuccess(registration); + } + } + } + }; + }; + }) + .catch(error => { + console.error('Error during service worker registration:', error); + }); +} + +function checkValidServiceWorker(swUrl: string, config?: Config) { + // Check if the service worker can be found. If it can't reload the page. + fetch(swUrl) + .then(response => { + // Ensure service worker exists, and that we really are getting a JS file. 
+ const contentType = response.headers.get('content-type'); + if ( + response.status === 404 || + (contentType != null && contentType.indexOf('javascript') === -1) + ) { + // No service worker found. Probably a different app. Reload the page. + navigator.serviceWorker.ready.then(registration => { + registration.unregister().then(() => { + window.location.reload(); + }); + }); + } else { + // Service worker found. Proceed as normal. + registerValidSW(swUrl, config); + } + }) + .catch(() => { + console.log( + 'No internet connection found. App is running in offline mode.' + ); + }); +} + +export function unregister() { + if ('serviceWorker' in navigator) { + navigator.serviceWorker.ready.then(registration => { + registration.unregister(); + }); + } +} diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/ContainerBrowser/ContainerBrowser.tsx b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/ContainerBrowser/ContainerBrowser.tsx new file mode 100644 index 0000000000000..981f767994a7c --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/ContainerBrowser/ContainerBrowser.tsx @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; + +export const ContainerBrowser:React.FC= () => { + return ( + +
+      <div>
+        <div>Container Browser</div>
+        <div>Container Browser content</div>
+      </div>
    + ); +}; diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Dashboard/Dashboard.tsx b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Dashboard/Dashboard.tsx new file mode 100644 index 0000000000000..682d5997b73b1 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Dashboard/Dashboard.tsx @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; + +export const Dashboard:React.FC= () => { + return ( +
+      <div>
+        <div>Dashboard</div>
+        <div>Dashboard content</div>
+      </div>
    + ); +}; diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/NotFound/NotFound.tsx b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/NotFound/NotFound.tsx new file mode 100644 index 0000000000000..5bc27cbc2d74b --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/NotFound/NotFound.tsx @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; + +export const NotFound:React.FC= () => { + return ( +
+      <div>
+        404 Page Not Found :(
+      </div>
    + ); +}; diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/tsconfig.json b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/tsconfig.json new file mode 100644 index 0000000000000..96c8b91945bd1 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/tsconfig.json @@ -0,0 +1,27 @@ +{ + "compilerOptions": { + "target": "es5", + "lib": [ + "dom", + "dom.iterable", + "esnext" + ], + "allowJs": true, + "skipLibCheck": true, + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "strict": true, + "forceConsistentCasingInFileNames": true, + "module": "esnext", + "moduleResolution": "node", + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "preserve", + "rootDir": "src", + "baseUrl": "src" + }, + "include": [ + "src" + ] +} diff --git a/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/yarn.lock b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/yarn.lock new file mode 100644 index 0000000000000..1236fed636419 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/yarn.lock @@ -0,0 +1,9638 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + +"@ant-design/create-react-context@^0.2.4": + version "0.2.4" + resolved "https://registry.yarnpkg.com/@ant-design/create-react-context/-/create-react-context-0.2.4.tgz#0fe9adad030350c0c9bb296dd6dcf5a8a36bd425" + dependencies: + gud "^1.0.0" + warning "^4.0.3" + +"@ant-design/icons-react@~1.1.5": + version "1.1.5" + resolved "https://registry.yarnpkg.com/@ant-design/icons-react/-/icons-react-1.1.5.tgz#1b03da8dcced2a4bb982ef7b25c1d24014c35a68" + dependencies: + ant-design-palettes "^1.1.3" + babel-runtime "^6.26.0" + +"@ant-design/icons@~1.2.0": + version "1.2.1" + resolved "https://registry.yarnpkg.com/@ant-design/icons/-/icons-1.2.1.tgz#8e19301b1433ec67d6bbd0e892782e2ade561ff9" + +"@babel/code-frame@7.0.0", "@babel/code-frame@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.0.0.tgz#06e2ab19bdb535385559aabb5ba59729482800f8" + dependencies: + "@babel/highlight" "^7.0.0" + +"@babel/core@7.4.3", "@babel/core@^7.1.0", "@babel/core@^7.1.6", "@babel/core@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.4.3.tgz#198d6d3af4567be3989550d97e068de94503074f" + dependencies: + "@babel/code-frame" "^7.0.0" + "@babel/generator" "^7.4.0" + "@babel/helpers" "^7.4.3" + "@babel/parser" "^7.4.3" + "@babel/template" "^7.4.0" + "@babel/traverse" "^7.4.3" + "@babel/types" "^7.4.0" + convert-source-map "^1.1.0" + debug "^4.1.0" + json5 "^2.1.0" + lodash "^4.17.11" + resolve "^1.3.2" + semver "^5.4.1" + source-map "^0.5.0" + +"@babel/generator@^7.0.0", "@babel/generator@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.4.0.tgz#c230e79589ae7a729fd4631b9ded4dc220418196" + dependencies: + "@babel/types" "^7.4.0" + jsesc "^2.5.1" + lodash "^4.17.11" + source-map "^0.5.0" + trim-right "^1.0.1" + +"@babel/generator@^7.4.4": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.4.4.tgz#174a215eb843fc392c7edcaabeaa873de6e8f041" + dependencies: + "@babel/types" "^7.4.4" + jsesc "^2.5.1" + lodash "^4.17.11" + source-map "^0.5.0" + trim-right "^1.0.1" + +"@babel/helper-annotate-as-pure@^7.0.0": + version "7.0.0" + resolved 
"https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.0.0.tgz#323d39dd0b50e10c7c06ca7d7638e6864d8c5c32" + dependencies: + "@babel/types" "^7.0.0" + +"@babel/helper-builder-binary-assignment-operator-visitor@^7.1.0": + version "7.1.0" + resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.1.0.tgz#6b69628dfe4087798e0c4ed98e3d4a6b2fbd2f5f" + dependencies: + "@babel/helper-explode-assignable-expression" "^7.1.0" + "@babel/types" "^7.0.0" + +"@babel/helper-builder-react-jsx@^7.3.0": + version "7.3.0" + resolved "https://registry.yarnpkg.com/@babel/helper-builder-react-jsx/-/helper-builder-react-jsx-7.3.0.tgz#a1ac95a5d2b3e88ae5e54846bf462eeb81b318a4" + dependencies: + "@babel/types" "^7.3.0" + esutils "^2.0.0" + +"@babel/helper-call-delegate@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/helper-call-delegate/-/helper-call-delegate-7.4.0.tgz#f308eabe0d44f451217853aedf4dea5f6fe3294f" + dependencies: + "@babel/helper-hoist-variables" "^7.4.0" + "@babel/traverse" "^7.4.0" + "@babel/types" "^7.4.0" + +"@babel/helper-create-class-features-plugin@^7.4.0": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.4.4.tgz#fc3d690af6554cc9efc607364a82d48f58736dba" + dependencies: + "@babel/helper-function-name" "^7.1.0" + "@babel/helper-member-expression-to-functions" "^7.0.0" + "@babel/helper-optimise-call-expression" "^7.0.0" + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-replace-supers" "^7.4.4" + "@babel/helper-split-export-declaration" "^7.4.4" + +"@babel/helper-define-map@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/helper-define-map/-/helper-define-map-7.4.0.tgz#cbfd8c1b2f12708e262c26f600cd16ed6a3bc6c9" + dependencies: + "@babel/helper-function-name" "^7.1.0" + "@babel/types" "^7.4.0" + lodash "^4.17.11" + +"@babel/helper-explode-assignable-expression@^7.1.0": + version "7.1.0" + resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.1.0.tgz#537fa13f6f1674df745b0c00ec8fe4e99681c8f6" + dependencies: + "@babel/traverse" "^7.1.0" + "@babel/types" "^7.0.0" + +"@babel/helper-function-name@^7.1.0": + version "7.1.0" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.1.0.tgz#a0ceb01685f73355d4360c1247f582bfafc8ff53" + dependencies: + "@babel/helper-get-function-arity" "^7.0.0" + "@babel/template" "^7.1.0" + "@babel/types" "^7.0.0" + +"@babel/helper-get-function-arity@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.0.0.tgz#83572d4320e2a4657263734113c42868b64e49c3" + dependencies: + "@babel/types" "^7.0.0" + +"@babel/helper-hoist-variables@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.4.0.tgz#25b621399ae229869329730a62015bbeb0a6fbd6" + dependencies: + "@babel/types" "^7.4.0" + +"@babel/helper-member-expression-to-functions@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.0.0.tgz#8cd14b0a0df7ff00f009e7d7a436945f47c7a16f" + dependencies: + "@babel/types" "^7.0.0" + +"@babel/helper-module-imports@^7.0.0": + version "7.0.0" + resolved 
"https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.0.0.tgz#96081b7111e486da4d2cd971ad1a4fe216cc2e3d" + dependencies: + "@babel/types" "^7.0.0" + +"@babel/helper-module-transforms@^7.1.0", "@babel/helper-module-transforms@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.4.3.tgz#b1e357a1c49e58a47211a6853abb8e2aaefeb064" + dependencies: + "@babel/helper-module-imports" "^7.0.0" + "@babel/helper-simple-access" "^7.1.0" + "@babel/helper-split-export-declaration" "^7.0.0" + "@babel/template" "^7.2.2" + "@babel/types" "^7.2.2" + lodash "^4.17.11" + +"@babel/helper-optimise-call-expression@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.0.0.tgz#a2920c5702b073c15de51106200aa8cad20497d5" + dependencies: + "@babel/types" "^7.0.0" + +"@babel/helper-plugin-utils@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.0.0.tgz#bbb3fbee98661c569034237cc03967ba99b4f250" + +"@babel/helper-regex@^7.0.0", "@babel/helper-regex@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/helper-regex/-/helper-regex-7.4.3.tgz#9d6e5428bfd638ab53b37ae4ec8caf0477495147" + dependencies: + lodash "^4.17.11" + +"@babel/helper-remap-async-to-generator@^7.1.0": + version "7.1.0" + resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.1.0.tgz#361d80821b6f38da75bd3f0785ece20a88c5fe7f" + dependencies: + "@babel/helper-annotate-as-pure" "^7.0.0" + "@babel/helper-wrap-function" "^7.1.0" + "@babel/template" "^7.1.0" + "@babel/traverse" "^7.1.0" + "@babel/types" "^7.0.0" + +"@babel/helper-replace-supers@^7.1.0", "@babel/helper-replace-supers@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.4.0.tgz#4f56adb6aedcd449d2da9399c2dcf0545463b64c" + dependencies: + "@babel/helper-member-expression-to-functions" "^7.0.0" + "@babel/helper-optimise-call-expression" "^7.0.0" + "@babel/traverse" "^7.4.0" + "@babel/types" "^7.4.0" + +"@babel/helper-replace-supers@^7.4.4": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.4.4.tgz#aee41783ebe4f2d3ab3ae775e1cc6f1a90cefa27" + dependencies: + "@babel/helper-member-expression-to-functions" "^7.0.0" + "@babel/helper-optimise-call-expression" "^7.0.0" + "@babel/traverse" "^7.4.4" + "@babel/types" "^7.4.4" + +"@babel/helper-simple-access@^7.1.0": + version "7.1.0" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.1.0.tgz#65eeb954c8c245beaa4e859da6188f39d71e585c" + dependencies: + "@babel/template" "^7.1.0" + "@babel/types" "^7.0.0" + +"@babel/helper-split-export-declaration@^7.0.0", "@babel/helper-split-export-declaration@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.4.0.tgz#571bfd52701f492920d63b7f735030e9a3e10b55" + dependencies: + "@babel/types" "^7.4.0" + +"@babel/helper-split-export-declaration@^7.4.4": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.4.4.tgz#ff94894a340be78f53f06af038b205c49d993677" + dependencies: + "@babel/types" "^7.4.4" + +"@babel/helper-wrap-function@^7.1.0": + version "7.2.0" 
+ resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.2.0.tgz#c4e0012445769e2815b55296ead43a958549f6fa" + dependencies: + "@babel/helper-function-name" "^7.1.0" + "@babel/template" "^7.1.0" + "@babel/traverse" "^7.1.0" + "@babel/types" "^7.2.0" + +"@babel/helpers@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.4.3.tgz#7b1d354363494b31cb9a2417ae86af32b7853a3b" + dependencies: + "@babel/template" "^7.4.0" + "@babel/traverse" "^7.4.3" + "@babel/types" "^7.4.0" + +"@babel/highlight@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.0.0.tgz#f710c38c8d458e6dd9a201afb637fcb781ce99e4" + dependencies: + chalk "^2.0.0" + esutils "^2.0.2" + js-tokens "^4.0.0" + +"@babel/parser@^7.0.0", "@babel/parser@^7.1.0", "@babel/parser@^7.4.0", "@babel/parser@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.4.3.tgz#eb3ac80f64aa101c907d4ce5406360fe75b7895b" + +"@babel/parser@^7.4.4": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.4.4.tgz#5977129431b8fe33471730d255ce8654ae1250b6" + +"@babel/plugin-proposal-async-generator-functions@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.2.0.tgz#b289b306669dce4ad20b0252889a15768c9d417e" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-remap-async-to-generator" "^7.1.0" + "@babel/plugin-syntax-async-generators" "^7.2.0" + +"@babel/plugin-proposal-class-properties@7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.4.0.tgz#d70db61a2f1fd79de927eea91f6411c964e084b8" + dependencies: + "@babel/helper-create-class-features-plugin" "^7.4.0" + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-proposal-decorators@7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.4.0.tgz#8e1bfd83efa54a5f662033afcc2b8e701f4bb3a9" + dependencies: + "@babel/helper-create-class-features-plugin" "^7.4.0" + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-syntax-decorators" "^7.2.0" + +"@babel/plugin-proposal-json-strings@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.2.0.tgz#568ecc446c6148ae6b267f02551130891e29f317" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-syntax-json-strings" "^7.2.0" + +"@babel/plugin-proposal-object-rest-spread@7.4.3", "@babel/plugin-proposal-object-rest-spread@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.4.3.tgz#be27cd416eceeba84141305b93c282f5de23bbb4" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-syntax-object-rest-spread" "^7.2.0" + +"@babel/plugin-proposal-optional-catch-binding@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.2.0.tgz#135d81edb68a081e55e56ec48541ece8065c38f5" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-syntax-optional-catch-binding" "^7.2.0" + +"@babel/plugin-proposal-unicode-property-regex@^7.4.0": + version "7.4.0" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.4.0.tgz#202d91ee977d760ef83f4f416b280d568be84623" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-regex" "^7.0.0" + regexpu-core "^4.5.4" + +"@babel/plugin-syntax-async-generators@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.2.0.tgz#69e1f0db34c6f5a0cf7e2b3323bf159a76c8cb7f" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-syntax-decorators@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.2.0.tgz#c50b1b957dcc69e4b1127b65e1c33eef61570c1b" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-syntax-dynamic-import@7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.2.0.tgz#69c159ffaf4998122161ad8ebc5e6d1f55df8612" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-syntax-flow@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.2.0.tgz#a765f061f803bc48f240c26f8747faf97c26bf7c" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-syntax-json-strings@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.2.0.tgz#72bd13f6ffe1d25938129d2a186b11fd62951470" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-syntax-jsx@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.2.0.tgz#0b85a3b4bc7cdf4cc4b8bf236335b907ca22e7c7" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-syntax-object-rest-spread@^7.0.0", "@babel/plugin-syntax-object-rest-spread@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.2.0.tgz#3b7a3e733510c57e820b9142a6579ac8b0dfad2e" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-syntax-optional-catch-binding@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.2.0.tgz#a94013d6eda8908dfe6a477e7f9eda85656ecf5c" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-syntax-typescript@^7.2.0": + version "7.3.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.3.3.tgz#a7cc3f66119a9f7ebe2de5383cce193473d65991" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-arrow-functions@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.2.0.tgz#9aeafbe4d6ffc6563bf8f8372091628f00779550" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-async-to-generator@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.4.0.tgz#234fe3e458dce95865c0d152d256119b237834b0" + dependencies: + "@babel/helper-module-imports" "^7.0.0" + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-remap-async-to-generator" "^7.1.0" + 
+"@babel/plugin-transform-block-scoped-functions@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.2.0.tgz#5d3cc11e8d5ddd752aa64c9148d0db6cb79fd190" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-block-scoping@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.4.0.tgz#164df3bb41e3deb954c4ca32ffa9fcaa56d30bcb" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + lodash "^4.17.11" + +"@babel/plugin-transform-classes@7.4.3", "@babel/plugin-transform-classes@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.4.3.tgz#adc7a1137ab4287a555d429cc56ecde8f40c062c" + dependencies: + "@babel/helper-annotate-as-pure" "^7.0.0" + "@babel/helper-define-map" "^7.4.0" + "@babel/helper-function-name" "^7.1.0" + "@babel/helper-optimise-call-expression" "^7.0.0" + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-replace-supers" "^7.4.0" + "@babel/helper-split-export-declaration" "^7.4.0" + globals "^11.1.0" + +"@babel/plugin-transform-computed-properties@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.2.0.tgz#83a7df6a658865b1c8f641d510c6f3af220216da" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-destructuring@7.4.3", "@babel/plugin-transform-destructuring@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.4.3.tgz#1a95f5ca2bf2f91ef0648d5de38a8d472da4350f" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-dotall-regex@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.4.3.tgz#fceff1c16d00c53d32d980448606f812cd6d02bf" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-regex" "^7.4.3" + regexpu-core "^4.5.4" + +"@babel/plugin-transform-duplicate-keys@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.2.0.tgz#d952c4930f312a4dbfff18f0b2914e60c35530b3" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-exponentiation-operator@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.2.0.tgz#a63868289e5b4007f7054d46491af51435766008" + dependencies: + "@babel/helper-builder-binary-assignment-operator-visitor" "^7.1.0" + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-flow-strip-types@7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.4.0.tgz#f3c59eecff68c99b9c96eaafe4fe9d1fa8947138" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-syntax-flow" "^7.2.0" + +"@babel/plugin-transform-for-of@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.4.3.tgz#c36ff40d893f2b8352202a2558824f70cd75e9fe" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-function-name@^7.4.3": + 
version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.4.3.tgz#130c27ec7fb4f0cba30e958989449e5ec8d22bbd" + dependencies: + "@babel/helper-function-name" "^7.1.0" + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-literals@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.2.0.tgz#690353e81f9267dad4fd8cfd77eafa86aba53ea1" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-member-expression-literals@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.2.0.tgz#fa10aa5c58a2cb6afcf2c9ffa8cb4d8b3d489a2d" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-modules-amd@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.2.0.tgz#82a9bce45b95441f617a24011dc89d12da7f4ee6" + dependencies: + "@babel/helper-module-transforms" "^7.1.0" + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-modules-commonjs@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.4.3.tgz#3917f260463ac08f8896aa5bd54403f6e1fed165" + dependencies: + "@babel/helper-module-transforms" "^7.4.3" + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-simple-access" "^7.1.0" + +"@babel/plugin-transform-modules-systemjs@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.4.0.tgz#c2495e55528135797bc816f5d50f851698c586a1" + dependencies: + "@babel/helper-hoist-variables" "^7.4.0" + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-modules-umd@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.2.0.tgz#7678ce75169f0877b8eb2235538c074268dd01ae" + dependencies: + "@babel/helper-module-transforms" "^7.1.0" + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-named-capturing-groups-regex@^7.4.2": + version "7.4.2" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.4.2.tgz#800391136d6cbcc80728dbdba3c1c6e46f86c12e" + dependencies: + regexp-tree "^0.1.0" + +"@babel/plugin-transform-new-target@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.4.0.tgz#67658a1d944edb53c8d4fa3004473a0dd7838150" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-object-super@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.2.0.tgz#b35d4c10f56bab5d650047dad0f1d8e8814b6598" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-replace-supers" "^7.1.0" + +"@babel/plugin-transform-parameters@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.4.3.tgz#e5ff62929fdf4cf93e58badb5e2430303003800d" + dependencies: + "@babel/helper-call-delegate" "^7.4.0" + "@babel/helper-get-function-arity" "^7.0.0" + "@babel/helper-plugin-utils" "^7.0.0" + 
+"@babel/plugin-transform-property-literals@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.2.0.tgz#03e33f653f5b25c4eb572c98b9485055b389e905" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-react-constant-elements@7.2.0", "@babel/plugin-transform-react-constant-elements@^7.0.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.2.0.tgz#ed602dc2d8bff2f0cb1a5ce29263dbdec40779f7" + dependencies: + "@babel/helper-annotate-as-pure" "^7.0.0" + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-react-display-name@7.2.0", "@babel/plugin-transform-react-display-name@^7.0.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.2.0.tgz#ebfaed87834ce8dc4279609a4f0c324c156e3eb0" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-react-jsx-self@^7.0.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.2.0.tgz#461e21ad9478f1031dd5e276108d027f1b5240ba" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-syntax-jsx" "^7.2.0" + +"@babel/plugin-transform-react-jsx-source@^7.0.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.2.0.tgz#20c8c60f0140f5dd3cd63418d452801cf3f7180f" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-syntax-jsx" "^7.2.0" + +"@babel/plugin-transform-react-jsx@^7.0.0": + version "7.3.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.3.0.tgz#f2cab99026631c767e2745a5368b331cfe8f5290" + dependencies: + "@babel/helper-builder-react-jsx" "^7.3.0" + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-syntax-jsx" "^7.2.0" + +"@babel/plugin-transform-regenerator@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.4.3.tgz#2a697af96887e2bbf5d303ab0221d139de5e739c" + dependencies: + regenerator-transform "^0.13.4" + +"@babel/plugin-transform-reserved-words@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.2.0.tgz#4792af87c998a49367597d07fedf02636d2e1634" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-runtime@7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.4.3.tgz#4d6691690ecdc9f5cb8c3ab170a1576c1f556371" + dependencies: + "@babel/helper-module-imports" "^7.0.0" + "@babel/helper-plugin-utils" "^7.0.0" + resolve "^1.8.1" + semver "^5.5.1" + +"@babel/plugin-transform-shorthand-properties@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.2.0.tgz#6333aee2f8d6ee7e28615457298934a3b46198f0" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-spread@^7.2.0": + version "7.2.2" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.2.2.tgz#3103a9abe22f742b6d406ecd3cd49b774919b406" + 
dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-sticky-regex@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.2.0.tgz#a1e454b5995560a9c1e0d537dfc15061fd2687e1" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-regex" "^7.0.0" + +"@babel/plugin-transform-template-literals@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.2.0.tgz#d87ed01b8eaac7a92473f608c97c089de2ba1e5b" + dependencies: + "@babel/helper-annotate-as-pure" "^7.0.0" + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-typeof-symbol@^7.2.0": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.2.0.tgz#117d2bcec2fbf64b4b59d1f9819894682d29f2b2" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + +"@babel/plugin-transform-typescript@^7.3.2": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.4.4.tgz#93e9c3f2a546e6d3da1e9cc990e30791b807aa9f" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-syntax-typescript" "^7.2.0" + +"@babel/plugin-transform-unicode-regex@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.4.3.tgz#3868703fc0e8f443dda65654b298df576f7b863b" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-regex" "^7.4.3" + regexpu-core "^4.5.4" + +"@babel/preset-env@7.4.3", "@babel/preset-env@^7.1.6": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.4.3.tgz#e71e16e123dc0fbf65a52cbcbcefd072fbd02880" + dependencies: + "@babel/helper-module-imports" "^7.0.0" + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-proposal-async-generator-functions" "^7.2.0" + "@babel/plugin-proposal-json-strings" "^7.2.0" + "@babel/plugin-proposal-object-rest-spread" "^7.4.3" + "@babel/plugin-proposal-optional-catch-binding" "^7.2.0" + "@babel/plugin-proposal-unicode-property-regex" "^7.4.0" + "@babel/plugin-syntax-async-generators" "^7.2.0" + "@babel/plugin-syntax-json-strings" "^7.2.0" + "@babel/plugin-syntax-object-rest-spread" "^7.2.0" + "@babel/plugin-syntax-optional-catch-binding" "^7.2.0" + "@babel/plugin-transform-arrow-functions" "^7.2.0" + "@babel/plugin-transform-async-to-generator" "^7.4.0" + "@babel/plugin-transform-block-scoped-functions" "^7.2.0" + "@babel/plugin-transform-block-scoping" "^7.4.0" + "@babel/plugin-transform-classes" "^7.4.3" + "@babel/plugin-transform-computed-properties" "^7.2.0" + "@babel/plugin-transform-destructuring" "^7.4.3" + "@babel/plugin-transform-dotall-regex" "^7.4.3" + "@babel/plugin-transform-duplicate-keys" "^7.2.0" + "@babel/plugin-transform-exponentiation-operator" "^7.2.0" + "@babel/plugin-transform-for-of" "^7.4.3" + "@babel/plugin-transform-function-name" "^7.4.3" + "@babel/plugin-transform-literals" "^7.2.0" + "@babel/plugin-transform-member-expression-literals" "^7.2.0" + "@babel/plugin-transform-modules-amd" "^7.2.0" + "@babel/plugin-transform-modules-commonjs" "^7.4.3" + "@babel/plugin-transform-modules-systemjs" "^7.4.0" + "@babel/plugin-transform-modules-umd" "^7.2.0" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.4.2" + "@babel/plugin-transform-new-target" "^7.4.0" + 
"@babel/plugin-transform-object-super" "^7.2.0" + "@babel/plugin-transform-parameters" "^7.4.3" + "@babel/plugin-transform-property-literals" "^7.2.0" + "@babel/plugin-transform-regenerator" "^7.4.3" + "@babel/plugin-transform-reserved-words" "^7.2.0" + "@babel/plugin-transform-shorthand-properties" "^7.2.0" + "@babel/plugin-transform-spread" "^7.2.0" + "@babel/plugin-transform-sticky-regex" "^7.2.0" + "@babel/plugin-transform-template-literals" "^7.2.0" + "@babel/plugin-transform-typeof-symbol" "^7.2.0" + "@babel/plugin-transform-unicode-regex" "^7.4.3" + "@babel/types" "^7.4.0" + browserslist "^4.5.2" + core-js-compat "^3.0.0" + invariant "^2.2.2" + js-levenshtein "^1.1.3" + semver "^5.5.0" + +"@babel/preset-react@7.0.0", "@babel/preset-react@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.0.0.tgz#e86b4b3d99433c7b3e9e91747e2653958bc6b3c0" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-transform-react-display-name" "^7.0.0" + "@babel/plugin-transform-react-jsx" "^7.0.0" + "@babel/plugin-transform-react-jsx-self" "^7.0.0" + "@babel/plugin-transform-react-jsx-source" "^7.0.0" + +"@babel/preset-typescript@7.3.3": + version "7.3.3" + resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.3.3.tgz#88669911053fa16b2b276ea2ede2ca603b3f307a" + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-transform-typescript" "^7.3.2" + +"@babel/runtime@7.4.3", "@babel/runtime@^7.0.0", "@babel/runtime@^7.3.4": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.4.3.tgz#79888e452034223ad9609187a0ad1fe0d2ad4bdc" + dependencies: + regenerator-runtime "^0.13.2" + +"@babel/runtime@^7.1.2", "@babel/runtime@^7.4.2": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.4.4.tgz#dc2e34982eb236803aa27a07fea6857af1b9171d" + dependencies: + regenerator-runtime "^0.13.2" + +"@babel/template@^7.0.0", "@babel/template@^7.1.0", "@babel/template@^7.2.2", "@babel/template@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.4.0.tgz#12474e9c077bae585c5d835a95c0b0b790c25c8b" + dependencies: + "@babel/code-frame" "^7.0.0" + "@babel/parser" "^7.4.0" + "@babel/types" "^7.4.0" + +"@babel/traverse@^7.0.0", "@babel/traverse@^7.1.0", "@babel/traverse@^7.4.0", "@babel/traverse@^7.4.3": + version "7.4.3" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.4.3.tgz#1a01f078fc575d589ff30c0f71bf3c3d9ccbad84" + dependencies: + "@babel/code-frame" "^7.0.0" + "@babel/generator" "^7.4.0" + "@babel/helper-function-name" "^7.1.0" + "@babel/helper-split-export-declaration" "^7.4.0" + "@babel/parser" "^7.4.3" + "@babel/types" "^7.4.0" + debug "^4.1.0" + globals "^11.1.0" + lodash "^4.17.11" + +"@babel/traverse@^7.4.4": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.4.4.tgz#0776f038f6d78361860b6823887d4f3937133fe8" + dependencies: + "@babel/code-frame" "^7.0.0" + "@babel/generator" "^7.4.4" + "@babel/helper-function-name" "^7.1.0" + "@babel/helper-split-export-declaration" "^7.4.4" + "@babel/parser" "^7.4.4" + "@babel/types" "^7.4.4" + debug "^4.1.0" + globals "^11.1.0" + lodash "^4.17.11" + +"@babel/types@^7.0.0", "@babel/types@^7.2.0", "@babel/types@^7.2.2", "@babel/types@^7.3.0", "@babel/types@^7.4.0": + version "7.4.0" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.4.0.tgz#670724f77d24cce6cc7d8cf64599d511d164894c" + dependencies: + 
esutils "^2.0.2" + lodash "^4.17.11" + to-fast-properties "^2.0.0" + +"@babel/types@^7.4.4": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.4.4.tgz#8db9e9a629bb7c29370009b4b779ed93fe57d5f0" + dependencies: + esutils "^2.0.2" + lodash "^4.17.11" + to-fast-properties "^2.0.0" + +"@cnakazawa/watch@^1.0.3": + version "1.0.3" + resolved "https://registry.yarnpkg.com/@cnakazawa/watch/-/watch-1.0.3.tgz#099139eaec7ebf07a27c1786a3ff64f39464d2ef" + dependencies: + exec-sh "^0.3.2" + minimist "^1.2.0" + +"@csstools/convert-colors@^1.4.0": + version "1.4.0" + resolved "https://registry.yarnpkg.com/@csstools/convert-colors/-/convert-colors-1.4.0.tgz#ad495dc41b12e75d588c6db8b9834f08fa131eb7" + +"@csstools/normalize.css@^9.0.1": + version "9.0.1" + resolved "https://registry.yarnpkg.com/@csstools/normalize.css/-/normalize.css-9.0.1.tgz#c27b391d8457d1e893f1eddeaf5e5412d12ffbb5" + +"@jest/console@^24.7.1": + version "24.7.1" + resolved "https://registry.yarnpkg.com/@jest/console/-/console-24.7.1.tgz#32a9e42535a97aedfe037e725bd67e954b459545" + dependencies: + "@jest/source-map" "^24.3.0" + chalk "^2.0.1" + slash "^2.0.0" + +"@jest/core@^24.7.1": + version "24.7.1" + resolved "https://registry.yarnpkg.com/@jest/core/-/core-24.7.1.tgz#6707f50db238d0c5988860680e2e414df0032024" + dependencies: + "@jest/console" "^24.7.1" + "@jest/reporters" "^24.7.1" + "@jest/test-result" "^24.7.1" + "@jest/transform" "^24.7.1" + "@jest/types" "^24.7.0" + ansi-escapes "^3.0.0" + chalk "^2.0.1" + exit "^0.1.2" + graceful-fs "^4.1.15" + jest-changed-files "^24.7.0" + jest-config "^24.7.1" + jest-haste-map "^24.7.1" + jest-message-util "^24.7.1" + jest-regex-util "^24.3.0" + jest-resolve-dependencies "^24.7.1" + jest-runner "^24.7.1" + jest-runtime "^24.7.1" + jest-snapshot "^24.7.1" + jest-util "^24.7.1" + jest-validate "^24.7.0" + jest-watcher "^24.7.1" + micromatch "^3.1.10" + p-each-series "^1.0.0" + pirates "^4.0.1" + realpath-native "^1.1.0" + rimraf "^2.5.4" + strip-ansi "^5.0.0" + +"@jest/environment@^24.7.1": + version "24.7.1" + resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-24.7.1.tgz#9b9196bc737561f67ac07817d4c5ece772e33135" + dependencies: + "@jest/fake-timers" "^24.7.1" + "@jest/transform" "^24.7.1" + "@jest/types" "^24.7.0" + jest-mock "^24.7.0" + +"@jest/fake-timers@^24.7.1": + version "24.7.1" + resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-24.7.1.tgz#56e5d09bdec09ee81050eaff2794b26c71d19db2" + dependencies: + "@jest/types" "^24.7.0" + jest-message-util "^24.7.1" + jest-mock "^24.7.0" + +"@jest/reporters@^24.7.1": + version "24.7.1" + resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-24.7.1.tgz#38ac0b096cd691bbbe3051ddc25988d42e37773a" + dependencies: + "@jest/environment" "^24.7.1" + "@jest/test-result" "^24.7.1" + "@jest/transform" "^24.7.1" + "@jest/types" "^24.7.0" + chalk "^2.0.1" + exit "^0.1.2" + glob "^7.1.2" + istanbul-api "^2.1.1" + istanbul-lib-coverage "^2.0.2" + istanbul-lib-instrument "^3.0.1" + istanbul-lib-source-maps "^3.0.1" + jest-haste-map "^24.7.1" + jest-resolve "^24.7.1" + jest-runtime "^24.7.1" + jest-util "^24.7.1" + jest-worker "^24.6.0" + node-notifier "^5.2.1" + slash "^2.0.0" + source-map "^0.6.0" + string-length "^2.0.0" + +"@jest/source-map@^24.3.0": + version "24.3.0" + resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-24.3.0.tgz#563be3aa4d224caf65ff77edc95cd1ca4da67f28" + dependencies: + callsites "^3.0.0" + graceful-fs "^4.1.15" + source-map "^0.6.0" + 
+"@jest/test-result@^24.7.1": + version "24.7.1" + resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-24.7.1.tgz#19eacdb29a114300aed24db651e5d975f08b6bbe" + dependencies: + "@jest/console" "^24.7.1" + "@jest/types" "^24.7.0" + "@types/istanbul-lib-coverage" "^2.0.0" + +"@jest/test-sequencer@^24.7.1": + version "24.7.1" + resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-24.7.1.tgz#9c18e428e1ad945fa74f6233a9d35745ca0e63e0" + dependencies: + "@jest/test-result" "^24.7.1" + jest-haste-map "^24.7.1" + jest-runner "^24.7.1" + jest-runtime "^24.7.1" + +"@jest/transform@^24.7.1": + version "24.7.1" + resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-24.7.1.tgz#872318f125bcfab2de11f53b465ab1aa780789c2" + dependencies: + "@babel/core" "^7.1.0" + "@jest/types" "^24.7.0" + babel-plugin-istanbul "^5.1.0" + chalk "^2.0.1" + convert-source-map "^1.4.0" + fast-json-stable-stringify "^2.0.0" + graceful-fs "^4.1.15" + jest-haste-map "^24.7.1" + jest-regex-util "^24.3.0" + jest-util "^24.7.1" + micromatch "^3.1.10" + realpath-native "^1.1.0" + slash "^2.0.0" + source-map "^0.6.1" + write-file-atomic "2.4.1" + +"@jest/types@^24.7.0": + version "24.7.0" + resolved "https://registry.yarnpkg.com/@jest/types/-/types-24.7.0.tgz#c4ec8d1828cdf23234d9b4ee31f5482a3f04f48b" + dependencies: + "@types/istanbul-lib-coverage" "^2.0.0" + "@types/yargs" "^12.0.9" + +"@mrmlnc/readdir-enhanced@^2.2.1": + version "2.2.1" + resolved "https://registry.yarnpkg.com/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz#524af240d1a360527b730475ecfa1344aa540dde" + dependencies: + call-me-maybe "^1.0.1" + glob-to-regexp "^0.3.0" + +"@nodelib/fs.stat@^1.1.2": + version "1.1.3" + resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz#2b5a3ab3f918cca48a8c754c08168e3f03eba61b" + +"@svgr/babel-plugin-add-jsx-attribute@^4.2.0": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-4.2.0.tgz#dadcb6218503532d6884b210e7f3c502caaa44b1" + +"@svgr/babel-plugin-remove-jsx-attribute@^4.2.0": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-4.2.0.tgz#297550b9a8c0c7337bea12bdfc8a80bb66f85abc" + +"@svgr/babel-plugin-remove-jsx-empty-expression@^4.2.0": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-4.2.0.tgz#c196302f3e68eab6a05e98af9ca8570bc13131c7" + +"@svgr/babel-plugin-replace-jsx-attribute-value@^4.2.0": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-4.2.0.tgz#310ec0775de808a6a2e4fd4268c245fd734c1165" + +"@svgr/babel-plugin-svg-dynamic-title@^4.2.0": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-4.2.0.tgz#43f0f689a5347a894160eb51b39a109889a4df20" + +"@svgr/babel-plugin-svg-em-dimensions@^4.2.0": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-4.2.0.tgz#9a94791c9a288108d20a9d2cc64cac820f141391" + +"@svgr/babel-plugin-transform-react-native-svg@^4.2.0": + version "4.2.0" + resolved 
"https://registry.yarnpkg.com/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-4.2.0.tgz#151487322843359a1ca86b21a3815fd21a88b717" + +"@svgr/babel-plugin-transform-svg-component@^4.2.0": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-4.2.0.tgz#5f1e2f886b2c85c67e76da42f0f6be1b1767b697" + +"@svgr/babel-preset@^4.2.0": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-preset/-/babel-preset-4.2.0.tgz#c9fc236445a02a8cd4e750085e51c181de00d6c5" + dependencies: + "@svgr/babel-plugin-add-jsx-attribute" "^4.2.0" + "@svgr/babel-plugin-remove-jsx-attribute" "^4.2.0" + "@svgr/babel-plugin-remove-jsx-empty-expression" "^4.2.0" + "@svgr/babel-plugin-replace-jsx-attribute-value" "^4.2.0" + "@svgr/babel-plugin-svg-dynamic-title" "^4.2.0" + "@svgr/babel-plugin-svg-em-dimensions" "^4.2.0" + "@svgr/babel-plugin-transform-react-native-svg" "^4.2.0" + "@svgr/babel-plugin-transform-svg-component" "^4.2.0" + +"@svgr/core@^4.1.0": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/core/-/core-4.2.0.tgz#f32ef8b9d05312aaa775896ec30ae46a6521e248" + dependencies: + "@svgr/plugin-jsx" "^4.2.0" + camelcase "^5.3.1" + cosmiconfig "^5.2.0" + +"@svgr/hast-util-to-babel-ast@^4.2.0": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-4.2.0.tgz#dd743435a5f3a8e84a1da067f27b5fae3d7b6b63" + dependencies: + "@babel/types" "^7.4.0" + +"@svgr/plugin-jsx@^4.1.0", "@svgr/plugin-jsx@^4.2.0": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/plugin-jsx/-/plugin-jsx-4.2.0.tgz#15a91562c9b5f90640ea0bdcb2ad59d692ee7ae9" + dependencies: + "@babel/core" "^7.4.3" + "@svgr/babel-preset" "^4.2.0" + "@svgr/hast-util-to-babel-ast" "^4.2.0" + rehype-parse "^6.0.0" + unified "^7.1.0" + vfile "^4.0.0" + +"@svgr/plugin-svgo@^4.0.3": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@svgr/plugin-svgo/-/plugin-svgo-4.2.0.tgz#2a594a2d3312955e75fd87dc77ae51f377c809f3" + dependencies: + cosmiconfig "^5.2.0" + merge-deep "^3.0.2" + svgo "^1.2.1" + +"@svgr/webpack@4.1.0": + version "4.1.0" + resolved "https://registry.yarnpkg.com/@svgr/webpack/-/webpack-4.1.0.tgz#20c88f32f731c7b1d4711045b2b993887d731c28" + dependencies: + "@babel/core" "^7.1.6" + "@babel/plugin-transform-react-constant-elements" "^7.0.0" + "@babel/preset-env" "^7.1.6" + "@babel/preset-react" "^7.0.0" + "@svgr/core" "^4.1.0" + "@svgr/plugin-jsx" "^4.1.0" + "@svgr/plugin-svgo" "^4.0.3" + loader-utils "^1.1.0" + +"@types/babel__core@^7.1.0": + version "7.1.1" + resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.1.1.tgz#ce9a9e5d92b7031421e1d0d74ae59f572ba48be6" + dependencies: + "@babel/parser" "^7.1.0" + "@babel/types" "^7.0.0" + "@types/babel__generator" "*" + "@types/babel__template" "*" + "@types/babel__traverse" "*" + +"@types/babel__generator@*": + version "7.0.2" + resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.0.2.tgz#d2112a6b21fad600d7674274293c85dce0cb47fc" + dependencies: + "@babel/types" "^7.0.0" + +"@types/babel__template@*": + version "7.0.2" + resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.0.2.tgz#4ff63d6b52eddac1de7b975a5223ed32ecea9307" + dependencies: + "@babel/parser" "^7.1.0" + "@babel/types" "^7.0.0" + +"@types/babel__traverse@*", "@types/babel__traverse@^7.0.6": + version "7.0.6" + resolved 
"https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.0.6.tgz#328dd1a8fc4cfe3c8458be9477b219ea158fd7b2" + dependencies: + "@babel/types" "^7.3.0" + +"@types/history@*": + version "4.7.2" + resolved "https://registry.yarnpkg.com/@types/history/-/history-4.7.2.tgz#0e670ea254d559241b6eeb3894f8754991e73220" + +"@types/hoist-non-react-statics@^3.3.1": + version "3.3.1" + resolved "https://registry.yarnpkg.com/@types/hoist-non-react-statics/-/hoist-non-react-statics-3.3.1.tgz#1124aafe5118cb591977aeb1ceaaed1070eb039f" + dependencies: + "@types/react" "*" + hoist-non-react-statics "^3.3.0" + +"@types/istanbul-lib-coverage@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.0.tgz#1eb8c033e98cf4e1a4cedcaf8bcafe8cb7591e85" + +"@types/jest-diff@*": + version "20.0.1" + resolved "https://registry.yarnpkg.com/@types/jest-diff/-/jest-diff-20.0.1.tgz#35cc15b9c4f30a18ef21852e255fdb02f6d59b89" + +"@types/jest@24.0.12": + version "24.0.12" + resolved "https://registry.yarnpkg.com/@types/jest/-/jest-24.0.12.tgz#0553dd0a5ac744e7dc4e8700da6d3baedbde3e8f" + dependencies: + "@types/jest-diff" "*" + +"@types/node@*": + version "11.13.6" + resolved "https://registry.yarnpkg.com/@types/node/-/node-11.13.6.tgz#37ec75690830acb0d74ce3c6c43caab787081e85" + +"@types/node@11.13.9": + version "11.13.9" + resolved "https://registry.yarnpkg.com/@types/node/-/node-11.13.9.tgz#f80697caca7f7fb2526527a5c5a2743487f05ccc" + +"@types/prop-types@*": + version "15.7.1" + resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.1.tgz#f1a11e7babb0c3cad68100be381d1e064c68f1f6" + +"@types/q@^1.5.1": + version "1.5.2" + resolved "https://registry.yarnpkg.com/@types/q/-/q-1.5.2.tgz#690a1475b84f2a884fd07cd797c00f5f31356ea8" + +"@types/react-dom@16.8.4": + version "16.8.4" + resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-16.8.4.tgz#7fb7ba368857c7aa0f4e4511c4710ca2c5a12a88" + dependencies: + "@types/react" "*" + +"@types/react-router-dom@^4.3.3": + version "4.3.3" + resolved "https://registry.yarnpkg.com/@types/react-router-dom/-/react-router-dom-4.3.3.tgz#7837e3e9fefbc84a8f6c8a51dca004f4e83e94e3" + dependencies: + "@types/history" "*" + "@types/react" "*" + "@types/react-router" "*" + +"@types/react-router@*": + version "5.0.0" + resolved "https://registry.yarnpkg.com/@types/react-router/-/react-router-5.0.0.tgz#22ae8f55d8af770ea1f755218936f01bfe1bfe27" + dependencies: + "@types/history" "*" + "@types/react" "*" + +"@types/react-slick@^0.23.3": + version "0.23.4" + resolved "https://registry.yarnpkg.com/@types/react-slick/-/react-slick-0.23.4.tgz#c97e2a9e7e3d1933c68593b8e82752fab1e8ce53" + dependencies: + "@types/react" "*" + +"@types/react@*", "@types/react@16.8.15": + version "16.8.15" + resolved "https://registry.yarnpkg.com/@types/react/-/react-16.8.15.tgz#a76515fed5aa3e996603056f54427fec5f2a5122" + dependencies: + "@types/prop-types" "*" + csstype "^2.2.0" + +"@types/stack-utils@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-1.0.1.tgz#0a851d3bd96498fa25c33ab7278ed3bd65f06c3e" + +"@types/unist@*", "@types/unist@^2.0.0", "@types/unist@^2.0.2": + version "2.0.3" + resolved "https://registry.yarnpkg.com/@types/unist/-/unist-2.0.3.tgz#9c088679876f374eb5983f150d4787aa6fb32d7e" + +"@types/vfile-message@*": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@types/vfile-message/-/vfile-message-1.0.1.tgz#e1e9895cc6b36c462d4244e64e6d0b6eaf65355a" + 
dependencies: + "@types/node" "*" + "@types/unist" "*" + +"@types/vfile@^3.0.0": + version "3.0.2" + resolved "https://registry.yarnpkg.com/@types/vfile/-/vfile-3.0.2.tgz#19c18cd232df11ce6fa6ad80259bc86c366b09b9" + dependencies: + "@types/node" "*" + "@types/unist" "*" + "@types/vfile-message" "*" + +"@types/yargs@^12.0.2", "@types/yargs@^12.0.9": + version "12.0.12" + resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-12.0.12.tgz#45dd1d0638e8c8f153e87d296907659296873916" + +"@typescript-eslint/eslint-plugin@1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-1.6.0.tgz#a5ff3128c692393fb16efa403ec7c8a5593dab0f" + dependencies: + "@typescript-eslint/parser" "1.6.0" + "@typescript-eslint/typescript-estree" "1.6.0" + requireindex "^1.2.0" + tsutils "^3.7.0" + +"@typescript-eslint/parser@1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-1.6.0.tgz#f01189c8b90848e3b8e45a6cdad27870529d1804" + dependencies: + "@typescript-eslint/typescript-estree" "1.6.0" + eslint-scope "^4.0.0" + eslint-visitor-keys "^1.0.0" + +"@typescript-eslint/typescript-estree@1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-1.6.0.tgz#6cf43a07fee08b8eb52e4513b428c8cdc9751ef0" + dependencies: + lodash.unescape "4.0.1" + semver "5.5.0" + +"@webassemblyjs/ast@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.8.5.tgz#51b1c5fe6576a34953bf4b253df9f0d490d9e359" + dependencies: + "@webassemblyjs/helper-module-context" "1.8.5" + "@webassemblyjs/helper-wasm-bytecode" "1.8.5" + "@webassemblyjs/wast-parser" "1.8.5" + +"@webassemblyjs/floating-point-hex-parser@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.8.5.tgz#1ba926a2923613edce496fd5b02e8ce8a5f49721" + +"@webassemblyjs/helper-api-error@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.8.5.tgz#c49dad22f645227c5edb610bdb9697f1aab721f7" + +"@webassemblyjs/helper-buffer@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.8.5.tgz#fea93e429863dd5e4338555f42292385a653f204" + +"@webassemblyjs/helper-code-frame@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.8.5.tgz#9a740ff48e3faa3022b1dff54423df9aa293c25e" + dependencies: + "@webassemblyjs/wast-printer" "1.8.5" + +"@webassemblyjs/helper-fsm@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-fsm/-/helper-fsm-1.8.5.tgz#ba0b7d3b3f7e4733da6059c9332275d860702452" + +"@webassemblyjs/helper-module-context@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-module-context/-/helper-module-context-1.8.5.tgz#def4b9927b0101dc8cbbd8d1edb5b7b9c82eb245" + dependencies: + "@webassemblyjs/ast" "1.8.5" + mamacro "^0.0.3" + +"@webassemblyjs/helper-wasm-bytecode@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.8.5.tgz#537a750eddf5c1e932f3744206551c91c1b93e61" + +"@webassemblyjs/helper-wasm-section@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.8.5.tgz#74ca6a6bcbe19e50a3b6b462847e69503e6bfcbf" + 
dependencies: + "@webassemblyjs/ast" "1.8.5" + "@webassemblyjs/helper-buffer" "1.8.5" + "@webassemblyjs/helper-wasm-bytecode" "1.8.5" + "@webassemblyjs/wasm-gen" "1.8.5" + +"@webassemblyjs/ieee754@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ieee754/-/ieee754-1.8.5.tgz#712329dbef240f36bf57bd2f7b8fb9bf4154421e" + dependencies: + "@xtuc/ieee754" "^1.2.0" + +"@webassemblyjs/leb128@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/leb128/-/leb128-1.8.5.tgz#044edeb34ea679f3e04cd4fd9824d5e35767ae10" + dependencies: + "@xtuc/long" "4.2.2" + +"@webassemblyjs/utf8@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.8.5.tgz#a8bf3b5d8ffe986c7c1e373ccbdc2a0915f0cedc" + +"@webassemblyjs/wasm-edit@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.8.5.tgz#962da12aa5acc1c131c81c4232991c82ce56e01a" + dependencies: + "@webassemblyjs/ast" "1.8.5" + "@webassemblyjs/helper-buffer" "1.8.5" + "@webassemblyjs/helper-wasm-bytecode" "1.8.5" + "@webassemblyjs/helper-wasm-section" "1.8.5" + "@webassemblyjs/wasm-gen" "1.8.5" + "@webassemblyjs/wasm-opt" "1.8.5" + "@webassemblyjs/wasm-parser" "1.8.5" + "@webassemblyjs/wast-printer" "1.8.5" + +"@webassemblyjs/wasm-gen@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.8.5.tgz#54840766c2c1002eb64ed1abe720aded714f98bc" + dependencies: + "@webassemblyjs/ast" "1.8.5" + "@webassemblyjs/helper-wasm-bytecode" "1.8.5" + "@webassemblyjs/ieee754" "1.8.5" + "@webassemblyjs/leb128" "1.8.5" + "@webassemblyjs/utf8" "1.8.5" + +"@webassemblyjs/wasm-opt@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.8.5.tgz#b24d9f6ba50394af1349f510afa8ffcb8a63d264" + dependencies: + "@webassemblyjs/ast" "1.8.5" + "@webassemblyjs/helper-buffer" "1.8.5" + "@webassemblyjs/wasm-gen" "1.8.5" + "@webassemblyjs/wasm-parser" "1.8.5" + +"@webassemblyjs/wasm-parser@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.8.5.tgz#21576f0ec88b91427357b8536383668ef7c66b8d" + dependencies: + "@webassemblyjs/ast" "1.8.5" + "@webassemblyjs/helper-api-error" "1.8.5" + "@webassemblyjs/helper-wasm-bytecode" "1.8.5" + "@webassemblyjs/ieee754" "1.8.5" + "@webassemblyjs/leb128" "1.8.5" + "@webassemblyjs/utf8" "1.8.5" + +"@webassemblyjs/wast-parser@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-parser/-/wast-parser-1.8.5.tgz#e10eecd542d0e7bd394f6827c49f3df6d4eefb8c" + dependencies: + "@webassemblyjs/ast" "1.8.5" + "@webassemblyjs/floating-point-hex-parser" "1.8.5" + "@webassemblyjs/helper-api-error" "1.8.5" + "@webassemblyjs/helper-code-frame" "1.8.5" + "@webassemblyjs/helper-fsm" "1.8.5" + "@xtuc/long" "4.2.2" + +"@webassemblyjs/wast-printer@1.8.5": + version "1.8.5" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.8.5.tgz#114bbc481fd10ca0e23b3560fa812748b0bae5bc" + dependencies: + "@webassemblyjs/ast" "1.8.5" + "@webassemblyjs/wast-parser" "1.8.5" + "@xtuc/long" "4.2.2" + +"@xtuc/ieee754@^1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@xtuc/ieee754/-/ieee754-1.2.0.tgz#eef014a3145ae477a1cbc00cd1e552336dceb790" + +"@xtuc/long@4.2.2": + version "4.2.2" + resolved "https://registry.yarnpkg.com/@xtuc/long/-/long-4.2.2.tgz#d291c6a4e97989b5c61d9acf396ae4fe133a718d" + +abab@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/abab/-/abab-2.0.0.tgz#aba0ab4c5eee2d4c79d3487d85450fb2376ebb0f" + +abbrev@1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" + +accepts@~1.3.4, accepts@~1.3.5: + version "1.3.5" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.5.tgz#eb777df6011723a3b14e8a72c0805c8e86746bd2" + dependencies: + mime-types "~2.1.18" + negotiator "0.6.1" + +acorn-dynamic-import@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/acorn-dynamic-import/-/acorn-dynamic-import-4.0.0.tgz#482210140582a36b83c3e342e1cfebcaa9240948" + +acorn-globals@^4.1.0, acorn-globals@^4.3.0: + version "4.3.2" + resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-4.3.2.tgz#4e2c2313a597fd589720395f6354b41cd5ec8006" + dependencies: + acorn "^6.0.1" + acorn-walk "^6.0.1" + +acorn-jsx@^5.0.0: + version "5.0.1" + resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.0.1.tgz#32a064fd925429216a09b141102bfdd185fae40e" + +acorn-walk@^6.0.1: + version "6.1.1" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-6.1.1.tgz#d363b66f5fac5f018ff9c3a1e7b6f8e310cc3913" + +acorn@^5.5.3: + version "5.7.3" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-5.7.3.tgz#67aa231bf8812974b85235a96771eb6bd07ea279" + +acorn@^6.0.1, acorn@^6.0.4, acorn@^6.0.5, acorn@^6.0.7: + version "6.1.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.1.1.tgz#7d25ae05bb8ad1f9b699108e1094ecd7884adc1f" + +add-dom-event-listener@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/add-dom-event-listener/-/add-dom-event-listener-1.1.0.tgz#6a92db3a0dd0abc254e095c0f1dc14acbbaae310" + dependencies: + object-assign "4.x" + +address@1.0.3, address@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/address/-/address-1.0.3.tgz#b5f50631f8d6cec8bd20c963963afb55e06cbce9" + +ajv-errors@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/ajv-errors/-/ajv-errors-1.0.1.tgz#f35986aceb91afadec4102fbd85014950cefa64d" + +ajv-keywords@^3.1.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-3.4.0.tgz#4b831e7b531415a7cc518cd404e73f6193c6349d" + +ajv@^6.1.0, ajv@^6.5.5, ajv@^6.9.1: + version "6.10.0" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.10.0.tgz#90d0d54439da587cd7e843bfb7045f50bd22bdf1" + dependencies: + fast-deep-equal "^2.0.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.2" + +alphanum-sort@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/alphanum-sort/-/alphanum-sort-1.0.2.tgz#97a1119649b211ad33691d9f9f486a8ec9fbe0a3" + +ansi-colors@^3.0.0: + version "3.2.4" + resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-3.2.4.tgz#e3a3da4bfbae6c86a9c285625de124a234026fbf" + +ansi-escapes@^3.0.0, ansi-escapes@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-3.2.0.tgz#8780b98ff9dbf5638152d1f1fe5c1d7b4442976b" + +ansi-html@0.0.7: + version "0.0.7" + resolved "https://registry.yarnpkg.com/ansi-html/-/ansi-html-0.0.7.tgz#813584021962a9e9e6fd039f940d12f56ca7859e" + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + +ansi-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" + +ansi-regex@^4.0.0, ansi-regex@^4.1.0: + 
version "4.1.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.0.tgz#8b9f8f08cf1acb843756a839ca8c7e3168c51997" + +ansi-styles@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + +ansi-styles@^3.2.0, ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + dependencies: + color-convert "^1.9.0" + +ant-design-palettes@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/ant-design-palettes/-/ant-design-palettes-1.1.3.tgz#84119b1a4d86363adc52a38d587e65336a0a27dd" + dependencies: + tinycolor2 "^1.4.1" + +antd@^3.16.6: + version "3.16.6" + resolved "https://registry.yarnpkg.com/antd/-/antd-3.16.6.tgz#3cfb43ba2d82a2bf2efe854dd4d9e6838c401d2b" + dependencies: + "@ant-design/create-react-context" "^0.2.4" + "@ant-design/icons" "~1.2.0" + "@ant-design/icons-react" "~1.1.5" + "@types/hoist-non-react-statics" "^3.3.1" + "@types/react-slick" "^0.23.3" + array-tree-filter "^2.1.0" + babel-runtime "6.x" + classnames "~2.2.6" + copy-to-clipboard "^3.0.8" + css-animation "^1.5.0" + dom-closest "^0.2.0" + enquire.js "^2.1.6" + lodash "^4.17.11" + moment "^2.24.0" + omit.js "^1.0.0" + prop-types "^15.6.2" + raf "^3.4.0" + rc-animate "^2.5.4" + rc-calendar "~9.12.1" + rc-cascader "~0.17.0" + rc-checkbox "~2.1.5" + rc-collapse "~1.11.1" + rc-dialog "~7.3.0" + rc-drawer "~1.7.6" + rc-dropdown "~2.4.1" + rc-editor-mention "^1.1.7" + rc-form "^2.4.0" + rc-input-number "~4.4.0" + rc-menu "~7.4.12" + rc-notification "~3.3.0" + rc-pagination "~1.17.7" + rc-progress "~2.3.0" + rc-rate "~2.5.0" + rc-select "~9.1.0" + rc-slider "~8.6.5" + rc-steps "~3.3.0" + rc-switch "~1.9.0" + rc-table "~6.5.0" + rc-tabs "~9.6.0" + rc-time-picker "~3.6.1" + rc-tooltip "~3.7.3" + rc-tree "~1.15.2" + rc-tree-select "~2.6.0" + rc-trigger "^2.6.2" + rc-upload "~2.6.0" + rc-util "^4.5.1" + react-lazy-load "^3.0.13" + react-lifecycles-compat "^3.0.4" + react-slick "~0.24.0" + resize-observer-polyfill "^1.5.0" + shallowequal "^1.1.0" + warning "~4.0.2" + +anymatch@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-2.0.0.tgz#bcb24b4f37934d9aa7ac17b4adaf89e7c76ef2eb" + dependencies: + micromatch "^3.1.4" + normalize-path "^2.1.1" + +append-transform@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/append-transform/-/append-transform-1.0.0.tgz#046a52ae582a228bd72f58acfbe2967c678759ab" + dependencies: + default-require-extensions "^2.0.0" + +aproba@^1.0.3, aproba@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" + +are-we-there-yet@~1.1.2: + version "1.1.5" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz#4b35c2944f062a8bfcda66410760350fe9ddfc21" + dependencies: + delegates "^1.0.0" + readable-stream "^2.0.6" + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + dependencies: + sprintf-js "~1.0.2" + +aria-query@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/aria-query/-/aria-query-3.0.0.tgz#65b3fcc1ca1155a8c9ae64d6eee297f15d5133cc" + dependencies: + ast-types-flow "0.0.7" + commander "^2.11.0" + +arr-diff@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/arr-diff/-/arr-diff-4.0.0.tgz#d6461074febfec71e7e15235761a329a5dc7c520" + +arr-flatten@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.1.0.tgz#36048bbff4e7b47e136644316c99669ea5ae91f1" + +arr-union@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/arr-union/-/arr-union-3.1.0.tgz#e39b09aea9def866a8f206e288af63919bae39c4" + +array-equal@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93" + +array-filter@~0.0.0: + version "0.0.1" + resolved "https://registry.yarnpkg.com/array-filter/-/array-filter-0.0.1.tgz#7da8cf2e26628ed732803581fd21f67cacd2eeec" + +array-flatten@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" + +array-flatten@^2.1.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-2.1.2.tgz#24ef80a28c1a893617e2149b0c6d0d788293b099" + +array-includes@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/array-includes/-/array-includes-3.0.3.tgz#184b48f62d92d7452bb31b323165c7f8bd02266d" + dependencies: + define-properties "^1.1.2" + es-abstract "^1.7.0" + +array-map@~0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/array-map/-/array-map-0.0.0.tgz#88a2bab73d1cf7bcd5c1b118a003f66f665fa662" + +array-reduce@~0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/array-reduce/-/array-reduce-0.0.0.tgz#173899d3ffd1c7d9383e4479525dbe278cab5f2b" + +array-tree-filter@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/array-tree-filter/-/array-tree-filter-2.1.0.tgz#873ac00fec83749f255ac8dd083814b4f6329190" + +array-union@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" + dependencies: + array-uniq "^1.0.1" + +array-uniq@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + +array-unique@^0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428" + +arrify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" + +asap@~2.0.3, asap@~2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" + +asn1.js@^4.0.0: + version "4.10.1" + resolved "https://registry.yarnpkg.com/asn1.js/-/asn1.js-4.10.1.tgz#b9c2bf5805f1e64aadeed6df3a2bfafb5a73f5a0" + dependencies: + bn.js "^4.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + +asn1@~0.2.3: + version "0.2.4" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.4.tgz#8d2475dfab553bb33e77b54e59e880bb8ce23136" + dependencies: + safer-buffer "~2.1.0" + +assert-plus@1.0.0, assert-plus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" + +assert@^1.1.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/assert/-/assert-1.4.1.tgz#99912d591836b5a6f5b345c0f07eefc08fc65d91" + dependencies: + util "0.10.3" + +assign-symbols@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assign-symbols/-/assign-symbols-1.0.0.tgz#59667f41fadd4f20ccbc2bb96b8d4f7f78ec0367" 
+ +ast-types-flow@0.0.7, ast-types-flow@^0.0.7: + version "0.0.7" + resolved "https://registry.yarnpkg.com/ast-types-flow/-/ast-types-flow-0.0.7.tgz#f70b735c6bca1a5c9c22d982c3e39e7feba3bdad" + +astral-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/astral-regex/-/astral-regex-1.0.0.tgz#6c8c3fb827dd43ee3918f27b82782ab7658a6fd9" + +async-each@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.3.tgz#b727dbf87d7651602f06f4d4ac387f47d91b0cbf" + +async-limiter@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.0.tgz#78faed8c3d074ab81f22b4e985d79e8738f720f8" + +async-validator@~1.8.5: + version "1.8.5" + resolved "https://registry.yarnpkg.com/async-validator/-/async-validator-1.8.5.tgz#dc3e08ec1fd0dddb67e60842f02c0cd1cec6d7f0" + dependencies: + babel-runtime "6.x" + +async@^1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a" + +async@^2.6.1: + version "2.6.2" + resolved "https://registry.yarnpkg.com/async/-/async-2.6.2.tgz#18330ea7e6e313887f5d2f2a904bac6fe4dd5381" + dependencies: + lodash "^4.17.11" + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + +atob@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/atob/-/atob-2.1.2.tgz#6d9517eb9e030d2436666651e86bd9f6f13533c9" + +autoprefixer@^9.4.9: + version "9.5.1" + resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-9.5.1.tgz#243b1267b67e7e947f28919d786b50d3bb0fb357" + dependencies: + browserslist "^4.5.4" + caniuse-lite "^1.0.30000957" + normalize-range "^0.1.2" + num2fraction "^1.2.2" + postcss "^7.0.14" + postcss-value-parser "^3.3.1" + +aws-sign2@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" + +aws4@^1.8.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.8.0.tgz#f0e003d9ca9e7f59c7a508945d7b2ef9a04a542f" + +axobject-query@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/axobject-query/-/axobject-query-2.0.2.tgz#ea187abe5b9002b377f925d8bf7d1c561adf38f9" + dependencies: + ast-types-flow "0.0.7" + +babel-code-frame@^6.22.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" + dependencies: + chalk "^1.1.3" + esutils "^2.0.2" + js-tokens "^3.0.2" + +babel-eslint@10.0.1: + version "10.0.1" + resolved "https://registry.yarnpkg.com/babel-eslint/-/babel-eslint-10.0.1.tgz#919681dc099614cd7d31d45c8908695092a1faed" + dependencies: + "@babel/code-frame" "^7.0.0" + "@babel/parser" "^7.0.0" + "@babel/traverse" "^7.0.0" + "@babel/types" "^7.0.0" + eslint-scope "3.7.1" + eslint-visitor-keys "^1.0.0" + +babel-extract-comments@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/babel-extract-comments/-/babel-extract-comments-1.0.0.tgz#0a2aedf81417ed391b85e18b4614e693a0351a21" + dependencies: + babylon "^6.18.0" + +babel-jest@24.7.1, babel-jest@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-24.7.1.tgz#73902c9ff15a7dfbdc9994b0b17fcefd96042178" + dependencies: + "@jest/transform" "^24.7.1" + "@jest/types" "^24.7.0" + "@types/babel__core" "^7.1.0" + babel-plugin-istanbul "^5.1.0" + babel-preset-jest "^24.6.0" + chalk "^2.4.2" + slash "^2.0.0" + 
+babel-loader@8.0.5: + version "8.0.5" + resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-8.0.5.tgz#225322d7509c2157655840bba52e46b6c2f2fe33" + dependencies: + find-cache-dir "^2.0.0" + loader-utils "^1.0.2" + mkdirp "^0.5.1" + util.promisify "^1.0.0" + +babel-plugin-dynamic-import-node@2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.2.0.tgz#c0adfb07d95f4a4495e9aaac6ec386c4d7c2524e" + dependencies: + object.assign "^4.1.0" + +babel-plugin-import@^1.11.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/babel-plugin-import/-/babel-plugin-import-1.11.0.tgz#78ac908e6b225206babb734e19eae5f78d6d1035" + dependencies: + "@babel/helper-module-imports" "^7.0.0" + "@babel/runtime" "^7.0.0" + +babel-plugin-istanbul@^5.1.0: + version "5.1.2" + resolved "https://registry.yarnpkg.com/babel-plugin-istanbul/-/babel-plugin-istanbul-5.1.2.tgz#d8c2e2e83f72695d6bfdcd297719c66161d5f0f9" + dependencies: + find-up "^3.0.0" + istanbul-lib-instrument "^3.2.0" + test-exclude "^5.2.2" + +babel-plugin-jest-hoist@^24.6.0: + version "24.6.0" + resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-24.6.0.tgz#f7f7f7ad150ee96d7a5e8e2c5da8319579e78019" + dependencies: + "@types/babel__traverse" "^7.0.6" + +babel-plugin-macros@2.5.1: + version "2.5.1" + resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-2.5.1.tgz#4a119ac2c2e19b458c259b9accd7ee34fd57ec6f" + dependencies: + "@babel/runtime" "^7.4.2" + cosmiconfig "^5.2.0" + resolve "^1.10.0" + +babel-plugin-named-asset-import@^0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/babel-plugin-named-asset-import/-/babel-plugin-named-asset-import-0.3.2.tgz#20978ed446b8e1bf4a2f42d0a94c0ece85f75f4f" + +babel-plugin-syntax-object-rest-spread@^6.8.0: + version "6.13.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-object-rest-spread/-/babel-plugin-syntax-object-rest-spread-6.13.0.tgz#fd6536f2bce13836ffa3a5458c4903a597bb3bf5" + +babel-plugin-transform-object-rest-spread@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-object-rest-spread/-/babel-plugin-transform-object-rest-spread-6.26.0.tgz#0f36692d50fef6b7e2d4b3ac1478137a963b7b06" + dependencies: + babel-plugin-syntax-object-rest-spread "^6.8.0" + babel-runtime "^6.26.0" + +babel-plugin-transform-react-remove-prop-types@0.4.24: + version "0.4.24" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-react-remove-prop-types/-/babel-plugin-transform-react-remove-prop-types-0.4.24.tgz#f2edaf9b4c6a5fbe5c1d678bfb531078c1555f3a" + +babel-preset-jest@^24.6.0: + version "24.6.0" + resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-24.6.0.tgz#66f06136eefce87797539c0d63f1769cc3915984" + dependencies: + "@babel/plugin-syntax-object-rest-spread" "^7.0.0" + babel-plugin-jest-hoist "^24.6.0" + +babel-preset-react-app@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/babel-preset-react-app/-/babel-preset-react-app-8.0.0.tgz#930b6e28cdcfdff97ddb8bef9226d504f244d326" + dependencies: + "@babel/core" "7.4.3" + "@babel/plugin-proposal-class-properties" "7.4.0" + "@babel/plugin-proposal-decorators" "7.4.0" + "@babel/plugin-proposal-object-rest-spread" "7.4.3" + "@babel/plugin-syntax-dynamic-import" "7.2.0" + "@babel/plugin-transform-classes" "7.4.3" + "@babel/plugin-transform-destructuring" "7.4.3" + "@babel/plugin-transform-flow-strip-types" "7.4.0" + 
"@babel/plugin-transform-react-constant-elements" "7.2.0" + "@babel/plugin-transform-react-display-name" "7.2.0" + "@babel/plugin-transform-runtime" "7.4.3" + "@babel/preset-env" "7.4.3" + "@babel/preset-react" "7.0.0" + "@babel/preset-typescript" "7.3.3" + "@babel/runtime" "7.4.3" + babel-plugin-dynamic-import-node "2.2.0" + babel-plugin-macros "2.5.1" + babel-plugin-transform-react-remove-prop-types "0.4.24" + +babel-runtime@6.x, babel-runtime@^6.23.0, babel-runtime@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.26.0.tgz#965c7058668e82b55d7bfe04ff2337bc8b5647fe" + dependencies: + core-js "^2.4.0" + regenerator-runtime "^0.11.0" + +babylon@^6.18.0: + version "6.18.0" + resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.18.0.tgz#af2f3b88fa6f5c1e4c634d1a0f8eac4f55b395e3" + +bail@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/bail/-/bail-1.0.3.tgz#63cfb9ddbac829b02a3128cd53224be78e6c21a3" + +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" + +base64-js@^1.0.2: + version "1.3.0" + resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.3.0.tgz#cab1e6118f051095e58b5281aea8c1cd22bfc0e3" + +base@^0.11.1: + version "0.11.2" + resolved "https://registry.yarnpkg.com/base/-/base-0.11.2.tgz#7bde5ced145b6d551a90db87f83c558b4eb48a8f" + dependencies: + cache-base "^1.0.1" + class-utils "^0.3.5" + component-emitter "^1.2.1" + define-property "^1.0.0" + isobject "^3.0.1" + mixin-deep "^1.2.0" + pascalcase "^0.1.1" + +batch@0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/batch/-/batch-0.6.1.tgz#dc34314f4e679318093fc760272525f94bf25c16" + +bcrypt-pbkdf@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" + dependencies: + tweetnacl "^0.14.3" + +big.js@^5.2.2: + version "5.2.2" + resolved "https://registry.yarnpkg.com/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328" + +binary-extensions@^1.0.0: + version "1.13.1" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.13.1.tgz#598afe54755b2868a5330d2aff9d4ebb53209b65" + +bluebird@^3.5.3: + version "3.5.4" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.4.tgz#d6cc661595de30d5b3af5fcedd3c0b3ef6ec5714" + +bn.js@^4.0.0, bn.js@^4.1.0, bn.js@^4.1.1, bn.js@^4.4.0: + version "4.11.8" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.8.tgz#2cde09eb5ee341f484746bb0309b3253b1b1442f" + +body-parser@1.18.3: + version "1.18.3" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.18.3.tgz#5b292198ffdd553b3a0f20ded0592b956955c8b4" + dependencies: + bytes "3.0.0" + content-type "~1.0.4" + debug "2.6.9" + depd "~1.1.2" + http-errors "~1.6.3" + iconv-lite "0.4.23" + on-finished "~2.3.0" + qs "6.5.2" + raw-body "2.3.3" + type-is "~1.6.16" + +bonjour@^3.5.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/bonjour/-/bonjour-3.5.0.tgz#8e890a183d8ee9a2393b3844c691a42bcf7bc9f5" + dependencies: + array-flatten "^2.1.0" + deep-equal "^1.0.1" + dns-equal "^1.0.0" + dns-txt "^2.0.2" + multicast-dns "^6.0.1" + multicast-dns-service-types "^1.1.0" + +boolbase@^1.0.0, boolbase@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved 
"https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +braces@^2.3.1, braces@^2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-2.3.2.tgz#5979fd3f14cd531565e5fa2df1abfff1dfaee729" + dependencies: + arr-flatten "^1.1.0" + array-unique "^0.3.2" + extend-shallow "^2.0.1" + fill-range "^4.0.0" + isobject "^3.0.1" + repeat-element "^1.1.2" + snapdragon "^0.8.1" + snapdragon-node "^2.0.1" + split-string "^3.0.2" + to-regex "^3.0.1" + +brorand@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" + +browser-process-hrtime@^0.1.2: + version "0.1.3" + resolved "https://registry.yarnpkg.com/browser-process-hrtime/-/browser-process-hrtime-0.1.3.tgz#616f00faef1df7ec1b5bf9cfe2bdc3170f26c7b4" + +browser-resolve@^1.11.3: + version "1.11.3" + resolved "https://registry.yarnpkg.com/browser-resolve/-/browser-resolve-1.11.3.tgz#9b7cbb3d0f510e4cb86bdbd796124d28b5890af6" + dependencies: + resolve "1.1.7" + +browserify-aes@^1.0.0, browserify-aes@^1.0.4: + version "1.2.0" + resolved "https://registry.yarnpkg.com/browserify-aes/-/browserify-aes-1.2.0.tgz#326734642f403dabc3003209853bb70ad428ef48" + dependencies: + buffer-xor "^1.0.3" + cipher-base "^1.0.0" + create-hash "^1.1.0" + evp_bytestokey "^1.0.3" + inherits "^2.0.1" + safe-buffer "^5.0.1" + +browserify-cipher@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/browserify-cipher/-/browserify-cipher-1.0.1.tgz#8d6474c1b870bfdabcd3bcfcc1934a10e94f15f0" + dependencies: + browserify-aes "^1.0.4" + browserify-des "^1.0.0" + evp_bytestokey "^1.0.0" + +browserify-des@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/browserify-des/-/browserify-des-1.0.2.tgz#3af4f1f59839403572f1c66204375f7a7f703e9c" + dependencies: + cipher-base "^1.0.1" + des.js "^1.0.0" + inherits "^2.0.1" + safe-buffer "^5.1.2" + +browserify-rsa@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/browserify-rsa/-/browserify-rsa-4.0.1.tgz#21e0abfaf6f2029cf2fafb133567a701d4135524" + dependencies: + bn.js "^4.1.0" + randombytes "^2.0.1" + +browserify-sign@^4.0.0: + version "4.0.4" + resolved "https://registry.yarnpkg.com/browserify-sign/-/browserify-sign-4.0.4.tgz#aa4eb68e5d7b658baa6bf6a57e630cbd7a93d298" + dependencies: + bn.js "^4.1.1" + browserify-rsa "^4.0.0" + create-hash "^1.1.0" + create-hmac "^1.1.2" + elliptic "^6.0.0" + inherits "^2.0.1" + parse-asn1 "^5.0.0" + +browserify-zlib@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/browserify-zlib/-/browserify-zlib-0.2.0.tgz#2869459d9aa3be245fe8fe2ca1f46e2e7f54d73f" + dependencies: + pako "~1.0.5" + +browserslist@4.5.4: + version "4.5.4" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.5.4.tgz#166c4ecef3b51737a42436ea8002aeea466ea2c7" + dependencies: + caniuse-lite "^1.0.30000955" + electron-to-chromium "^1.3.122" + node-releases "^1.1.13" + +browserslist@^4.0.0, browserslist@^4.1.1, browserslist@^4.4.2, browserslist@^4.5.2, browserslist@^4.5.4: + version "4.5.5" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.5.5.tgz#fe1a352330d2490d5735574c149a85bc18ef9b82" + dependencies: + caniuse-lite "^1.0.30000960" + electron-to-chromium "^1.3.124" + node-releases "^1.1.14" + +bser@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/bser/-/bser-2.0.0.tgz#9ac78d3ed5d915804fd87acb158bc797147a1719" + dependencies: + node-int64 "^0.4.0" + +buffer-from@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" + +buffer-indexof@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/buffer-indexof/-/buffer-indexof-1.1.1.tgz#52fabcc6a606d1a00302802648ef68f639da268c" + +buffer-xor@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/buffer-xor/-/buffer-xor-1.0.3.tgz#26e61ed1422fb70dd42e6e36729ed51d855fe8d9" + +buffer@^4.3.0: + version "4.9.1" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-4.9.1.tgz#6d1bb601b07a4efced97094132093027c95bc298" + dependencies: + base64-js "^1.0.2" + ieee754 "^1.1.4" + isarray "^1.0.0" + +builtin-status-codes@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8" + +bytes@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.0.0.tgz#d32815404d689699f85a4ea4fa8755dd13a96048" + +cacache@^11.0.2: + version "11.3.2" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-11.3.2.tgz#2d81e308e3d258ca38125b676b98b2ac9ce69bfa" + dependencies: + bluebird "^3.5.3" + chownr "^1.1.1" + figgy-pudding "^3.5.1" + glob "^7.1.3" + graceful-fs "^4.1.15" + lru-cache "^5.1.1" + mississippi "^3.0.0" + mkdirp "^0.5.1" + move-concurrently "^1.0.1" + promise-inflight "^1.0.1" + rimraf "^2.6.2" + ssri "^6.0.1" + unique-filename "^1.1.1" + y18n "^4.0.0" + +cache-base@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/cache-base/-/cache-base-1.0.1.tgz#0a7f46416831c8b662ee36fe4e7c59d76f666ab2" + dependencies: + collection-visit "^1.0.0" + component-emitter "^1.2.1" + get-value "^2.0.6" + has-value "^1.0.0" + isobject "^3.0.1" + set-value "^2.0.0" + to-object-path "^0.3.0" + union-value "^1.0.0" + unset-value "^1.0.0" + +call-me-maybe@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/call-me-maybe/-/call-me-maybe-1.0.1.tgz#26d208ea89e37b5cbde60250a15f031c16a4d66b" + +caller-callsite@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/caller-callsite/-/caller-callsite-2.0.0.tgz#847e0fce0a223750a9a027c54b33731ad3154134" + dependencies: + callsites "^2.0.0" + +caller-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/caller-path/-/caller-path-2.0.0.tgz#468f83044e369ab2010fac5f06ceee15bb2cb1f4" + dependencies: + caller-callsite "^2.0.0" + +callsites@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-2.0.0.tgz#06eb84f00eea413da86affefacbffb36093b3c50" + +callsites@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + +camel-case@3.0.x: + version "3.0.0" + resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-3.0.0.tgz#ca3c3688a4e9cf3a4cda777dc4dcbc713249cf73" + dependencies: + no-case "^2.2.0" + upper-case "^1.1.1" + +camelcase@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd" + +camelcase@^5.0.0, camelcase@^5.2.0, camelcase@^5.3.1: + version "5.3.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" + +caniuse-api@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-3.0.0.tgz#5e4d90e2274961d46291997df599e3ed008ee4c0" + dependencies: + browserslist "^4.0.0" + caniuse-lite "^1.0.0" + lodash.memoize "^4.1.2" + lodash.uniq "^4.5.0" + +caniuse-lite@^1.0.0, caniuse-lite@^1.0.30000939, caniuse-lite@^1.0.30000957, caniuse-lite@^1.0.30000960: + version "1.0.30000962" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30000962.tgz#6c10c3ab304b89bea905e66adf98c0905088ee44" + +caniuse-lite@^1.0.30000955: + version "1.0.30000966" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30000966.tgz#f3c6fefacfbfbfb981df6dfa68f2aae7bff41b64" + +capture-exit@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/capture-exit/-/capture-exit-2.0.0.tgz#fb953bfaebeb781f62898239dabb426d08a509a4" + dependencies: + rsvp "^4.8.4" + +case-sensitive-paths-webpack-plugin@2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/case-sensitive-paths-webpack-plugin/-/case-sensitive-paths-webpack-plugin-2.2.0.tgz#3371ef6365ef9c25fa4b81c16ace0e9c7dc58c3e" + +caseless@~0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" + +ccount@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/ccount/-/ccount-1.0.3.tgz#f1cec43f332e2ea5a569fd46f9f5bde4e6102aff" + +chalk@2.4.2, chalk@^2.0.0, chalk@^2.0.1, chalk@^2.1.0, chalk@^2.4.1, chalk@^2.4.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" + +chardet@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" + +chokidar@^2.0.0, chokidar@^2.0.2, chokidar@^2.0.4: + version "2.1.5" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-2.1.5.tgz#0ae8434d962281a5f56c72869e79cb6d9d86ad4d" + dependencies: + anymatch "^2.0.0" + async-each "^1.0.1" + braces "^2.3.2" + glob-parent "^3.1.0" + inherits "^2.0.3" + is-binary-path "^1.0.0" + is-glob "^4.0.0" + normalize-path "^3.0.0" + path-is-absolute "^1.0.0" + readdirp "^2.2.1" + upath "^1.1.1" + optionalDependencies: + fsevents "^1.2.7" + +chownr@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.1.tgz#54726b8b8fff4df053c42187e801fb4412df1494" + +chrome-trace-event@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.0.tgz#45a91bd2c20c9411f0963b5aaeb9a1b95e09cc48" + dependencies: + tslib "^1.9.0" + +ci-info@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46" + +cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de" + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +class-utils@^0.3.5: + version "0.3.6" + resolved "https://registry.yarnpkg.com/class-utils/-/class-utils-0.3.6.tgz#f93369ae8b9a7ce02fd41faad0ca83033190c463" + dependencies: 
+ arr-union "^3.1.0" + define-property "^0.2.5" + isobject "^3.0.0" + static-extend "^0.1.1" + +classnames@2.x, classnames@^2.2.0, classnames@^2.2.1, classnames@^2.2.3, classnames@^2.2.5, classnames@^2.2.6, classnames@~2.2.6: + version "2.2.6" + resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.2.6.tgz#43935bffdd291f326dad0a205309b38d00f650ce" + +clean-css@4.2.x: + version "4.2.1" + resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-4.2.1.tgz#2d411ef76b8569b6d0c84068dabe85b0aa5e5c17" + dependencies: + source-map "~0.6.0" + +cli-cursor@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-2.1.0.tgz#b35dac376479facc3e94747d41d0d0f5238ffcb5" + dependencies: + restore-cursor "^2.0.0" + +cli-width@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-2.2.0.tgz#ff19ede8a9a5e579324147b0c11f0fbcbabed639" + +cliui@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-4.1.0.tgz#348422dbe82d800b3022eef4f6ac10bf2e4d1b49" + dependencies: + string-width "^2.1.1" + strip-ansi "^4.0.0" + wrap-ansi "^2.0.0" + +clone-deep@^0.2.4: + version "0.2.4" + resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-0.2.4.tgz#4e73dd09e9fb971cc38670c5dced9c1896481cc6" + dependencies: + for-own "^0.1.3" + is-plain-object "^2.0.1" + kind-of "^3.0.2" + lazy-cache "^1.0.3" + shallow-clone "^0.1.2" + +clone-deep@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-2.0.2.tgz#00db3a1e173656730d1188c3d6aced6d7ea97713" + dependencies: + for-own "^1.0.0" + is-plain-object "^2.0.4" + kind-of "^6.0.0" + shallow-clone "^1.0.0" + +clone@^2.1.1, clone@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.2.tgz#1b7f4b9f591f1e8f83670401600345a02887435f" + +co@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" + +coa@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/coa/-/coa-2.0.2.tgz#43f6c21151b4ef2bf57187db0d73de229e3e7ec3" + dependencies: + "@types/q" "^1.5.1" + chalk "^2.4.1" + q "^1.1.2" + +code-point-at@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" + +collection-visit@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/collection-visit/-/collection-visit-1.0.0.tgz#4bc0373c164bc3291b4d368c829cf1a80a59dca0" + dependencies: + map-visit "^1.0.0" + object-visit "^1.0.0" + +color-convert@^1.9.0, color-convert@^1.9.1: + version "1.9.3" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + dependencies: + color-name "1.1.3" + +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + +color-name@^1.0.0: + version "1.1.4" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + +color-string@^1.5.2: + version "1.5.3" + resolved "https://registry.yarnpkg.com/color-string/-/color-string-1.5.3.tgz#c9bbc5f01b58b5492f3d6857459cb6590ce204cc" + dependencies: + color-name "^1.0.0" + simple-swizzle "^0.2.2" + +color@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/color/-/color-3.1.0.tgz#d8e9fb096732875774c84bf922815df0308d0ffc" + dependencies: + color-convert "^1.9.1" + color-string "^1.5.2" + 
+combined-stream@^1.0.6, combined-stream@~1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.7.tgz#2d1d24317afb8abe95d6d2c0b07b57813539d828" + dependencies: + delayed-stream "~1.0.0" + +comma-separated-tokens@^1.0.0: + version "1.0.6" + resolved "https://registry.yarnpkg.com/comma-separated-tokens/-/comma-separated-tokens-1.0.6.tgz#3cd3d8adc725ab473843db338bcdfd4a7bb087bf" + +commander@2.17.x: + version "2.17.1" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.17.1.tgz#bd77ab7de6de94205ceacc72f1716d29f20a77bf" + +commander@^2.11.0, commander@^2.19.0, commander@~2.20.0: + version "2.20.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.0.tgz#d58bb2b5c1ee8f87b0d340027e9e94e222c5a422" + +commander@~2.19.0: + version "2.19.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.19.0.tgz#f6198aa84e5b83c46054b94ddedbfed5ee9ff12a" + +common-tags@^1.8.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/common-tags/-/common-tags-1.8.0.tgz#8e3153e542d4a39e9b10554434afaaf98956a937" + +commondir@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" + +compare-versions@^3.2.1: + version "3.4.0" + resolved "https://registry.yarnpkg.com/compare-versions/-/compare-versions-3.4.0.tgz#e0747df5c9cb7f054d6d3dc3e1dbc444f9e92b26" + +component-classes@1.x, component-classes@^1.2.5, component-classes@^1.2.6: + version "1.2.6" + resolved "https://registry.yarnpkg.com/component-classes/-/component-classes-1.2.6.tgz#c642394c3618a4d8b0b8919efccbbd930e5cd691" + dependencies: + component-indexof "0.0.3" + +component-emitter@^1.2.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.3.0.tgz#16e4070fba8ae29b679f2215853ee181ab2eabc0" + +component-indexof@0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/component-indexof/-/component-indexof-0.0.3.tgz#11d091312239eb8f32c8f25ae9cb002ffe8d3c24" + +compressible@~2.0.16: + version "2.0.16" + resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.16.tgz#a49bf9858f3821b64ce1be0296afc7380466a77f" + dependencies: + mime-db ">= 1.38.0 < 2" + +compression@^1.5.2: + version "1.7.4" + resolved "https://registry.yarnpkg.com/compression/-/compression-1.7.4.tgz#95523eff170ca57c29a0ca41e6fe131f41e5bb8f" + dependencies: + accepts "~1.3.5" + bytes "3.0.0" + compressible "~2.0.16" + debug "2.6.9" + on-headers "~1.0.2" + safe-buffer "5.1.2" + vary "~1.1.2" + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + +concat-stream@^1.5.0: + version "1.6.2" + resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34" + dependencies: + buffer-from "^1.0.0" + inherits "^2.0.3" + readable-stream "^2.2.2" + typedarray "^0.0.6" + +confusing-browser-globals@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/confusing-browser-globals/-/confusing-browser-globals-1.0.7.tgz#5ae852bd541a910e7ffb2dbb864a2d21a36ad29b" + +connect-history-api-fallback@^1.3.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz#8b32089359308d111115d81cad3fceab888f97bc" + +console-browserify@^1.1.0: + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10" + dependencies: + date-now "^0.1.4" + +console-control-strings@^1.0.0, console-control-strings@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" + +constants-browserify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/constants-browserify/-/constants-browserify-1.0.0.tgz#c20b96d8c617748aaf1c16021760cd27fcb8cb75" + +contains-path@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/contains-path/-/contains-path-0.1.0.tgz#fe8cf184ff6670b6baef01a9d4861a5cbec4120a" + +content-disposition@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4" + +content-type@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b" + +convert-source-map@^1.1.0, convert-source-map@^1.4.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.6.0.tgz#51b537a8c43e0f04dec1993bffcdd504e758ac20" + dependencies: + safe-buffer "~5.1.1" + +cookie-signature@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" + +cookie@0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb" + +copy-concurrently@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/copy-concurrently/-/copy-concurrently-1.0.5.tgz#92297398cae34937fcafd6ec8139c18051f0b5e0" + dependencies: + aproba "^1.1.1" + fs-write-stream-atomic "^1.0.8" + iferr "^0.1.5" + mkdirp "^0.5.1" + rimraf "^2.5.4" + run-queue "^1.0.0" + +copy-descriptor@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d" + +copy-to-clipboard@^3.0.8: + version "3.1.0" + resolved "https://registry.yarnpkg.com/copy-to-clipboard/-/copy-to-clipboard-3.1.0.tgz#0a28141899e6bd217b9dc13fd1689b3b38820b44" + dependencies: + toggle-selection "^1.0.6" + +core-js-compat@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.0.1.tgz#bff73ba31ca8687431b9c88f78d3362646fb76f0" + dependencies: + browserslist "^4.5.4" + core-js "3.0.1" + core-js-pure "3.0.1" + semver "^6.0.0" + +core-js-pure@3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.0.1.tgz#37358fb0d024e6b86d443d794f4e37e949098cbe" + +core-js@3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.0.1.tgz#1343182634298f7f38622f95e73f54e48ddf4738" + +core-js@^1.0.0: + version "1.2.7" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636" + +core-js@^2.4.0: + version "2.6.5" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.6.5.tgz#44bc8d249e7fb2ff5d00e0341a7ffb94fbf67895" + +core-util-is@1.0.2, core-util-is@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + +cosmiconfig@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-4.0.0.tgz#760391549580bbd2df1e562bc177b13c290972dc" + dependencies: + is-directory "^0.3.1" + js-yaml "^3.9.0" + parse-json "^4.0.0" + require-from-string "^2.0.1" + +cosmiconfig@^5.0.0, cosmiconfig@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-5.2.0.tgz#45038e4d28a7fe787203aede9c25bca4a08b12c8" + dependencies: + import-fresh "^2.0.0" + is-directory "^0.3.1" + js-yaml "^3.13.0" + parse-json "^4.0.0" + +create-ecdh@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/create-ecdh/-/create-ecdh-4.0.3.tgz#c9111b6f33045c4697f144787f9254cdc77c45ff" + dependencies: + bn.js "^4.1.0" + elliptic "^6.0.0" + +create-hash@^1.1.0, create-hash@^1.1.2: + version "1.2.0" + resolved "https://registry.yarnpkg.com/create-hash/-/create-hash-1.2.0.tgz#889078af11a63756bcfb59bd221996be3a9ef196" + dependencies: + cipher-base "^1.0.1" + inherits "^2.0.1" + md5.js "^1.3.4" + ripemd160 "^2.0.1" + sha.js "^2.4.0" + +create-hmac@^1.1.0, create-hmac@^1.1.2, create-hmac@^1.1.4: + version "1.1.7" + resolved "https://registry.yarnpkg.com/create-hmac/-/create-hmac-1.1.7.tgz#69170c78b3ab957147b2b8b04572e47ead2243ff" + dependencies: + cipher-base "^1.0.3" + create-hash "^1.1.0" + inherits "^2.0.1" + ripemd160 "^2.0.0" + safe-buffer "^5.0.1" + sha.js "^2.4.8" + +create-react-class@^15.5.3: + version "15.6.3" + resolved "https://registry.yarnpkg.com/create-react-class/-/create-react-class-15.6.3.tgz#2d73237fb3f970ae6ebe011a9e66f46dbca80036" + dependencies: + fbjs "^0.8.9" + loose-envify "^1.3.1" + object-assign "^4.1.1" + +create-react-context@0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/create-react-context/-/create-react-context-0.2.2.tgz#9836542f9aaa22868cd7d4a6f82667df38019dca" + dependencies: + fbjs "^0.8.0" + gud "^1.0.0" + +create-react-context@^0.2.2: + version "0.2.3" + resolved "https://registry.yarnpkg.com/create-react-context/-/create-react-context-0.2.3.tgz#9ec140a6914a22ef04b8b09b7771de89567cb6f3" + dependencies: + fbjs "^0.8.0" + gud "^1.0.0" + +cross-spawn@6.0.5, cross-spawn@^6.0.0, cross-spawn@^6.0.5: + version "6.0.5" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4" + dependencies: + nice-try "^1.0.4" + path-key "^2.0.1" + semver "^5.5.0" + shebang-command "^1.2.0" + which "^1.2.9" + +crypto-browserify@^3.11.0: + version "3.12.0" + resolved "https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec" + dependencies: + browserify-cipher "^1.0.0" + browserify-sign "^4.0.0" + create-ecdh "^4.0.0" + create-hash "^1.1.0" + create-hmac "^1.1.0" + diffie-hellman "^5.0.0" + inherits "^2.0.1" + pbkdf2 "^3.0.3" + public-encrypt "^4.0.0" + randombytes "^2.0.0" + randomfill "^1.0.3" + +css-animation@1.x, css-animation@^1.3.2, css-animation@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/css-animation/-/css-animation-1.5.0.tgz#c96b9097a5ef74a7be8480b45cc44e4ec6ca2bf5" + dependencies: + babel-runtime "6.x" + component-classes "^1.2.5" + +css-blank-pseudo@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/css-blank-pseudo/-/css-blank-pseudo-0.1.4.tgz#dfdefd3254bf8a82027993674ccf35483bfcb3c5" + dependencies: + postcss "^7.0.5" + +css-color-names@0.0.4, css-color-names@^0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/css-color-names/-/css-color-names-0.0.4.tgz#808adc2e79cf84738069b646cb20ec27beb629e0" + 
+css-declaration-sorter@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz#c198940f63a76d7e36c1e71018b001721054cb22" + dependencies: + postcss "^7.0.1" + timsort "^0.3.0" + +css-has-pseudo@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/css-has-pseudo/-/css-has-pseudo-0.10.0.tgz#3c642ab34ca242c59c41a125df9105841f6966ee" + dependencies: + postcss "^7.0.6" + postcss-selector-parser "^5.0.0-rc.4" + +css-loader@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-2.1.1.tgz#d8254f72e412bb2238bb44dd674ffbef497333ea" + dependencies: + camelcase "^5.2.0" + icss-utils "^4.1.0" + loader-utils "^1.2.3" + normalize-path "^3.0.0" + postcss "^7.0.14" + postcss-modules-extract-imports "^2.0.0" + postcss-modules-local-by-default "^2.0.6" + postcss-modules-scope "^2.1.0" + postcss-modules-values "^2.0.0" + postcss-value-parser "^3.3.0" + schema-utils "^1.0.0" + +css-prefers-color-scheme@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/css-prefers-color-scheme/-/css-prefers-color-scheme-3.1.1.tgz#6f830a2714199d4f0d0d0bb8a27916ed65cff1f4" + dependencies: + postcss "^7.0.5" + +css-select-base-adapter@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz#3b2ff4972cc362ab88561507a95408a1432135d7" + +css-select@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-1.2.0.tgz#2b3a110539c5355f1cd8d314623e870b121ec858" + dependencies: + boolbase "~1.0.0" + css-what "2.1" + domutils "1.5.1" + nth-check "~1.0.1" + +css-select@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-2.0.2.tgz#ab4386cec9e1f668855564b17c3733b43b2a5ede" + dependencies: + boolbase "^1.0.0" + css-what "^2.1.2" + domutils "^1.7.0" + nth-check "^1.0.2" + +css-tree@1.0.0-alpha.28: + version "1.0.0-alpha.28" + resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.0.0-alpha.28.tgz#8e8968190d886c9477bc8d61e96f61af3f7ffa7f" + dependencies: + mdn-data "~1.1.0" + source-map "^0.5.3" + +css-tree@1.0.0-alpha.29: + version "1.0.0-alpha.29" + resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.0.0-alpha.29.tgz#3fa9d4ef3142cbd1c301e7664c1f352bd82f5a39" + dependencies: + mdn-data "~1.1.0" + source-map "^0.5.3" + +css-unit-converter@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/css-unit-converter/-/css-unit-converter-1.1.1.tgz#d9b9281adcfd8ced935bdbaba83786897f64e996" + +css-url-regex@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/css-url-regex/-/css-url-regex-1.1.0.tgz#83834230cc9f74c457de59eebd1543feeb83b7ec" + +css-what@2.1, css-what@^2.1.2: + version "2.1.3" + resolved "https://registry.yarnpkg.com/css-what/-/css-what-2.1.3.tgz#a6d7604573365fe74686c3f311c56513d88285f2" + +cssdb@^4.3.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/cssdb/-/cssdb-4.4.0.tgz#3bf2f2a68c10f5c6a08abd92378331ee803cddb0" + +cssesc@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-2.0.0.tgz#3b13bd1bb1cb36e1bcb5a4dcd27f54c5dcb35703" + +cssesc@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-3.0.0.tgz#37741919903b868565e1c09ea747445cd18983ee" + +cssnano-preset-default@^4.0.7: + version "4.0.7" + resolved "https://registry.yarnpkg.com/cssnano-preset-default/-/cssnano-preset-default-4.0.7.tgz#51ec662ccfca0f88b396dcd9679cdb931be17f76" + dependencies: + 
css-declaration-sorter "^4.0.1" + cssnano-util-raw-cache "^4.0.1" + postcss "^7.0.0" + postcss-calc "^7.0.1" + postcss-colormin "^4.0.3" + postcss-convert-values "^4.0.1" + postcss-discard-comments "^4.0.2" + postcss-discard-duplicates "^4.0.2" + postcss-discard-empty "^4.0.1" + postcss-discard-overridden "^4.0.1" + postcss-merge-longhand "^4.0.11" + postcss-merge-rules "^4.0.3" + postcss-minify-font-values "^4.0.2" + postcss-minify-gradients "^4.0.2" + postcss-minify-params "^4.0.2" + postcss-minify-selectors "^4.0.2" + postcss-normalize-charset "^4.0.1" + postcss-normalize-display-values "^4.0.2" + postcss-normalize-positions "^4.0.2" + postcss-normalize-repeat-style "^4.0.2" + postcss-normalize-string "^4.0.2" + postcss-normalize-timing-functions "^4.0.2" + postcss-normalize-unicode "^4.0.1" + postcss-normalize-url "^4.0.1" + postcss-normalize-whitespace "^4.0.2" + postcss-ordered-values "^4.1.2" + postcss-reduce-initial "^4.0.3" + postcss-reduce-transforms "^4.0.2" + postcss-svgo "^4.0.2" + postcss-unique-selectors "^4.0.1" + +cssnano-util-get-arguments@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz#ed3a08299f21d75741b20f3b81f194ed49cc150f" + +cssnano-util-get-match@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz#c0e4ca07f5386bb17ec5e52250b4f5961365156d" + +cssnano-util-raw-cache@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz#b26d5fd5f72a11dfe7a7846fb4c67260f96bf282" + dependencies: + postcss "^7.0.0" + +cssnano-util-same-parent@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz#574082fb2859d2db433855835d9a8456ea18bbf3" + +cssnano@^4.1.0: + version "4.1.10" + resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-4.1.10.tgz#0ac41f0b13d13d465487e111b778d42da631b8b2" + dependencies: + cosmiconfig "^5.0.0" + cssnano-preset-default "^4.0.7" + is-resolvable "^1.0.0" + postcss "^7.0.0" + +csso@^3.5.1: + version "3.5.1" + resolved "https://registry.yarnpkg.com/csso/-/csso-3.5.1.tgz#7b9eb8be61628973c1b261e169d2f024008e758b" + dependencies: + css-tree "1.0.0-alpha.29" + +cssom@0.3.x, "cssom@>= 0.3.2 < 0.4.0", cssom@^0.3.4: + version "0.3.6" + resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.6.tgz#f85206cee04efa841f3c5982a74ba96ab20d65ad" + +cssstyle@^1.0.0, cssstyle@^1.1.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-1.2.2.tgz#427ea4d585b18624f6fdbf9de7a2a1a3ba713077" + dependencies: + cssom "0.3.x" + +csstype@^2.2.0: + version "2.6.4" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-2.6.4.tgz#d585a6062096e324e7187f80e04f92bd0f00e37f" + +customize-cra@^0.2.12: + version "0.2.12" + resolved "https://registry.yarnpkg.com/customize-cra/-/customize-cra-0.2.12.tgz#ffc9b6378cbdffbe29ad02c479ae5d7a502aca69" + dependencies: + lodash.flow "^3.5.0" + +cyclist@~0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/cyclist/-/cyclist-0.2.2.tgz#1b33792e11e914a2fd6d6ed6447464444e5fa640" + +damerau-levenshtein@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/damerau-levenshtein/-/damerau-levenshtein-1.0.4.tgz#03191c432cb6eea168bb77f3a55ffdccb8978514" + +dashdash@^1.12.0: + version "1.14.1" + resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" + 
dependencies: + assert-plus "^1.0.0" + +data-urls@^1.0.0, data-urls@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/data-urls/-/data-urls-1.1.0.tgz#15ee0582baa5e22bb59c77140da8f9c76963bbfe" + dependencies: + abab "^2.0.0" + whatwg-mimetype "^2.2.0" + whatwg-url "^7.0.0" + +date-now@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b" + +debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.0, debug@^2.6.8, debug@^2.6.9: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + dependencies: + ms "2.0.0" + +debug@^3.2.5, debug@^3.2.6: + version "3.2.6" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.6.tgz#e83d17de16d8a7efb7717edbe5fb10135eee629b" + dependencies: + ms "^2.1.1" + +debug@^4.0.1, debug@^4.1.0, debug@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791" + dependencies: + ms "^2.1.1" + +decamelize@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" + +decamelize@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-2.0.0.tgz#656d7bbc8094c4c788ea53c5840908c9c7d063c7" + dependencies: + xregexp "4.0.0" + +decode-uri-component@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" + +deep-equal@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.0.1.tgz#f5d260292b660e084eff4cdbc9f08ad3247448b5" + +deep-extend@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" + +deep-is@~0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" + +default-gateway@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/default-gateway/-/default-gateway-4.2.0.tgz#167104c7500c2115f6dd69b0a536bb8ed720552b" + dependencies: + execa "^1.0.0" + ip-regex "^2.1.0" + +default-require-extensions@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/default-require-extensions/-/default-require-extensions-2.0.0.tgz#f5f8fbb18a7d6d50b21f641f649ebb522cfe24f7" + dependencies: + strip-bom "^3.0.0" + +define-properties@^1.1.2, define-properties@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.3.tgz#cf88da6cbee26fe6db7094f61d870cbd84cee9f1" + dependencies: + object-keys "^1.0.12" + +define-property@^0.2.5: + version "0.2.5" + resolved "https://registry.yarnpkg.com/define-property/-/define-property-0.2.5.tgz#c35b1ef918ec3c990f9a5bc57be04aacec5c8116" + dependencies: + is-descriptor "^0.1.0" + +define-property@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/define-property/-/define-property-1.0.0.tgz#769ebaaf3f4a63aad3af9e8d304c9bbe79bfb0e6" + dependencies: + is-descriptor "^1.0.0" + +define-property@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/define-property/-/define-property-2.0.2.tgz#d459689e8d654ba77e02a817f8710d702cb16e9d" + dependencies: + is-descriptor "^1.0.2" + isobject "^3.0.1" + +del@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/del/-/del-3.0.0.tgz#53ecf699ffcbcb39637691ab13baf160819766e5" + dependencies: + globby "^6.1.0" + is-path-cwd "^1.0.0" + is-path-in-cwd "^1.0.0" + p-map "^1.1.1" + pify "^3.0.0" + rimraf "^2.2.8" + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + +delegates@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" + +depd@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" + +des.js@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/des.js/-/des.js-1.0.0.tgz#c074d2e2aa6a8a9a07dbd61f9a15c2cd83ec8ecc" + dependencies: + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + +destroy@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80" + +detect-libc@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b" + +detect-newline@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-2.1.0.tgz#f41f1c10be4b00e87b5f13da680759f2c5bfd3e2" + +detect-node@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/detect-node/-/detect-node-2.0.4.tgz#014ee8f8f669c5c58023da64b8179c083a28c46c" + +detect-port-alt@1.1.6: + version "1.1.6" + resolved "https://registry.yarnpkg.com/detect-port-alt/-/detect-port-alt-1.1.6.tgz#24707deabe932d4a3cf621302027c2b266568275" + dependencies: + address "^1.0.1" + debug "^2.6.0" + +diff-sequences@^24.3.0: + version "24.3.0" + resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-24.3.0.tgz#0f20e8a1df1abddaf4d9c226680952e64118b975" + +diffie-hellman@^5.0.0: + version "5.0.3" + resolved "https://registry.yarnpkg.com/diffie-hellman/-/diffie-hellman-5.0.3.tgz#40e8ee98f55a2149607146921c63e1ae5f3d2875" + dependencies: + bn.js "^4.1.0" + miller-rabin "^4.0.0" + randombytes "^2.0.0" + +dir-glob@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-2.0.0.tgz#0b205d2b6aef98238ca286598a8204d29d0a0034" + dependencies: + arrify "^1.0.1" + path-type "^3.0.0" + +dns-equal@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/dns-equal/-/dns-equal-1.0.0.tgz#b39e7f1da6eb0a75ba9c17324b34753c47e0654d" + +dns-packet@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-1.3.1.tgz#12aa426981075be500b910eedcd0b47dd7deda5a" + dependencies: + ip "^1.1.0" + safe-buffer "^5.0.1" + +dns-txt@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/dns-txt/-/dns-txt-2.0.2.tgz#b91d806f5d27188e4ab3e7d107d881a1cc4642b6" + dependencies: + buffer-indexof "^1.0.0" + +doctrine@1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-1.5.0.tgz#379dce730f6166f76cefa4e6707a159b02c5a6fa" + dependencies: + esutils "^2.0.2" + isarray "^1.0.0" + +doctrine@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d" + dependencies: + esutils "^2.0.2" + +doctrine@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-3.0.0.tgz#addebead72a6574db783639dc87a121773973961" + dependencies: + esutils "^2.0.2" + +dom-align@^1.7.0: + version "1.8.2" 
+ resolved "https://registry.yarnpkg.com/dom-align/-/dom-align-1.8.2.tgz#fdcd36bce25ba8d34fe3582efd57ac767df490bd" + +dom-closest@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/dom-closest/-/dom-closest-0.2.0.tgz#ebd9f91d1bf22e8d6f477876bbcd3ec90216c0cf" + dependencies: + dom-matches ">=1.0.1" + +dom-converter@^0.2: + version "0.2.0" + resolved "https://registry.yarnpkg.com/dom-converter/-/dom-converter-0.2.0.tgz#6721a9daee2e293682955b6afe416771627bb768" + dependencies: + utila "~0.4" + +dom-matches@>=1.0.1: + version "2.0.0" + resolved "https://registry.yarnpkg.com/dom-matches/-/dom-matches-2.0.0.tgz#d2728b416a87533980eb089b848d253cf23a758c" + +dom-scroll-into-view@1.x, dom-scroll-into-view@^1.2.0, dom-scroll-into-view@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/dom-scroll-into-view/-/dom-scroll-into-view-1.2.1.tgz#e8f36732dd089b0201a88d7815dc3f88e6d66c7e" + +dom-serializer@0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.1.tgz#1ec4059e284babed36eec2941d4a970a189ce7c0" + dependencies: + domelementtype "^1.3.0" + entities "^1.1.1" + +domain-browser@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/domain-browser/-/domain-browser-1.2.0.tgz#3d31f50191a6749dd1375a7f522e823d42e54eda" + +domelementtype@1, domelementtype@^1.3.0, domelementtype@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.1.tgz#d048c44b37b0d10a7f2a3d5fee3f4333d790481f" + +domexception@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/domexception/-/domexception-1.0.1.tgz#937442644ca6a31261ef36e3ec677fe805582c90" + dependencies: + webidl-conversions "^4.0.2" + +domhandler@^2.3.0: + version "2.4.2" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.4.2.tgz#8805097e933d65e85546f726d60f5eb88b44f803" + dependencies: + domelementtype "1" + +domutils@1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf" + dependencies: + dom-serializer "0" + domelementtype "1" + +domutils@^1.5.1, domutils@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.7.0.tgz#56ea341e834e06e6748af7a1cb25da67ea9f8c2a" + dependencies: + dom-serializer "0" + domelementtype "1" + +dot-prop@^4.1.1: + version "4.2.0" + resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-4.2.0.tgz#1f19e0c2e1aa0e32797c49799f2837ac6af69c57" + dependencies: + is-obj "^1.0.0" + +dotenv-expand@4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/dotenv-expand/-/dotenv-expand-4.2.0.tgz#def1f1ca5d6059d24a766e587942c21106ce1275" + +dotenv@6.2.0, dotenv@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-6.2.0.tgz#941c0410535d942c8becf28d3f357dbd9d476064" + +draft-js@^0.10.0, draft-js@~0.10.0: + version "0.10.5" + resolved "https://registry.yarnpkg.com/draft-js/-/draft-js-0.10.5.tgz#bfa9beb018fe0533dbb08d6675c371a6b08fa742" + dependencies: + fbjs "^0.8.15" + immutable "~3.7.4" + object-assign "^4.1.0" + +duplexer@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/duplexer/-/duplexer-0.1.1.tgz#ace6ff808c1ce66b57d1ebf97977acb02334cfc1" + +duplexify@^3.4.2, duplexify@^3.6.0: + version "3.7.1" + resolved "https://registry.yarnpkg.com/duplexify/-/duplexify-3.7.1.tgz#2a4df5317f6ccfd91f86d6fd25d8d8a103b88309" + dependencies: + end-of-stream "^1.0.0" + inherits "^2.0.1" + readable-stream "^2.0.0" + stream-shift "^1.0.0" + 
+ecc-jsbn@~0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" + dependencies: + jsbn "~0.1.0" + safer-buffer "^2.1.0" + +ee-first@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + +electron-to-chromium@^1.3.122: + version "1.3.130" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.130.tgz#27f84e823bd80a5090e2baeca4fefbaf476cf7af" + +electron-to-chromium@^1.3.124: + version "1.3.125" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.125.tgz#dbde0e95e64ebe322db0eca764d951f885a5aff2" + +elliptic@^6.0.0: + version "6.4.1" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.4.1.tgz#c2d0b7776911b86722c632c3c06c60f2f819939a" + dependencies: + bn.js "^4.4.0" + brorand "^1.0.1" + hash.js "^1.0.0" + hmac-drbg "^1.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + minimalistic-crypto-utils "^1.0.0" + +emoji-regex@^7.0.1, emoji-regex@^7.0.2: + version "7.0.3" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-7.0.3.tgz#933a04052860c85e83c122479c4748a8e4c72156" + +emojis-list@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-2.1.0.tgz#4daa4d9db00f9819880c79fa457ae5b09a1fd389" + +encodeurl@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" + +encoding@^0.1.11: + version "0.1.12" + resolved "https://registry.yarnpkg.com/encoding/-/encoding-0.1.12.tgz#538b66f3ee62cd1ab51ec323829d1f9480c74beb" + dependencies: + iconv-lite "~0.4.13" + +end-of-stream@^1.0.0, end-of-stream@^1.1.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.1.tgz#ed29634d19baba463b6ce6b80a37213eab71ec43" + dependencies: + once "^1.4.0" + +enhanced-resolve@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-4.1.0.tgz#41c7e0bfdfe74ac1ffe1e57ad6a5c6c9f3742a7f" + dependencies: + graceful-fs "^4.1.2" + memory-fs "^0.4.0" + tapable "^1.0.0" + +enquire.js@^2.1.6: + version "2.1.6" + resolved "https://registry.yarnpkg.com/enquire.js/-/enquire.js-2.1.6.tgz#3e8780c9b8b835084c3f60e166dbc3c2a3c89814" + +entities@^1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.2.tgz#bdfa735299664dfafd34529ed4f8522a275fea56" + +errno@^0.1.1, errno@^0.1.3, errno@~0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.7.tgz#4684d71779ad39af177e3f007996f7c67c852618" + dependencies: + prr "~1.0.1" + +error-ex@^1.2.0, error-ex@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + dependencies: + is-arrayish "^0.2.1" + +es-abstract@^1.11.0, es-abstract@^1.12.0, es-abstract@^1.5.1, es-abstract@^1.7.0: + version "1.13.0" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.13.0.tgz#ac86145fdd5099d8dd49558ccba2eaf9b88e24e9" + dependencies: + es-to-primitive "^1.2.0" + function-bind "^1.1.1" + has "^1.0.3" + is-callable "^1.1.4" + is-regex "^1.0.4" + object-keys "^1.0.12" + +es-to-primitive@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.0.tgz#edf72478033456e8dda8ef09e00ad9650707f377" + dependencies: + is-callable "^1.1.4" + is-date-object 
"^1.0.1" + is-symbol "^1.0.2" + +escape-html@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + +escape-string-regexp@1.0.5, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + +escodegen@^1.11.0, escodegen@^1.9.1: + version "1.11.1" + resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.11.1.tgz#c485ff8d6b4cdb89e27f4a856e91f118401ca510" + dependencies: + esprima "^3.1.3" + estraverse "^4.2.0" + esutils "^2.0.2" + optionator "^0.8.1" + optionalDependencies: + source-map "~0.6.1" + +eslint-config-react-app@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/eslint-config-react-app/-/eslint-config-react-app-4.0.0.tgz#1651f44b3830d863817af6ebed0916193aa870c3" + dependencies: + confusing-browser-globals "^1.0.7" + +eslint-import-resolver-node@^0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.2.tgz#58f15fb839b8d0576ca980413476aab2472db66a" + dependencies: + debug "^2.6.9" + resolve "^1.5.0" + +eslint-loader@2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/eslint-loader/-/eslint-loader-2.1.2.tgz#453542a1230d6ffac90e4e7cb9cadba9d851be68" + dependencies: + loader-fs-cache "^1.0.0" + loader-utils "^1.0.2" + object-assign "^4.0.1" + object-hash "^1.1.4" + rimraf "^2.6.1" + +eslint-module-utils@^2.3.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.4.0.tgz#8b93499e9b00eab80ccb6614e69f03678e84e09a" + dependencies: + debug "^2.6.8" + pkg-dir "^2.0.0" + +eslint-plugin-flowtype@2.50.1: + version "2.50.1" + resolved "https://registry.yarnpkg.com/eslint-plugin-flowtype/-/eslint-plugin-flowtype-2.50.1.tgz#36d4c961ac8b9e9e1dc091d3fba0537dad34ae8a" + dependencies: + lodash "^4.17.10" + +eslint-plugin-import@2.16.0: + version "2.16.0" + resolved "https://registry.yarnpkg.com/eslint-plugin-import/-/eslint-plugin-import-2.16.0.tgz#97ac3e75d0791c4fac0e15ef388510217be7f66f" + dependencies: + contains-path "^0.1.0" + debug "^2.6.9" + doctrine "1.5.0" + eslint-import-resolver-node "^0.3.2" + eslint-module-utils "^2.3.0" + has "^1.0.3" + lodash "^4.17.11" + minimatch "^3.0.4" + read-pkg-up "^2.0.0" + resolve "^1.9.0" + +eslint-plugin-jsx-a11y@6.2.1: + version "6.2.1" + resolved "https://registry.yarnpkg.com/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.2.1.tgz#4ebba9f339b600ff415ae4166e3e2e008831cf0c" + dependencies: + aria-query "^3.0.0" + array-includes "^3.0.3" + ast-types-flow "^0.0.7" + axobject-query "^2.0.2" + damerau-levenshtein "^1.0.4" + emoji-regex "^7.0.2" + has "^1.0.3" + jsx-ast-utils "^2.0.1" + +eslint-plugin-react-hooks@^1.5.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-1.6.0.tgz#348efcda8fb426399ac7b8609607c7b4025a6f5f" + +eslint-plugin-react@7.12.4: + version "7.12.4" + resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.12.4.tgz#b1ecf26479d61aee650da612e425c53a99f48c8c" + dependencies: + array-includes "^3.0.3" + doctrine "^2.1.0" + has "^1.0.3" + jsx-ast-utils "^2.0.1" + object.fromentries "^2.0.0" + prop-types "^15.6.2" + resolve "^1.9.0" + +eslint-scope@3.7.1: + version "3.7.1" + resolved 
"https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-3.7.1.tgz#3d63c3edfda02e06e01a452ad88caacc7cdcb6e8" + dependencies: + esrecurse "^4.1.0" + estraverse "^4.1.1" + +eslint-scope@^4.0.0, eslint-scope@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-4.0.3.tgz#ca03833310f6889a3264781aa82e63eb9cfe7848" + dependencies: + esrecurse "^4.1.0" + estraverse "^4.1.1" + +eslint-utils@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-1.3.1.tgz#9a851ba89ee7c460346f97cf8939c7298827e512" + +eslint-visitor-keys@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.0.0.tgz#3f3180fb2e291017716acb4c9d6d5b5c34a6a81d" + +eslint@^5.16.0: + version "5.16.0" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-5.16.0.tgz#a1e3ac1aae4a3fbd8296fcf8f7ab7314cbb6abea" + dependencies: + "@babel/code-frame" "^7.0.0" + ajv "^6.9.1" + chalk "^2.1.0" + cross-spawn "^6.0.5" + debug "^4.0.1" + doctrine "^3.0.0" + eslint-scope "^4.0.3" + eslint-utils "^1.3.1" + eslint-visitor-keys "^1.0.0" + espree "^5.0.1" + esquery "^1.0.1" + esutils "^2.0.2" + file-entry-cache "^5.0.1" + functional-red-black-tree "^1.0.1" + glob "^7.1.2" + globals "^11.7.0" + ignore "^4.0.6" + import-fresh "^3.0.0" + imurmurhash "^0.1.4" + inquirer "^6.2.2" + js-yaml "^3.13.0" + json-stable-stringify-without-jsonify "^1.0.1" + levn "^0.3.0" + lodash "^4.17.11" + minimatch "^3.0.4" + mkdirp "^0.5.1" + natural-compare "^1.4.0" + optionator "^0.8.2" + path-is-inside "^1.0.2" + progress "^2.0.0" + regexpp "^2.0.1" + semver "^5.5.1" + strip-ansi "^4.0.0" + strip-json-comments "^2.0.1" + table "^5.2.3" + text-table "^0.2.0" + +espree@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/espree/-/espree-5.0.1.tgz#5d6526fa4fc7f0788a5cf75b15f30323e2f81f7a" + dependencies: + acorn "^6.0.7" + acorn-jsx "^5.0.0" + eslint-visitor-keys "^1.0.0" + +esprima@^3.1.3: + version "3.1.3" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633" + +esprima@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" + +esquery@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.0.1.tgz#406c51658b1f5991a5f9b62b1dc25b00e3e5c708" + dependencies: + estraverse "^4.0.0" + +esrecurse@^4.1.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.2.1.tgz#007a3b9fdbc2b3bb87e4879ea19c92fdbd3942cf" + dependencies: + estraverse "^4.1.0" + +estraverse@^4.0.0, estraverse@^4.1.0, estraverse@^4.1.1, estraverse@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.2.0.tgz#0dee3fed31fcd469618ce7342099fc1afa0bdb13" + +esutils@^2.0.0, esutils@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b" + +etag@~1.8.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" + +eventemitter3@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-3.1.0.tgz#090b4d6cdbd645ed10bf750d4b5407942d7ba163" + +eventlistener@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/eventlistener/-/eventlistener-0.0.1.tgz#ed2baabb852227af2bcf889152c72c63ca532eb8" + +events@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/events/-/events-3.0.0.tgz#9a0a0dfaf62893d92b875b8f2698ca4114973e88" + +eventsource@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/eventsource/-/eventsource-1.0.7.tgz#8fbc72c93fcd34088090bc0a4e64f4b5cee6d8d0" + dependencies: + original "^1.0.0" + +evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz#7fcbdb198dc71959432efe13842684e0525acb02" + dependencies: + md5.js "^1.3.4" + safe-buffer "^5.1.1" + +exec-sh@^0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.3.2.tgz#6738de2eb7c8e671d0366aea0b0db8c6f7d7391b" + +execa@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-1.0.0.tgz#c6236a5bb4df6d6f15e88e7f017798216749ddd8" + dependencies: + cross-spawn "^6.0.0" + get-stream "^4.0.0" + is-stream "^1.1.0" + npm-run-path "^2.0.0" + p-finally "^1.0.0" + signal-exit "^3.0.0" + strip-eof "^1.0.0" + +exit@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c" + +expand-brackets@^2.1.4: + version "2.1.4" + resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-2.1.4.tgz#b77735e315ce30f6b6eff0f83b04151a22449622" + dependencies: + debug "^2.3.3" + define-property "^0.2.5" + extend-shallow "^2.0.1" + posix-character-classes "^0.1.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +expect@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/expect/-/expect-24.7.1.tgz#d91defbab4e627470a152feaf35b3c31aa1c7c14" + dependencies: + "@jest/types" "^24.7.0" + ansi-styles "^3.2.0" + jest-get-type "^24.3.0" + jest-matcher-utils "^24.7.0" + jest-message-util "^24.7.1" + jest-regex-util "^24.3.0" + +express@^4.16.2: + version "4.16.4" + resolved "https://registry.yarnpkg.com/express/-/express-4.16.4.tgz#fddef61926109e24c515ea97fd2f1bdbf62df12e" + dependencies: + accepts "~1.3.5" + array-flatten "1.1.1" + body-parser "1.18.3" + content-disposition "0.5.2" + content-type "~1.0.4" + cookie "0.3.1" + cookie-signature "1.0.6" + debug "2.6.9" + depd "~1.1.2" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + finalhandler "1.1.1" + fresh "0.5.2" + merge-descriptors "1.0.1" + methods "~1.1.2" + on-finished "~2.3.0" + parseurl "~1.3.2" + path-to-regexp "0.1.7" + proxy-addr "~2.0.4" + qs "6.5.2" + range-parser "~1.2.0" + safe-buffer "5.1.2" + send "0.16.2" + serve-static "1.13.2" + setprototypeof "1.1.0" + statuses "~1.4.0" + type-is "~1.6.16" + utils-merge "1.0.1" + vary "~1.1.2" + +extend-shallow@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f" + dependencies: + is-extendable "^0.1.0" + +extend-shallow@^3.0.0, extend-shallow@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-3.0.2.tgz#26a71aaf073b39fb2127172746131c2704028db8" + dependencies: + assign-symbols "^1.0.0" + is-extendable "^1.0.1" + +extend@^3.0.0, extend@~3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" + +external-editor@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/external-editor/-/external-editor-3.0.3.tgz#5866db29a97826dbe4bf3afd24070ead9ea43a27" + dependencies: + chardet "^0.7.0" + iconv-lite "^0.4.24" + tmp "^0.0.33" + +extglob@^2.0.4: + version "2.0.4" + resolved 
"https://registry.yarnpkg.com/extglob/-/extglob-2.0.4.tgz#ad00fe4dc612a9232e8718711dc5cb5ab0285543" + dependencies: + array-unique "^0.3.2" + define-property "^1.0.0" + expand-brackets "^2.1.4" + extend-shallow "^2.0.1" + fragment-cache "^0.2.1" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +extsprintf@1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" + +extsprintf@^1.2.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" + +fast-deep-equal@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz#7b05218ddf9667bf7f370bf7fdb2cb15fdd0aa49" + +fast-glob@^2.0.2: + version "2.2.6" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-2.2.6.tgz#a5d5b697ec8deda468d85a74035290a025a95295" + dependencies: + "@mrmlnc/readdir-enhanced" "^2.2.1" + "@nodelib/fs.stat" "^1.1.2" + glob-parent "^3.1.0" + is-glob "^4.0.0" + merge2 "^1.2.3" + micromatch "^3.1.10" + +fast-json-stable-stringify@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" + +fast-levenshtein@~2.0.4: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + +faye-websocket@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4" + dependencies: + websocket-driver ">=0.5.1" + +faye-websocket@~0.11.1: + version "0.11.1" + resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.11.1.tgz#f0efe18c4f56e4f40afc7e06c719fd5ee6188f38" + dependencies: + websocket-driver ">=0.5.1" + +fb-watchman@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-2.0.0.tgz#54e9abf7dfa2f26cd9b1636c588c1afc05de5d58" + dependencies: + bser "^2.0.0" + +fbjs@^0.8.0, fbjs@^0.8.15, fbjs@^0.8.16, fbjs@^0.8.9: + version "0.8.17" + resolved "https://registry.yarnpkg.com/fbjs/-/fbjs-0.8.17.tgz#c4d598ead6949112653d6588b01a5cdcd9f90fdd" + dependencies: + core-js "^1.0.0" + isomorphic-fetch "^2.1.1" + loose-envify "^1.0.0" + object-assign "^4.1.0" + promise "^7.1.1" + setimmediate "^1.0.5" + ua-parser-js "^0.7.18" + +figgy-pudding@^3.5.1: + version "3.5.1" + resolved "https://registry.yarnpkg.com/figgy-pudding/-/figgy-pudding-3.5.1.tgz#862470112901c727a0e495a80744bd5baa1d6790" + +figures@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/figures/-/figures-2.0.0.tgz#3ab1a2d2a62c8bfb431a0c94cb797a2fce27c962" + dependencies: + escape-string-regexp "^1.0.5" + +file-entry-cache@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-5.0.1.tgz#ca0f6efa6dd3d561333fb14515065c2fafdf439c" + dependencies: + flat-cache "^2.0.1" + +file-loader@3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/file-loader/-/file-loader-3.0.1.tgz#f8e0ba0b599918b51adfe45d66d1e771ad560faa" + dependencies: + loader-utils "^1.0.2" + schema-utils "^1.0.0" + +fileset@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/fileset/-/fileset-2.0.3.tgz#8e7548a96d3cc2327ee5e674168723a333bba2a0" + dependencies: + glob "^7.0.3" + minimatch "^3.0.3" + +filesize@3.6.1: + version "3.6.1" + resolved 
"https://registry.yarnpkg.com/filesize/-/filesize-3.6.1.tgz#090bb3ee01b6f801a8a8be99d31710b3422bb317" + +fill-range@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-4.0.0.tgz#d544811d428f98eb06a63dc402d2403c328c38f7" + dependencies: + extend-shallow "^2.0.1" + is-number "^3.0.0" + repeat-string "^1.6.1" + to-regex-range "^2.1.0" + +finalhandler@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.1.1.tgz#eebf4ed840079c83f4249038c9d703008301b105" + dependencies: + debug "2.6.9" + encodeurl "~1.0.2" + escape-html "~1.0.3" + on-finished "~2.3.0" + parseurl "~1.3.2" + statuses "~1.4.0" + unpipe "~1.0.0" + +find-cache-dir@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-0.1.1.tgz#c8defae57c8a52a8a784f9e31c57c742e993a0b9" + dependencies: + commondir "^1.0.1" + mkdirp "^0.5.1" + pkg-dir "^1.0.0" + +find-cache-dir@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-2.1.0.tgz#8d0f94cd13fe43c6c7c261a0d86115ca918c05f7" + dependencies: + commondir "^1.0.1" + make-dir "^2.0.0" + pkg-dir "^3.0.0" + +find-up@3.0.0, find-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73" + dependencies: + locate-path "^3.0.0" + +find-up@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f" + dependencies: + path-exists "^2.0.0" + pinkie-promise "^2.0.0" + +find-up@^2.0.0, find-up@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" + dependencies: + locate-path "^2.0.0" + +flat-cache@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-2.0.1.tgz#5d296d6f04bda44a4630a301413bdbc2ec085ec0" + dependencies: + flatted "^2.0.0" + rimraf "2.6.3" + write "1.0.3" + +flatted@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/flatted/-/flatted-2.0.0.tgz#55122b6536ea496b4b44893ee2608141d10d9916" + +flatten@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/flatten/-/flatten-1.0.2.tgz#dae46a9d78fbe25292258cc1e780a41d95c03782" + +flush-write-stream@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/flush-write-stream/-/flush-write-stream-1.1.1.tgz#8dd7d873a1babc207d94ead0c2e0e44276ebf2e8" + dependencies: + inherits "^2.0.3" + readable-stream "^2.3.6" + +follow-redirects@^1.0.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.7.0.tgz#489ebc198dc0e7f64167bd23b03c4c19b5784c76" + dependencies: + debug "^3.2.6" + +for-in@^0.1.3: + version "0.1.8" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-0.1.8.tgz#d8773908e31256109952b1fdb9b3fa867d2775e1" + +for-in@^1.0.1, for-in@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" + +for-own@^0.1.3: + version "0.1.5" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce" + dependencies: + for-in "^1.0.1" + +for-own@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-1.0.0.tgz#c63332f415cedc4b04dbfe70cf836494c53cb44b" + dependencies: + for-in "^1.0.1" + +forever-agent@~0.6.1: + version "0.6.1" + resolved 
"https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" + +fork-ts-checker-webpack-plugin@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-1.0.1.tgz#140453c4dc3dc35937034b7416b66a3bacfbc3a8" + dependencies: + babel-code-frame "^6.22.0" + chalk "^2.4.1" + chokidar "^2.0.4" + micromatch "^3.1.10" + minimatch "^3.0.4" + semver "^5.6.0" + tapable "^1.0.0" + +form-data@~2.3.2: + version "2.3.3" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6" + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.6" + mime-types "^2.1.12" + +forwarded@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.2.tgz#98c23dab1175657b8c0573e8ceccd91b0ff18c84" + +fragment-cache@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/fragment-cache/-/fragment-cache-0.2.1.tgz#4290fad27f13e89be7f33799c6bc5a0abfff0d19" + dependencies: + map-cache "^0.2.2" + +fresh@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" + +from2@^2.1.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/from2/-/from2-2.3.0.tgz#8bfb5502bde4a4d36cfdeea007fcca21d7e382af" + dependencies: + inherits "^2.0.1" + readable-stream "^2.0.0" + +fs-extra@7.0.1, fs-extra@^7.0.0: + version "7.0.1" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-7.0.1.tgz#4f189c44aa123b895f722804f55ea23eadc348e9" + dependencies: + graceful-fs "^4.1.2" + jsonfile "^4.0.0" + universalify "^0.1.0" + +fs-extra@^4.0.2: + version "4.0.3" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-4.0.3.tgz#0d852122e5bc5beb453fb028e9c0c9bf36340c94" + dependencies: + graceful-fs "^4.1.2" + jsonfile "^4.0.0" + universalify "^0.1.0" + +fs-minipass@^1.2.5: + version "1.2.5" + resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.5.tgz#06c277218454ec288df77ada54a03b8702aacb9d" + dependencies: + minipass "^2.2.1" + +fs-write-stream-atomic@^1.0.8: + version "1.0.10" + resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9" + dependencies: + graceful-fs "^4.1.2" + iferr "^0.1.5" + imurmurhash "^0.1.4" + readable-stream "1 || 2" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + +fsevents@2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.0.6.tgz#87b19df0bfb4a1a51d7ddb51b01b5f3bedb40c33" + +fsevents@^1.2.7: + version "1.2.8" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.2.8.tgz#57ea5320f762cd4696e5e8e87120eccc8b11cacf" + dependencies: + nan "^2.12.1" + node-pre-gyp "^0.12.0" + +function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + +functional-red-black-tree@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327" + +gauge@~2.7.3: + version "2.7.4" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" + dependencies: + aproba "^1.0.3" + console-control-strings "^1.0.0" + has-unicode "^2.0.0" + object-assign 
"^4.1.0" + signal-exit "^3.0.0" + string-width "^1.0.1" + strip-ansi "^3.0.1" + wide-align "^1.1.0" + +get-caller-file@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.3.tgz#f978fa4c90d1dfe7ff2d6beda2a515e713bdcf4a" + +get-own-enumerable-property-symbols@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.0.tgz#b877b49a5c16aefac3655f2ed2ea5b684df8d203" + +get-stream@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" + dependencies: + pump "^3.0.0" + +get-value@^2.0.3, get-value@^2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/get-value/-/get-value-2.0.6.tgz#dc15ca1c672387ca76bd37ac0a395ba2042a2c28" + +getpass@^0.1.1: + version "0.1.7" + resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" + dependencies: + assert-plus "^1.0.0" + +glob-parent@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-3.1.0.tgz#9e6af6299d8d3bd2bd40430832bd113df906c5ae" + dependencies: + is-glob "^3.1.0" + path-dirname "^1.0.0" + +glob-to-regexp@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz#8c5a1494d2066c570cc3bfe4496175acc4d502ab" + +glob@^7.0.3, glob@^7.1.1, glob@^7.1.2, glob@^7.1.3: + version "7.1.3" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.3.tgz#3960832d3f1574108342dafd3a67b332c0969df1" + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +global-modules@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/global-modules/-/global-modules-2.0.0.tgz#997605ad2345f27f51539bea26574421215c7780" + dependencies: + global-prefix "^3.0.0" + +global-prefix@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/global-prefix/-/global-prefix-3.0.0.tgz#fc85f73064df69f50421f47f883fe5b913ba9b97" + dependencies: + ini "^1.3.5" + kind-of "^6.0.2" + which "^1.3.1" + +globals@^11.1.0, globals@^11.7.0: + version "11.11.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.11.0.tgz#dcf93757fa2de5486fbeed7118538adf789e9c2e" + +globby@8.0.2: + version "8.0.2" + resolved "https://registry.yarnpkg.com/globby/-/globby-8.0.2.tgz#5697619ccd95c5275dbb2d6faa42087c1a941d8d" + dependencies: + array-union "^1.0.1" + dir-glob "2.0.0" + fast-glob "^2.0.2" + glob "^7.1.2" + ignore "^3.3.5" + pify "^3.0.0" + slash "^1.0.0" + +globby@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-6.1.0.tgz#f5a6d70e8395e21c858fb0489d64df02424d506c" + dependencies: + array-union "^1.0.1" + glob "^7.0.3" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.6: + version "4.1.15" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.15.tgz#ffb703e1066e8a0eeaa4c8b80ba9253eeefbfb00" + +growly@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/growly/-/growly-1.3.0.tgz#f10748cbe76af964b7c96c93c6bcc28af120c081" + +gud@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/gud/-/gud-1.0.0.tgz#a489581b17e6a70beca9abe3ae57de7a499852c0" + +gzip-size@5.0.0: + version "5.0.0" + resolved 
"https://registry.yarnpkg.com/gzip-size/-/gzip-size-5.0.0.tgz#a55ecd99222f4c48fd8c01c625ce3b349d0a0e80" + dependencies: + duplexer "^0.1.1" + pify "^3.0.0" + +hammerjs@^2.0.8: + version "2.0.8" + resolved "https://registry.yarnpkg.com/hammerjs/-/hammerjs-2.0.8.tgz#04ef77862cff2bb79d30f7692095930222bf60f1" + +handle-thing@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/handle-thing/-/handle-thing-2.0.0.tgz#0e039695ff50c93fc288557d696f3c1dc6776754" + +handlebars@^4.1.0: + version "4.1.2" + resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.1.2.tgz#b6b37c1ced0306b221e094fc7aca3ec23b131b67" + dependencies: + neo-async "^2.6.0" + optimist "^0.6.1" + source-map "^0.6.1" + optionalDependencies: + uglify-js "^3.1.4" + +har-schema@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" + +har-validator@~5.1.0: + version "5.1.3" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.3.tgz#1ef89ebd3e4996557675eed9893110dc350fa080" + dependencies: + ajv "^6.5.5" + har-schema "^2.0.0" + +harmony-reflect@^1.4.6: + version "1.6.1" + resolved "https://registry.yarnpkg.com/harmony-reflect/-/harmony-reflect-1.6.1.tgz#c108d4f2bb451efef7a37861fdbdae72c9bdefa9" + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + dependencies: + ansi-regex "^2.0.0" + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + +has-symbols@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.0.tgz#ba1a8f1af2a0fc39650f5c850367704122063b44" + +has-unicode@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" + +has-value@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/has-value/-/has-value-0.3.1.tgz#7b1f58bada62ca827ec0a2078025654845995e1f" + dependencies: + get-value "^2.0.3" + has-values "^0.1.4" + isobject "^2.0.0" + +has-value@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-value/-/has-value-1.0.0.tgz#18b281da585b1c5c51def24c930ed29a0be6b177" + dependencies: + get-value "^2.0.6" + has-values "^1.0.0" + isobject "^3.0.0" + +has-values@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/has-values/-/has-values-0.1.4.tgz#6d61de95d91dfca9b9a02089ad384bff8f62b771" + +has-values@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-values/-/has-values-1.0.0.tgz#95b0b63fec2146619a6fe57fe75628d5a39efe4f" + dependencies: + is-number "^3.0.0" + kind-of "^4.0.0" + +has@^1.0.0, has@^1.0.1, has@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + dependencies: + function-bind "^1.1.1" + +hash-base@^3.0.0: + version "3.0.4" + resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-3.0.4.tgz#5fc8686847ecd73499403319a6b0a3f3f6ae4918" + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +hash.js@^1.0.0, hash.js@^1.0.3: + version "1.1.7" + resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.7.tgz#0babca538e8d4ee4a0f8988d68866537a003cf42" + dependencies: + inherits "^2.0.3" + minimalistic-assert "^1.0.1" + +hast-util-from-parse5@^5.0.0: + version "5.0.0" + resolved 
"https://registry.yarnpkg.com/hast-util-from-parse5/-/hast-util-from-parse5-5.0.0.tgz#a505a05766e0f96e389bfb0b1dd809eeefcef47b" + dependencies: + ccount "^1.0.3" + hastscript "^5.0.0" + property-information "^5.0.0" + web-namespaces "^1.1.2" + xtend "^4.0.1" + +hast-util-parse-selector@^2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/hast-util-parse-selector/-/hast-util-parse-selector-2.2.1.tgz#4ddbae1ae12c124e3eb91b581d2556441766f0ab" + +hastscript@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/hastscript/-/hastscript-5.0.0.tgz#fee10382c1bc4ba3f1be311521d368c047d2c43a" + dependencies: + comma-separated-tokens "^1.0.0" + hast-util-parse-selector "^2.2.0" + property-information "^5.0.1" + space-separated-tokens "^1.0.0" + +he@1.2.x: + version "1.2.0" + resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" + +hex-color-regex@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/hex-color-regex/-/hex-color-regex-1.1.0.tgz#4c06fccb4602fe2602b3c93df82d7e7dbf1a8a8e" + +history@^4.9.0: + version "4.9.0" + resolved "https://registry.yarnpkg.com/history/-/history-4.9.0.tgz#84587c2068039ead8af769e9d6a6860a14fa1bca" + dependencies: + "@babel/runtime" "^7.1.2" + loose-envify "^1.2.0" + resolve-pathname "^2.2.0" + tiny-invariant "^1.0.2" + tiny-warning "^1.0.0" + value-equal "^0.4.0" + +hmac-drbg@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1" + dependencies: + hash.js "^1.0.3" + minimalistic-assert "^1.0.0" + minimalistic-crypto-utils "^1.0.1" + +hoek@6.x.x: + version "6.1.3" + resolved "https://registry.yarnpkg.com/hoek/-/hoek-6.1.3.tgz#73b7d33952e01fe27a38b0457294b79dd8da242c" + +hoist-non-react-statics@^2.3.1: + version "2.5.5" + resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-2.5.5.tgz#c5903cf409c0dfd908f388e619d86b9c1174cb47" + +hoist-non-react-statics@^3.1.0, hoist-non-react-statics@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.0.tgz#b09178f0122184fb95acf525daaecb4d8f45958b" + dependencies: + react-is "^16.7.0" + +hosted-git-info@^2.1.4: + version "2.7.1" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.7.1.tgz#97f236977bd6e125408930ff6de3eec6281ec047" + +hpack.js@^2.1.6: + version "2.1.6" + resolved "https://registry.yarnpkg.com/hpack.js/-/hpack.js-2.1.6.tgz#87774c0949e513f42e84575b3c45681fade2a0b2" + dependencies: + inherits "^2.0.1" + obuf "^1.0.0" + readable-stream "^2.0.1" + wbuf "^1.1.0" + +hsl-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/hsl-regex/-/hsl-regex-1.0.0.tgz#d49330c789ed819e276a4c0d272dffa30b18fe6e" + +hsla-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/hsla-regex/-/hsla-regex-1.0.0.tgz#c1ce7a3168c8c6614033a4b5f7877f3b225f9c38" + +html-comment-regex@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/html-comment-regex/-/html-comment-regex-1.1.2.tgz#97d4688aeb5c81886a364faa0cad1dda14d433a7" + +html-encoding-sniffer@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-1.0.2.tgz#e70d84b94da53aa375e11fe3a351be6642ca46f8" + dependencies: + whatwg-encoding "^1.0.1" + +html-entities@^1.2.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/html-entities/-/html-entities-1.2.1.tgz#0df29351f0721163515dfb9e5543e5f6eed5162f" + 
+html-minifier@^3.5.20: + version "3.5.21" + resolved "https://registry.yarnpkg.com/html-minifier/-/html-minifier-3.5.21.tgz#d0040e054730e354db008463593194015212d20c" + dependencies: + camel-case "3.0.x" + clean-css "4.2.x" + commander "2.17.x" + he "1.2.x" + param-case "2.1.x" + relateurl "0.2.x" + uglify-js "3.4.x" + +html-webpack-plugin@4.0.0-beta.5: + version "4.0.0-beta.5" + resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-4.0.0-beta.5.tgz#2c53083c1151bfec20479b1f8aaf0039e77b5513" + dependencies: + html-minifier "^3.5.20" + loader-utils "^1.1.0" + lodash "^4.17.11" + pretty-error "^2.1.1" + tapable "^1.1.0" + util.promisify "1.0.0" + +htmlparser2@^3.3.0: + version "3.10.1" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.10.1.tgz#bd679dc3f59897b6a34bb10749c855bb53a9392f" + dependencies: + domelementtype "^1.3.1" + domhandler "^2.3.0" + domutils "^1.5.1" + entities "^1.1.1" + inherits "^2.0.1" + readable-stream "^3.1.1" + +http-deceiver@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/http-deceiver/-/http-deceiver-1.2.7.tgz#fa7168944ab9a519d337cb0bec7284dc3e723d87" + +http-errors@1.6.3, http-errors@~1.6.2, http-errors@~1.6.3: + version "1.6.3" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.3.tgz#8b55680bb4be283a0b5bf4ea2e38580be1d9320d" + dependencies: + depd "~1.1.2" + inherits "2.0.3" + setprototypeof "1.1.0" + statuses ">= 1.4.0 < 2" + +http-parser-js@>=0.4.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.5.0.tgz#d65edbede84349d0dc30320815a15d39cc3cbbd8" + +http-proxy-middleware@^0.19.1: + version "0.19.1" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz#183c7dc4aa1479150306498c210cdaf96080a43a" + dependencies: + http-proxy "^1.17.0" + is-glob "^4.0.0" + lodash "^4.17.11" + micromatch "^3.1.10" + +http-proxy@^1.17.0: + version "1.17.0" + resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.17.0.tgz#7ad38494658f84605e2f6db4436df410f4e5be9a" + dependencies: + eventemitter3 "^3.0.0" + follow-redirects "^1.0.0" + requires-port "^1.0.0" + +http-signature@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" + dependencies: + assert-plus "^1.0.0" + jsprim "^1.2.2" + sshpk "^1.7.0" + +https-browserify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/https-browserify/-/https-browserify-1.0.0.tgz#ec06c10e0a34c0f2faf199f7fd7fc78fffd03c73" + +iconv-lite@0.4.23: + version "0.4.23" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.23.tgz#297871f63be507adcfbfca715d0cd0eed84e9a63" + dependencies: + safer-buffer ">= 2.1.2 < 3" + +iconv-lite@0.4.24, iconv-lite@^0.4.24, iconv-lite@^0.4.4, iconv-lite@~0.4.13: + version "0.4.24" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" + dependencies: + safer-buffer ">= 2.1.2 < 3" + +icss-replace-symbols@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz#06ea6f83679a7749e386cfe1fe812ae5db223ded" + +icss-utils@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/icss-utils/-/icss-utils-4.1.0.tgz#339dbbffb9f8729a243b701e1c29d4cc58c52f0e" + dependencies: + postcss "^7.0.14" + +identity-obj-proxy@3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/identity-obj-proxy/-/identity-obj-proxy-3.0.0.tgz#94d2bda96084453ef36fbc5aaec37e0f79f1fc14" + dependencies: + harmony-reflect "^1.4.6" + +ieee754@^1.1.4: + version "1.1.13" + resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.1.13.tgz#ec168558e95aa181fd87d37f55c32bbcb6708b84" + +iferr@^0.1.5: + version "0.1.5" + resolved "https://registry.yarnpkg.com/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501" + +ignore-walk@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.1.tgz#a83e62e7d272ac0e3b551aaa82831a19b69f82f8" + dependencies: + minimatch "^3.0.4" + +ignore@^3.3.5: + version "3.3.10" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-3.3.10.tgz#0a97fb876986e8081c631160f8f9f389157f0043" + +ignore@^4.0.6: + version "4.0.6" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc" + +image-size@~0.5.0: + version "0.5.5" + resolved "https://registry.yarnpkg.com/image-size/-/image-size-0.5.5.tgz#09dfd4ab9d20e29eb1c3e80b8990378df9e3cb9c" + +immer@1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/immer/-/immer-1.10.0.tgz#bad67605ba9c810275d91e1c2a47d4582e98286d" + +immutable@^3.7.4: + version "3.8.2" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.8.2.tgz#c2439951455bb39913daf281376f1530e104adf3" + +immutable@~3.7.4: + version "3.7.6" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.7.6.tgz#13b4d3cb12befa15482a26fe1b2ebae640071e4b" + +import-cwd@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/import-cwd/-/import-cwd-2.1.0.tgz#aa6cf36e722761285cb371ec6519f53e2435b0a9" + dependencies: + import-from "^2.1.0" + +import-fresh@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-2.0.0.tgz#d81355c15612d386c61f9ddd3922d4304822a546" + dependencies: + caller-path "^2.0.0" + resolve-from "^3.0.0" + +import-fresh@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.0.0.tgz#a3d897f420cab0e671236897f75bc14b4885c390" + dependencies: + parent-module "^1.0.0" + resolve-from "^4.0.0" + +import-from@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/import-from/-/import-from-2.1.0.tgz#335db7f2a7affd53aaa471d4b8021dee36b7f3b1" + dependencies: + resolve-from "^3.0.0" + +import-local@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/import-local/-/import-local-2.0.0.tgz#55070be38a5993cf18ef6db7e961f5bee5c5a09d" + dependencies: + pkg-dir "^3.0.0" + resolve-cwd "^2.0.0" + +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + +indexes-of@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/indexes-of/-/indexes-of-1.0.1.tgz#f30f716c8e2bd346c7b67d3df3915566a7c05607" + +indexof@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d" + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.1, inherits@~2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + +inherits@2.0.1: + version "2.0.1" + 
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.1.tgz#b17d08d326b4423e568eff719f91b0b1cbdf69f1" + +ini@^1.3.5, ini@~1.3.0: + version "1.3.5" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927" + +inquirer@6.2.2: + version "6.2.2" + resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-6.2.2.tgz#46941176f65c9eb20804627149b743a218f25406" + dependencies: + ansi-escapes "^3.2.0" + chalk "^2.4.2" + cli-cursor "^2.1.0" + cli-width "^2.0.0" + external-editor "^3.0.3" + figures "^2.0.0" + lodash "^4.17.11" + mute-stream "0.0.7" + run-async "^2.2.0" + rxjs "^6.4.0" + string-width "^2.1.0" + strip-ansi "^5.0.0" + through "^2.3.6" + +inquirer@^6.2.2: + version "6.3.1" + resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-6.3.1.tgz#7a413b5e7950811013a3db491c61d1f3b776e8e7" + dependencies: + ansi-escapes "^3.2.0" + chalk "^2.4.2" + cli-cursor "^2.1.0" + cli-width "^2.0.0" + external-editor "^3.0.3" + figures "^2.0.0" + lodash "^4.17.11" + mute-stream "0.0.7" + run-async "^2.2.0" + rxjs "^6.4.0" + string-width "^2.1.0" + strip-ansi "^5.1.0" + through "^2.3.6" + +internal-ip@^4.2.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/internal-ip/-/internal-ip-4.3.0.tgz#845452baad9d2ca3b69c635a137acb9a0dad0907" + dependencies: + default-gateway "^4.2.0" + ipaddr.js "^1.9.0" + +invariant@^2.2.2, invariant@^2.2.4: + version "2.2.4" + resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" + dependencies: + loose-envify "^1.0.0" + +invert-kv@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-2.0.0.tgz#7393f5afa59ec9ff5f67a27620d11c226e3eec02" + +ip-regex@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9" + +ip@^1.1.0, ip@^1.1.5: + version "1.1.5" + resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.5.tgz#bdded70114290828c0a039e72ef25f5aaec4354a" + +ipaddr.js@1.9.0, ipaddr.js@^1.9.0: + version "1.9.0" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.0.tgz#37df74e430a0e47550fe54a2defe30d8acd95f65" + +is-absolute-url@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-2.1.0.tgz#50530dfb84fcc9aa7dbe7852e83a37b93b9f2aa6" + +is-accessor-descriptor@^0.1.6: + version "0.1.6" + resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz#a9e12cb3ae8d876727eeef3843f8a0897b5c98d6" + dependencies: + kind-of "^3.0.2" + +is-accessor-descriptor@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz#169c2f6d3df1f992618072365c9b0ea1f6878656" + dependencies: + kind-of "^6.0.0" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + +is-arrayish@^0.3.1: + version "0.3.2" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.3.2.tgz#4574a2ae56f7ab206896fb431eaeed066fdf8f03" + +is-binary-path@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898" + dependencies: + binary-extensions "^1.0.0" + +is-buffer@^1.0.2, is-buffer@^1.1.5: + version "1.1.6" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" + +is-buffer@^2.0.0: + 
version "2.0.3" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.3.tgz#4ecf3fcf749cbd1e472689e109ac66261a25e725" + +is-callable@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.1.4.tgz#1e1adf219e1eeb684d691f9d6a05ff0d30a24d75" + +is-ci@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-2.0.0.tgz#6bc6334181810e04b5c22b3d589fdca55026404c" + dependencies: + ci-info "^2.0.0" + +is-color-stop@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-color-stop/-/is-color-stop-1.1.0.tgz#cfff471aee4dd5c9e158598fbe12967b5cdad345" + dependencies: + css-color-names "^0.0.4" + hex-color-regex "^1.1.0" + hsl-regex "^1.0.0" + hsla-regex "^1.0.0" + rgb-regex "^1.0.1" + rgba-regex "^1.0.0" + +is-data-descriptor@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz#0b5ee648388e2c860282e793f1856fec3f301b56" + dependencies: + kind-of "^3.0.2" + +is-data-descriptor@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz#d84876321d0e7add03990406abbbbd36ba9268c7" + dependencies: + kind-of "^6.0.0" + +is-date-object@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.1.tgz#9aa20eb6aeebbff77fbd33e74ca01b33581d3a16" + +is-descriptor@^0.1.0: + version "0.1.6" + resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-0.1.6.tgz#366d8240dde487ca51823b1ab9f07a10a78251ca" + dependencies: + is-accessor-descriptor "^0.1.6" + is-data-descriptor "^0.1.4" + kind-of "^5.0.0" + +is-descriptor@^1.0.0, is-descriptor@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-1.0.2.tgz#3b159746a66604b04f8c81524ba365c5f14d86ec" + dependencies: + is-accessor-descriptor "^1.0.0" + is-data-descriptor "^1.0.0" + kind-of "^6.0.2" + +is-directory@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/is-directory/-/is-directory-0.3.1.tgz#61339b6f2475fc772fd9c9d83f5c8575dc154ae1" + +is-extendable@^0.1.0, is-extendable@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" + +is-extendable@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-1.0.1.tgz#a7470f9e426733d81bd81e1155264e3a3507cab4" + dependencies: + is-plain-object "^2.0.4" + +is-extglob@^2.1.0, is-extglob@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + +is-fullwidth-code-point@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + +is-generator-fn@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-generator-fn/-/is-generator-fn-2.1.0.tgz#7d140adc389aaf3011a8f2a2a4cfa6faadffb118" + +is-glob@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-3.1.0.tgz#7ba5ae24217804ac70707b96922567486cc3e84a" + dependencies: + is-extglob "^2.1.0" + +is-glob@^4.0.0: + version "4.0.1" + resolved 
"https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.1.tgz#7567dbe9f2f5e2467bc77ab83c4a29482407a5dc" + dependencies: + is-extglob "^2.1.1" + +is-number@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-3.0.0.tgz#24fd6201a4782cf50561c810276afc7d12d71195" + dependencies: + kind-of "^3.0.2" + +is-obj@^1.0.0, is-obj@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f" + +is-path-cwd@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-path-cwd/-/is-path-cwd-1.0.0.tgz#d225ec23132e89edd38fda767472e62e65f1106d" + +is-path-in-cwd@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-in-cwd/-/is-path-in-cwd-1.0.1.tgz#5ac48b345ef675339bd6c7a48a912110b241cf52" + dependencies: + is-path-inside "^1.0.0" + +is-path-inside@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-1.0.1.tgz#8ef5b7de50437a3fdca6b4e865ef7aa55cb48036" + dependencies: + path-is-inside "^1.0.1" + +is-plain-obj@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + +is-plain-object@^2.0.1, is-plain-object@^2.0.3, is-plain-object@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" + dependencies: + isobject "^3.0.1" + +is-promise@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-promise/-/is-promise-2.1.0.tgz#79a2a9ece7f096e80f36d2b2f3bc16c1ff4bf3fa" + +is-regex@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.0.4.tgz#5517489b547091b0930e095654ced25ee97e9491" + dependencies: + has "^1.0.1" + +is-regexp@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-regexp/-/is-regexp-1.0.0.tgz#fd2d883545c46bac5a633e7b9a09e87fa2cb5069" + +is-resolvable@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-resolvable/-/is-resolvable-1.1.0.tgz#fb18f87ce1feb925169c9a407c19318a3206ed88" + +is-root@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-root/-/is-root-2.0.0.tgz#838d1e82318144e5a6f77819d90207645acc7019" + +is-stream@^1.0.1, is-stream@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" + +is-svg@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-svg/-/is-svg-3.0.0.tgz#9321dbd29c212e5ca99c4fa9794c714bcafa2f75" + dependencies: + html-comment-regex "^1.1.0" + +is-symbol@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.2.tgz#a055f6ae57192caee329e7a860118b497a950f38" + dependencies: + has-symbols "^1.0.0" + +is-typedarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + +is-windows@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d" + +is-wsl@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-1.1.0.tgz#1f16e4aa22b04d1336b66188a66af3c600c3a66d" + +isarray@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" + +isarray@1.0.0, isarray@^1.0.0, isarray@~1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + +isemail@3.x.x: + version "3.2.0" + resolved "https://registry.yarnpkg.com/isemail/-/isemail-3.2.0.tgz#59310a021931a9fb06bbb51e155ce0b3f236832c" + dependencies: + punycode "2.x.x" + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + +ismobilejs@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/ismobilejs/-/ismobilejs-0.5.1.tgz#0e3f825e29e32f84ad5ddbb60e9e04a894046488" + +isobject@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" + dependencies: + isarray "1.0.0" + +isobject@^3.0.0, isobject@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" + +isomorphic-fetch@^2.1.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz#611ae1acf14f5e81f729507472819fe9733558a9" + dependencies: + node-fetch "^1.0.1" + whatwg-fetch ">=0.10.0" + +isstream@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" + +istanbul-api@^2.1.1: + version "2.1.5" + resolved "https://registry.yarnpkg.com/istanbul-api/-/istanbul-api-2.1.5.tgz#697b95ec69856c278aacafc0f86ee7392338d5b5" + dependencies: + async "^2.6.1" + compare-versions "^3.2.1" + fileset "^2.0.3" + istanbul-lib-coverage "^2.0.4" + istanbul-lib-hook "^2.0.6" + istanbul-lib-instrument "^3.2.0" + istanbul-lib-report "^2.0.7" + istanbul-lib-source-maps "^3.0.5" + istanbul-reports "^2.2.3" + js-yaml "^3.13.0" + make-dir "^2.1.0" + minimatch "^3.0.4" + once "^1.4.0" + +istanbul-lib-coverage@^2.0.2, istanbul-lib-coverage@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz#927a354005d99dd43a24607bb8b33fd4e9aca1ad" + +istanbul-lib-hook@^2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/istanbul-lib-hook/-/istanbul-lib-hook-2.0.6.tgz#5baa6067860a38290aef038b389068b225b01b7d" + dependencies: + append-transform "^1.0.0" + +istanbul-lib-instrument@^3.0.1, istanbul-lib-instrument@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-3.2.0.tgz#c549208da8a793f6622257a2da83e0ea96ae6a93" + dependencies: + "@babel/generator" "^7.0.0" + "@babel/parser" "^7.0.0" + "@babel/template" "^7.0.0" + "@babel/traverse" "^7.0.0" + "@babel/types" "^7.0.0" + istanbul-lib-coverage "^2.0.4" + semver "^6.0.0" + +istanbul-lib-report@^2.0.7: + version "2.0.7" + resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-2.0.7.tgz#370d80d433c4dbc7f58de63618f49599c74bd954" + dependencies: + istanbul-lib-coverage "^2.0.4" + make-dir "^2.1.0" + supports-color "^6.0.0" + +istanbul-lib-source-maps@^3.0.1, istanbul-lib-source-maps@^3.0.5: + version "3.0.5" + resolved "https://registry.yarnpkg.com/istanbul-lib-source-maps/-/istanbul-lib-source-maps-3.0.5.tgz#1d9ee9d94d2633f15611ee7aae28f9cac6d1aeb9" + dependencies: + debug "^4.1.1" + istanbul-lib-coverage "^2.0.4" + make-dir "^2.1.0" + rimraf "^2.6.2" + source-map "^0.6.1" + +istanbul-reports@^2.2.3: + version "2.2.3" + resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-2.2.3.tgz#14e0d00ecbfa9387757999cf36599b88e9f2176e" + dependencies: + handlebars 
"^4.1.0" + +jest-changed-files@^24.7.0: + version "24.7.0" + resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-24.7.0.tgz#39d723a11b16ed7b373ac83adc76a69464b0c4fa" + dependencies: + "@jest/types" "^24.7.0" + execa "^1.0.0" + throat "^4.0.0" + +jest-cli@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-24.7.1.tgz#6093a539073b6f4953145abeeb9709cd621044f1" + dependencies: + "@jest/core" "^24.7.1" + "@jest/test-result" "^24.7.1" + "@jest/types" "^24.7.0" + chalk "^2.0.1" + exit "^0.1.2" + import-local "^2.0.0" + is-ci "^2.0.0" + jest-config "^24.7.1" + jest-util "^24.7.1" + jest-validate "^24.7.0" + prompts "^2.0.1" + realpath-native "^1.1.0" + yargs "^12.0.2" + +jest-config@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-24.7.1.tgz#6c1dd4db82a89710a3cf66bdba97827c9a1cf052" + dependencies: + "@babel/core" "^7.1.0" + "@jest/test-sequencer" "^24.7.1" + "@jest/types" "^24.7.0" + babel-jest "^24.7.1" + chalk "^2.0.1" + glob "^7.1.1" + jest-environment-jsdom "^24.7.1" + jest-environment-node "^24.7.1" + jest-get-type "^24.3.0" + jest-jasmine2 "^24.7.1" + jest-regex-util "^24.3.0" + jest-resolve "^24.7.1" + jest-util "^24.7.1" + jest-validate "^24.7.0" + micromatch "^3.1.10" + pretty-format "^24.7.0" + realpath-native "^1.1.0" + +jest-diff@^24.7.0: + version "24.7.0" + resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-24.7.0.tgz#5d862899be46249754806f66e5729c07fcb3580f" + dependencies: + chalk "^2.0.1" + diff-sequences "^24.3.0" + jest-get-type "^24.3.0" + pretty-format "^24.7.0" + +jest-docblock@^24.3.0: + version "24.3.0" + resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-24.3.0.tgz#b9c32dac70f72e4464520d2ba4aec02ab14db5dd" + dependencies: + detect-newline "^2.1.0" + +jest-each@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-24.7.1.tgz#fcc7dda4147c28430ad9fb6dc7211cd17ab54e74" + dependencies: + "@jest/types" "^24.7.0" + chalk "^2.0.1" + jest-get-type "^24.3.0" + jest-util "^24.7.1" + pretty-format "^24.7.0" + +jest-environment-jsdom-fourteen@0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/jest-environment-jsdom-fourteen/-/jest-environment-jsdom-fourteen-0.1.0.tgz#aad6393a9d4b565b69a609109bf469f62bf18ccc" + dependencies: + jest-mock "^24.5.0" + jest-util "^24.5.0" + jsdom "^14.0.0" + +jest-environment-jsdom@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-environment-jsdom/-/jest-environment-jsdom-24.7.1.tgz#a40e004b4458ebeb8a98082df135fd501b9fbbd6" + dependencies: + "@jest/environment" "^24.7.1" + "@jest/fake-timers" "^24.7.1" + "@jest/types" "^24.7.0" + jest-mock "^24.7.0" + jest-util "^24.7.1" + jsdom "^11.5.1" + +jest-environment-node@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-24.7.1.tgz#fa2c047a31522a48038d26ee4f7c8fd9c1ecfe12" + dependencies: + "@jest/environment" "^24.7.1" + "@jest/fake-timers" "^24.7.1" + "@jest/types" "^24.7.0" + jest-mock "^24.7.0" + jest-util "^24.7.1" + +jest-get-type@^24.3.0: + version "24.3.0" + resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-24.3.0.tgz#582cfd1a4f91b5cdad1d43d2932f816d543c65da" + +jest-haste-map@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-24.7.1.tgz#772e215cd84080d4bbcb759cfb668ad649a21471" + dependencies: + "@jest/types" "^24.7.0" + anymatch "^2.0.0" + fb-watchman "^2.0.0" + 
graceful-fs "^4.1.15" + invariant "^2.2.4" + jest-serializer "^24.4.0" + jest-util "^24.7.1" + jest-worker "^24.6.0" + micromatch "^3.1.10" + sane "^4.0.3" + walker "^1.0.7" + optionalDependencies: + fsevents "^1.2.7" + +jest-jasmine2@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-jasmine2/-/jest-jasmine2-24.7.1.tgz#01398686dabe46553716303993f3be62e5d9d818" + dependencies: + "@babel/traverse" "^7.1.0" + "@jest/environment" "^24.7.1" + "@jest/test-result" "^24.7.1" + "@jest/types" "^24.7.0" + chalk "^2.0.1" + co "^4.6.0" + expect "^24.7.1" + is-generator-fn "^2.0.0" + jest-each "^24.7.1" + jest-matcher-utils "^24.7.0" + jest-message-util "^24.7.1" + jest-runtime "^24.7.1" + jest-snapshot "^24.7.1" + jest-util "^24.7.1" + pretty-format "^24.7.0" + throat "^4.0.0" + +jest-leak-detector@^24.7.0: + version "24.7.0" + resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-24.7.0.tgz#323ff93ed69be12e898f5b040952f08a94288ff9" + dependencies: + pretty-format "^24.7.0" + +jest-matcher-utils@^24.7.0: + version "24.7.0" + resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-24.7.0.tgz#bbee1ff37bc8b2e4afcaabc91617c1526af4bcd4" + dependencies: + chalk "^2.0.1" + jest-diff "^24.7.0" + jest-get-type "^24.3.0" + pretty-format "^24.7.0" + +jest-message-util@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-24.7.1.tgz#f1dc3a6c195647096a99d0f1dadbc447ae547018" + dependencies: + "@babel/code-frame" "^7.0.0" + "@jest/test-result" "^24.7.1" + "@jest/types" "^24.7.0" + "@types/stack-utils" "^1.0.1" + chalk "^2.0.1" + micromatch "^3.1.10" + slash "^2.0.0" + stack-utils "^1.0.1" + +jest-mock@^24.5.0, jest-mock@^24.7.0: + version "24.7.0" + resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-24.7.0.tgz#e49ce7262c12d7f5897b0d8af77f6db8e538023b" + dependencies: + "@jest/types" "^24.7.0" + +jest-pnp-resolver@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/jest-pnp-resolver/-/jest-pnp-resolver-1.2.1.tgz#ecdae604c077a7fbc70defb6d517c3c1c898923a" + +jest-regex-util@^24.3.0: + version "24.3.0" + resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-24.3.0.tgz#d5a65f60be1ae3e310d5214a0307581995227b36" + +jest-resolve-dependencies@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-24.7.1.tgz#cf93bbef26999488a96a2b2012f9fe7375aa378f" + dependencies: + "@jest/types" "^24.7.0" + jest-regex-util "^24.3.0" + jest-snapshot "^24.7.1" + +jest-resolve@24.7.1, jest-resolve@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-24.7.1.tgz#e4150198299298380a75a9fd55043fa3b9b17fde" + dependencies: + "@jest/types" "^24.7.0" + browser-resolve "^1.11.3" + chalk "^2.0.1" + jest-pnp-resolver "^1.2.1" + realpath-native "^1.1.0" + +jest-runner@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-24.7.1.tgz#41c8a02a06aa23ea82d8bffd69d7fa98d32f85bf" + dependencies: + "@jest/console" "^24.7.1" + "@jest/environment" "^24.7.1" + "@jest/test-result" "^24.7.1" + "@jest/types" "^24.7.0" + chalk "^2.4.2" + exit "^0.1.2" + graceful-fs "^4.1.15" + jest-config "^24.7.1" + jest-docblock "^24.3.0" + jest-haste-map "^24.7.1" + jest-jasmine2 "^24.7.1" + jest-leak-detector "^24.7.0" + jest-message-util "^24.7.1" + jest-resolve "^24.7.1" + jest-runtime "^24.7.1" + jest-util "^24.7.1" + jest-worker "^24.6.0" + source-map-support 
"^0.5.6" + throat "^4.0.0" + +jest-runtime@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-24.7.1.tgz#2ffd70b22dd03a5988c0ab9465c85cdf5d25c597" + dependencies: + "@jest/console" "^24.7.1" + "@jest/environment" "^24.7.1" + "@jest/source-map" "^24.3.0" + "@jest/transform" "^24.7.1" + "@jest/types" "^24.7.0" + "@types/yargs" "^12.0.2" + chalk "^2.0.1" + exit "^0.1.2" + glob "^7.1.3" + graceful-fs "^4.1.15" + jest-config "^24.7.1" + jest-haste-map "^24.7.1" + jest-message-util "^24.7.1" + jest-mock "^24.7.0" + jest-regex-util "^24.3.0" + jest-resolve "^24.7.1" + jest-snapshot "^24.7.1" + jest-util "^24.7.1" + jest-validate "^24.7.0" + realpath-native "^1.1.0" + slash "^2.0.0" + strip-bom "^3.0.0" + yargs "^12.0.2" + +jest-serializer@^24.4.0: + version "24.4.0" + resolved "https://registry.yarnpkg.com/jest-serializer/-/jest-serializer-24.4.0.tgz#f70c5918c8ea9235ccb1276d232e459080588db3" + +jest-snapshot@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-24.7.1.tgz#bd5a35f74aedff070975e9e9c90024f082099568" + dependencies: + "@babel/types" "^7.0.0" + "@jest/types" "^24.7.0" + chalk "^2.0.1" + expect "^24.7.1" + jest-diff "^24.7.0" + jest-matcher-utils "^24.7.0" + jest-message-util "^24.7.1" + jest-resolve "^24.7.1" + mkdirp "^0.5.1" + natural-compare "^1.4.0" + pretty-format "^24.7.0" + semver "^5.5.0" + +jest-util@^24.5.0, jest-util@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-24.7.1.tgz#b4043df57b32a23be27c75a2763d8faf242038ff" + dependencies: + "@jest/console" "^24.7.1" + "@jest/fake-timers" "^24.7.1" + "@jest/source-map" "^24.3.0" + "@jest/test-result" "^24.7.1" + "@jest/types" "^24.7.0" + callsites "^3.0.0" + chalk "^2.0.1" + graceful-fs "^4.1.15" + is-ci "^2.0.0" + mkdirp "^0.5.1" + slash "^2.0.0" + source-map "^0.6.0" + +jest-validate@^24.7.0: + version "24.7.0" + resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-24.7.0.tgz#70007076f338528ee1b1c8a8258b1b0bb982508d" + dependencies: + "@jest/types" "^24.7.0" + camelcase "^5.0.0" + chalk "^2.0.1" + jest-get-type "^24.3.0" + leven "^2.1.0" + pretty-format "^24.7.0" + +jest-watch-typeahead@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/jest-watch-typeahead/-/jest-watch-typeahead-0.3.0.tgz#f56d9ee17ea71ecbf8253fed213df3185a1584c9" + dependencies: + ansi-escapes "^3.0.0" + chalk "^2.4.1" + jest-watcher "^24.3.0" + slash "^2.0.0" + string-length "^2.0.0" + strip-ansi "^5.0.0" + +jest-watcher@^24.3.0, jest-watcher@^24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-24.7.1.tgz#e161363d7f3f4e1ef3d389b7b3a0aad247b673f5" + dependencies: + "@jest/test-result" "^24.7.1" + "@jest/types" "^24.7.0" + "@types/yargs" "^12.0.9" + ansi-escapes "^3.0.0" + chalk "^2.0.1" + jest-util "^24.7.1" + string-length "^2.0.0" + +jest-worker@^24.6.0: + version "24.6.0" + resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-24.6.0.tgz#7f81ceae34b7cde0c9827a6980c35b7cdc0161b3" + dependencies: + merge-stream "^1.0.1" + supports-color "^6.1.0" + +jest@24.7.1: + version "24.7.1" + resolved "https://registry.yarnpkg.com/jest/-/jest-24.7.1.tgz#0d94331cf510c75893ee32f87d7321d5bf8f2501" + dependencies: + import-local "^2.0.0" + jest-cli "^24.7.1" + +joi@^14.3.1: + version "14.3.1" + resolved "https://registry.yarnpkg.com/joi/-/joi-14.3.1.tgz#164a262ec0b855466e0c35eea2a885ae8b6c703c" + dependencies: + hoek "6.x.x" + isemail "3.x.x" + topo "3.x.x" + 
+js-levenshtein@^1.1.3: + version "1.1.6" + resolved "https://registry.yarnpkg.com/js-levenshtein/-/js-levenshtein-1.1.6.tgz#c6cee58eb3550372df8deb85fad5ce66ce01d59d" + +"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + +js-tokens@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" + +js-yaml@^3.13.0, js-yaml@^3.13.1, js-yaml@^3.9.0: + version "3.13.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.13.1.tgz#aff151b30bfdfa8e49e05da22e7415e9dfa37847" + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +jsbn@~0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" + +jsdom@^11.5.1: + version "11.12.0" + resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-11.12.0.tgz#1a80d40ddd378a1de59656e9e6dc5a3ba8657bc8" + dependencies: + abab "^2.0.0" + acorn "^5.5.3" + acorn-globals "^4.1.0" + array-equal "^1.0.0" + cssom ">= 0.3.2 < 0.4.0" + cssstyle "^1.0.0" + data-urls "^1.0.0" + domexception "^1.0.1" + escodegen "^1.9.1" + html-encoding-sniffer "^1.0.2" + left-pad "^1.3.0" + nwsapi "^2.0.7" + parse5 "4.0.0" + pn "^1.1.0" + request "^2.87.0" + request-promise-native "^1.0.5" + sax "^1.2.4" + symbol-tree "^3.2.2" + tough-cookie "^2.3.4" + w3c-hr-time "^1.0.1" + webidl-conversions "^4.0.2" + whatwg-encoding "^1.0.3" + whatwg-mimetype "^2.1.0" + whatwg-url "^6.4.1" + ws "^5.2.0" + xml-name-validator "^3.0.0" + +jsdom@^14.0.0: + version "14.1.0" + resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-14.1.0.tgz#916463b6094956b0a6c1782c94e380cd30e1981b" + dependencies: + abab "^2.0.0" + acorn "^6.0.4" + acorn-globals "^4.3.0" + array-equal "^1.0.0" + cssom "^0.3.4" + cssstyle "^1.1.1" + data-urls "^1.1.0" + domexception "^1.0.1" + escodegen "^1.11.0" + html-encoding-sniffer "^1.0.2" + nwsapi "^2.1.3" + parse5 "5.1.0" + pn "^1.1.0" + request "^2.88.0" + request-promise-native "^1.0.5" + saxes "^3.1.9" + symbol-tree "^3.2.2" + tough-cookie "^2.5.0" + w3c-hr-time "^1.0.1" + w3c-xmlserializer "^1.1.2" + webidl-conversions "^4.0.2" + whatwg-encoding "^1.0.5" + whatwg-mimetype "^2.3.0" + whatwg-url "^7.0.0" + ws "^6.1.2" + xml-name-validator "^3.0.0" + +jsesc@^2.5.1: + version "2.5.2" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" + +jsesc@~0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d" + +json-parse-better-errors@^1.0.1, json-parse-better-errors@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + +json-schema@0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" + +json-stable-stringify-without-jsonify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" + +json-stable-stringify@^1.0.1: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af" + dependencies: + jsonify "~0.0.0" + +json-stringify-safe@~5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" + +json2mq@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/json2mq/-/json2mq-0.2.0.tgz#b637bd3ba9eabe122c83e9720483aeb10d2c904a" + dependencies: + string-convert "^0.2.0" + +json3@^3.3.2: + version "3.3.2" + resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1" + +json5@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe" + dependencies: + minimist "^1.2.0" + +json5@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.1.0.tgz#e7a0c62c48285c628d20a10b85c89bb807c32850" + dependencies: + minimist "^1.2.0" + +jsonfile@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb" + optionalDependencies: + graceful-fs "^4.1.6" + +jsonify@~0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73" + +jsprim@^1.2.2: + version "1.4.1" + resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.1.tgz#313e66bc1e5cc06e438bc1b7499c2e5c56acb6a2" + dependencies: + assert-plus "1.0.0" + extsprintf "1.3.0" + json-schema "0.2.3" + verror "1.10.0" + +jsx-ast-utils@^2.0.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/jsx-ast-utils/-/jsx-ast-utils-2.1.0.tgz#0ee4e2c971fb9601c67b5641b71be80faecf0b36" + dependencies: + array-includes "^3.0.3" + +killable@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/killable/-/killable-1.0.1.tgz#4c8ce441187a061c7474fb87ca08e2a638194892" + +kind-of@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-2.0.1.tgz#018ec7a4ce7e3a86cb9141be519d24c8faa981b5" + dependencies: + is-buffer "^1.0.2" + +kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0: + version "3.2.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" + dependencies: + is-buffer "^1.1.5" + +kind-of@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-4.0.0.tgz#20813df3d712928b207378691a45066fae72dd57" + dependencies: + is-buffer "^1.1.5" + +kind-of@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-5.1.0.tgz#729c91e2d857b7a419a1f9aa65685c4c33f5845d" + +kind-of@^6.0.0, kind-of@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.2.tgz#01146b36a6218e64e58f3a8d66de5d7fc6f6d051" + +kleur@^3.0.2: + version "3.0.3" + resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" + +last-call-webpack-plugin@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/last-call-webpack-plugin/-/last-call-webpack-plugin-3.0.0.tgz#9742df0e10e3cf46e5c0381c2de90d3a7a2d7555" + dependencies: + lodash "^4.17.5" + webpack-sources "^1.1.0" + +lazy-cache@^0.2.3: + version "0.2.7" + resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-0.2.7.tgz#7feddf2dcb6edb77d11ef1d117ab5ffdf0ab1b65" + +lazy-cache@^1.0.3: + version "1.0.4" + resolved 
"https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e" + +lcid@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/lcid/-/lcid-2.0.0.tgz#6ef5d2df60e52f82eb228a4c373e8d1f397253cf" + dependencies: + invert-kv "^2.0.0" + +left-pad@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/left-pad/-/left-pad-1.3.0.tgz#5b8a3a7765dfe001261dde915589e782f8c94d1e" + +less-loader@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/less-loader/-/less-loader-5.0.0.tgz#498dde3a6c6c4f887458ee9ed3f086a12ad1b466" + dependencies: + clone "^2.1.1" + loader-utils "^1.1.0" + pify "^4.0.1" + +less@^3.9.0: + version "3.9.0" + resolved "https://registry.yarnpkg.com/less/-/less-3.9.0.tgz#b7511c43f37cf57dc87dffd9883ec121289b1474" + dependencies: + clone "^2.1.2" + optionalDependencies: + errno "^0.1.1" + graceful-fs "^4.1.2" + image-size "~0.5.0" + mime "^1.4.1" + mkdirp "^0.5.0" + promise "^7.1.1" + request "^2.83.0" + source-map "~0.6.0" + +leven@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/leven/-/leven-2.1.0.tgz#c2e7a9f772094dee9d34202ae8acce4687875580" + +levn@^0.3.0, levn@~0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" + dependencies: + prelude-ls "~1.1.2" + type-check "~0.3.2" + +load-json-file@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-2.0.0.tgz#7947e42149af80d696cbf797bcaabcfe1fe29ca8" + dependencies: + graceful-fs "^4.1.2" + parse-json "^2.2.0" + pify "^2.0.0" + strip-bom "^3.0.0" + +load-json-file@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-4.0.0.tgz#2f5f45ab91e33216234fd53adab668eb4ec0993b" + dependencies: + graceful-fs "^4.1.2" + parse-json "^4.0.0" + pify "^3.0.0" + strip-bom "^3.0.0" + +loader-fs-cache@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/loader-fs-cache/-/loader-fs-cache-1.0.2.tgz#54cedf6b727e1779fd8f01205f05f6e88706f086" + dependencies: + find-cache-dir "^0.1.1" + mkdirp "0.5.1" + +loader-runner@^2.3.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/loader-runner/-/loader-runner-2.4.0.tgz#ed47066bfe534d7e84c4c7b9998c2a75607d9357" + +loader-utils@1.2.3, loader-utils@^1.0.1, loader-utils@^1.0.2, loader-utils@^1.1.0, loader-utils@^1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.2.3.tgz#1ff5dc6911c9f0a062531a4c04b609406108c2c7" + dependencies: + big.js "^5.2.2" + emojis-list "^2.0.0" + json5 "^1.0.1" + +locate-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e" + dependencies: + p-locate "^2.0.0" + path-exists "^3.0.0" + +locate-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e" + dependencies: + p-locate "^3.0.0" + path-exists "^3.0.0" + +lodash._getnative@^3.0.0: + version "3.9.1" + resolved "https://registry.yarnpkg.com/lodash._getnative/-/lodash._getnative-3.9.1.tgz#570bc7dede46d61cdcde687d65d3eecbaa3aaff5" + +lodash._reinterpolate@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz#0ccf2d89166af03b3663c796538b75ac6e114d9d" + +lodash.debounce@^4.0.0, lodash.debounce@^4.0.8: + version "4.0.8" + resolved 
"https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" + +lodash.flow@^3.5.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/lodash.flow/-/lodash.flow-3.5.0.tgz#87bf40292b8cf83e4e8ce1a3ae4209e20071675a" + +lodash.isarguments@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz#2f573d85c6a24289ff00663b491c1d338ff3458a" + +lodash.isarray@^3.0.0: + version "3.0.4" + resolved "https://registry.yarnpkg.com/lodash.isarray/-/lodash.isarray-3.0.4.tgz#79e4eb88c36a8122af86f844aa9bcd851b5fbb55" + +lodash.keys@^3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-3.1.2.tgz#4dbc0472b156be50a0b286855d1bd0b0c656098a" + dependencies: + lodash._getnative "^3.0.0" + lodash.isarguments "^3.0.0" + lodash.isarray "^3.0.0" + +lodash.memoize@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" + +lodash.sortby@^4.7.0: + version "4.7.0" + resolved "https://registry.yarnpkg.com/lodash.sortby/-/lodash.sortby-4.7.0.tgz#edd14c824e2cc9c1e0b0a1b42bb5210516a42438" + +lodash.tail@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lodash.tail/-/lodash.tail-4.1.1.tgz#d2333a36d9e7717c8ad2f7cacafec7c32b444664" + +lodash.template@^4.2.4, lodash.template@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.template/-/lodash.template-4.4.0.tgz#e73a0385c8355591746e020b99679c690e68fba0" + dependencies: + lodash._reinterpolate "~3.0.0" + lodash.templatesettings "^4.0.0" + +lodash.templatesettings@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/lodash.templatesettings/-/lodash.templatesettings-4.1.0.tgz#2b4d4e95ba440d915ff08bc899e4553666713316" + dependencies: + lodash._reinterpolate "~3.0.0" + +lodash.throttle@^4.0.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lodash.throttle/-/lodash.throttle-4.1.1.tgz#c23e91b710242ac70c37f1e1cda9274cc39bf2f4" + +lodash.unescape@4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/lodash.unescape/-/lodash.unescape-4.0.1.tgz#bf2249886ce514cda112fae9218cdc065211fc9c" + +lodash.uniq@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" + +"lodash@>=3.5 <5", lodash@^4.16.5, lodash@^4.17.10, lodash@^4.17.11, lodash@^4.17.4, lodash@^4.17.5: + version "4.17.11" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.11.tgz#b39ea6229ef607ecd89e2c8df12536891cac9b8d" + +loglevel@^1.4.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.6.1.tgz#e0fc95133b6ef276cdc8887cdaf24aa6f156f8fa" + +loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0, loose-envify@^1.3.1, loose-envify@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" + dependencies: + js-tokens "^3.0.0 || ^4.0.0" + +lower-case@^1.1.1: + version "1.1.4" + resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-1.1.4.tgz#9a2cabd1b9e8e0ae993a4bf7d5875c39c42e8eac" + +lru-cache@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" + dependencies: + yallist "^3.0.2" + +make-dir@^2.0.0, make-dir@^2.1.0: + version "2.1.0" + resolved 
"https://registry.yarnpkg.com/make-dir/-/make-dir-2.1.0.tgz#5f0310e18b8be898cc07009295a30ae41e91e6f5" + dependencies: + pify "^4.0.1" + semver "^5.6.0" + +makeerror@1.0.x: + version "1.0.11" + resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.11.tgz#e01a5c9109f2af79660e4e8b9587790184f5a96c" + dependencies: + tmpl "1.0.x" + +mamacro@^0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/mamacro/-/mamacro-0.0.3.tgz#ad2c9576197c9f1abf308d0787865bd975a3f3e4" + +map-age-cleaner@^0.1.1: + version "0.1.3" + resolved "https://registry.yarnpkg.com/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz#7d583a7306434c055fe474b0f45078e6e1b4b92a" + dependencies: + p-defer "^1.0.0" + +map-cache@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/map-cache/-/map-cache-0.2.2.tgz#c32abd0bd6525d9b051645bb4f26ac5dc98a0dbf" + +map-visit@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/map-visit/-/map-visit-1.0.0.tgz#ecdca8f13144e660f1b5bd41f12f3479d98dfb8f" + dependencies: + object-visit "^1.0.0" + +md5.js@^1.3.4: + version "1.3.5" + resolved "https://registry.yarnpkg.com/md5.js/-/md5.js-1.3.5.tgz#b5d07b8e3216e3e27cd728d72f70d1e6a342005f" + dependencies: + hash-base "^3.0.0" + inherits "^2.0.1" + safe-buffer "^5.1.2" + +mdn-data@~1.1.0: + version "1.1.4" + resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-1.1.4.tgz#50b5d4ffc4575276573c4eedb8780812a8419f01" + +media-typer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" + +mem@^4.0.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/mem/-/mem-4.3.0.tgz#461af497bc4ae09608cdb2e60eefb69bff744178" + dependencies: + map-age-cleaner "^0.1.1" + mimic-fn "^2.0.0" + p-is-promise "^2.0.0" + +memory-fs@^0.4.0, memory-fs@^0.4.1, memory-fs@~0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.4.1.tgz#3a9a20b8462523e447cfbc7e8bb80ed667bfc552" + dependencies: + errno "^0.1.3" + readable-stream "^2.0.1" + +merge-deep@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/merge-deep/-/merge-deep-3.0.2.tgz#f39fa100a4f1bd34ff29f7d2bf4508fbb8d83ad2" + dependencies: + arr-union "^3.1.0" + clone-deep "^0.2.4" + kind-of "^3.0.2" + +merge-descriptors@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" + +merge-stream@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-1.0.1.tgz#4041202d508a342ba00174008df0c251b8c135e1" + dependencies: + readable-stream "^2.0.1" + +merge2@^1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.2.3.tgz#7ee99dbd69bb6481689253f018488a1b902b0ed5" + +methods@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" + +micromatch@^3.1.10, micromatch@^3.1.4, micromatch@^3.1.8: + version "3.1.10" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-3.1.10.tgz#70859bc95c9840952f359a068a3fc49f9ecfac23" + dependencies: + arr-diff "^4.0.0" + array-unique "^0.3.2" + braces "^2.3.1" + define-property "^2.0.2" + extend-shallow "^3.0.2" + extglob "^2.0.4" + fragment-cache "^0.2.1" + kind-of "^6.0.2" + nanomatch "^1.2.9" + object.pick "^1.3.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.2" + +miller-rabin@^4.0.0: + version "4.0.1" + resolved 
"https://registry.yarnpkg.com/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d" + dependencies: + bn.js "^4.0.0" + brorand "^1.0.1" + +mime-db@1.40.0, "mime-db@>= 1.38.0 < 2": + version "1.40.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.40.0.tgz#a65057e998db090f732a68f6c276d387d4126c32" + +mime-types@^2.1.12, mime-types@~2.1.17, mime-types@~2.1.18, mime-types@~2.1.19: + version "2.1.24" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.24.tgz#b6f8d0b3e951efb77dedeca194cff6d16f676f81" + dependencies: + mime-db "1.40.0" + +mime@1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.4.1.tgz#121f9ebc49e3766f311a76e1fa1c8003c4b03aa6" + +mime@^1.4.1: + version "1.6.0" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" + +mime@^2.0.3, mime@^2.3.1: + version "2.4.2" + resolved "https://registry.yarnpkg.com/mime/-/mime-2.4.2.tgz#ce5229a5e99ffc313abac806b482c10e7ba6ac78" + +mimic-fn@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022" + +mimic-fn@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" + +mini-css-extract-plugin@0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-0.5.0.tgz#ac0059b02b9692515a637115b0cc9fed3a35c7b0" + dependencies: + loader-utils "^1.1.0" + schema-utils "^1.0.0" + webpack-sources "^1.1.0" + +mini-store@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/mini-store/-/mini-store-2.0.0.tgz#0843c048d6942ce55e3e78b1b67fc063022b5488" + dependencies: + hoist-non-react-statics "^2.3.1" + prop-types "^15.6.0" + react-lifecycles-compat "^3.0.4" + shallowequal "^1.0.2" + +minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" + +minimalistic-crypto-utils@^1.0.0, minimalistic-crypto-utils@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a" + +minimatch@3.0.4, minimatch@^3.0.3, minimatch@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + dependencies: + brace-expansion "^1.1.7" + +minimist@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" + +minimist@^1.1.1, minimist@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" + +minimist@~0.0.1: + version "0.0.10" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.10.tgz#de3f98543dbf96082be48ad1a0c7cda836301dcf" + +minipass@^2.2.1, minipass@^2.3.4: + version "2.3.5" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.3.5.tgz#cacebe492022497f656b0f0f51e2682a9ed2d848" + dependencies: + safe-buffer "^5.1.2" + yallist "^3.0.0" + +minizlib@^1.1.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.2.1.tgz#dd27ea6136243c7c880684e8672bb3a45fd9b614" + dependencies: + minipass "^2.2.1" + +mississippi@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/mississippi/-/mississippi-3.0.0.tgz#ea0a3291f97e0b5e8776b363d5f0a12d94c67022" + dependencies: + concat-stream "^1.5.0" + duplexify "^3.4.2" + end-of-stream "^1.1.0" + flush-write-stream "^1.0.0" + from2 "^2.1.0" + parallel-transform "^1.1.0" + pump "^3.0.0" + pumpify "^1.3.3" + stream-each "^1.1.0" + through2 "^2.0.0" + +mixin-deep@^1.2.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/mixin-deep/-/mixin-deep-1.3.1.tgz#a49e7268dce1a0d9698e45326c5626df3543d0fe" + dependencies: + for-in "^1.0.2" + is-extendable "^1.0.1" + +mixin-object@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/mixin-object/-/mixin-object-2.0.1.tgz#4fb949441dab182540f1fe035ba60e1947a5e57e" + dependencies: + for-in "^0.1.3" + is-extendable "^0.1.1" + +mkdirp@0.5.1, mkdirp@0.5.x, mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" + dependencies: + minimist "0.0.8" + +moment@2.x, moment@^2.24.0: + version "2.24.0" + resolved "https://registry.yarnpkg.com/moment/-/moment-2.24.0.tgz#0d055d53f5052aa653c9f6eb68bb5d12bf5c2b5b" + +move-concurrently@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/move-concurrently/-/move-concurrently-1.0.1.tgz#be2c005fda32e0b29af1f05d7c4b33214c701f92" + dependencies: + aproba "^1.1.1" + copy-concurrently "^1.0.0" + fs-write-stream-atomic "^1.0.8" + mkdirp "^0.5.1" + rimraf "^2.5.4" + run-queue "^1.0.3" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + +ms@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a" + +multicast-dns-service-types@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz#899f11d9686e5e05cb91b35d5f0e63b773cfc901" + +multicast-dns@^6.0.1: + version "6.2.3" + resolved "https://registry.yarnpkg.com/multicast-dns/-/multicast-dns-6.2.3.tgz#a0ec7bd9055c4282f790c3c82f4e28db3b31b229" + dependencies: + dns-packet "^1.3.1" + thunky "^1.0.2" + +mutationobserver-shim@^0.3.2: + version "0.3.3" + resolved "https://registry.yarnpkg.com/mutationobserver-shim/-/mutationobserver-shim-0.3.3.tgz#65869630bc89d7bf8c9cd9cb82188cd955aacd2b" + +mute-stream@0.0.7: + version "0.0.7" + resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.7.tgz#3075ce93bc21b8fab43e1bc4da7e8115ed1e7bab" + +nan@^2.12.1: + version "2.13.2" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.13.2.tgz#f51dc7ae66ba7d5d55e1e6d4d8092e802c9aefe7" + +nanomatch@^1.2.9: + version "1.2.13" + resolved "https://registry.yarnpkg.com/nanomatch/-/nanomatch-1.2.13.tgz#b87a8aa4fc0de8fe6be88895b38983ff265bd119" + dependencies: + arr-diff "^4.0.0" + array-unique "^0.3.2" + define-property "^2.0.2" + extend-shallow "^3.0.2" + fragment-cache "^0.2.1" + is-windows "^1.0.2" + kind-of "^6.0.2" + object.pick "^1.3.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +natural-compare@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" + +needle@^2.2.1: + version "2.3.0" + resolved "https://registry.yarnpkg.com/needle/-/needle-2.3.0.tgz#ce3fea21197267bacb310705a7bbe24f2a3a3492" + dependencies: + debug "^4.1.0" + iconv-lite "^0.4.4" + sax "^1.2.4" + +negotiator@0.6.1: + 
version "0.6.1" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9" + +neo-async@^2.5.0, neo-async@^2.6.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.0.tgz#b9d15e4d71c6762908654b5183ed38b753340835" + +nice-try@^1.0.4: + version "1.0.5" + resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" + +no-case@^2.2.0: + version "2.3.2" + resolved "https://registry.yarnpkg.com/no-case/-/no-case-2.3.2.tgz#60b813396be39b3f1288a4c1ed5d1e7d28b464ac" + dependencies: + lower-case "^1.1.1" + +node-fetch@^1.0.1: + version "1.7.3" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-1.7.3.tgz#980f6f72d85211a5347c6b2bc18c5b84c3eb47ef" + dependencies: + encoding "^0.1.11" + is-stream "^1.0.1" + +node-forge@0.7.5: + version "0.7.5" + resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.7.5.tgz#6c152c345ce11c52f465c2abd957e8639cd674df" + +node-int64@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b" + +node-libs-browser@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/node-libs-browser/-/node-libs-browser-2.2.0.tgz#c72f60d9d46de08a940dedbb25f3ffa2f9bbaa77" + dependencies: + assert "^1.1.1" + browserify-zlib "^0.2.0" + buffer "^4.3.0" + console-browserify "^1.1.0" + constants-browserify "^1.0.0" + crypto-browserify "^3.11.0" + domain-browser "^1.1.1" + events "^3.0.0" + https-browserify "^1.0.0" + os-browserify "^0.3.0" + path-browserify "0.0.0" + process "^0.11.10" + punycode "^1.2.4" + querystring-es3 "^0.2.0" + readable-stream "^2.3.3" + stream-browserify "^2.0.1" + stream-http "^2.7.2" + string_decoder "^1.0.0" + timers-browserify "^2.0.4" + tty-browserify "0.0.0" + url "^0.11.0" + util "^0.11.0" + vm-browserify "0.0.4" + +node-modules-regexp@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/node-modules-regexp/-/node-modules-regexp-1.0.0.tgz#8d9dbe28964a4ac5712e9131642107c71e90ec40" + +node-notifier@^5.2.1: + version "5.4.0" + resolved "https://registry.yarnpkg.com/node-notifier/-/node-notifier-5.4.0.tgz#7b455fdce9f7de0c63538297354f3db468426e6a" + dependencies: + growly "^1.3.0" + is-wsl "^1.1.0" + semver "^5.5.0" + shellwords "^0.1.1" + which "^1.3.0" + +node-pre-gyp@^0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.12.0.tgz#39ba4bb1439da030295f899e3b520b7785766149" + dependencies: + detect-libc "^1.0.2" + mkdirp "^0.5.1" + needle "^2.2.1" + nopt "^4.0.1" + npm-packlist "^1.1.6" + npmlog "^4.0.2" + rc "^1.2.7" + rimraf "^2.6.1" + semver "^5.3.0" + tar "^4" + +node-releases@^1.1.13: + version "1.1.17" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.17.tgz#71ea4631f0a97d5cd4f65f7d04ecf9072eac711a" + dependencies: + semver "^5.3.0" + +node-releases@^1.1.14: + version "1.1.15" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.15.tgz#9e76a73b0eca3bf7801addaa0e6ce90c795f2b9a" + dependencies: + semver "^5.3.0" + +nopt@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d" + dependencies: + abbrev "1" + osenv "^0.1.4" + +normalize-package-data@^2.3.2: + version "2.5.0" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz#e66db1838b200c1dfc233225d12cb36520e234a8" + dependencies: + 
hosted-git-info "^2.1.4" + resolve "^1.10.0" + semver "2 || 3 || 4 || 5" + validate-npm-package-license "^3.0.1" + +normalize-path@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" + dependencies: + remove-trailing-separator "^1.0.1" + +normalize-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" + +normalize-range@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" + +normalize-url@^3.0.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-3.3.0.tgz#b2e1c4dc4f7c6d57743df733a4f5978d18650559" + +npm-bundled@^1.0.1: + version "1.0.6" + resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.0.6.tgz#e7ba9aadcef962bb61248f91721cd932b3fe6bdd" + +npm-packlist@^1.1.6: + version "1.4.1" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.4.1.tgz#19064cdf988da80ea3cee45533879d90192bbfbc" + dependencies: + ignore-walk "^3.0.1" + npm-bundled "^1.0.1" + +npm-run-path@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" + dependencies: + path-key "^2.0.0" + +npmlog@^4.0.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b" + dependencies: + are-we-there-yet "~1.1.2" + console-control-strings "~1.1.0" + gauge "~2.7.3" + set-blocking "~2.0.0" + +nth-check@^1.0.2, nth-check@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-1.0.2.tgz#b2bd295c37e3dd58a3bf0700376663ba4d9cf05c" + dependencies: + boolbase "~1.0.0" + +num2fraction@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/num2fraction/-/num2fraction-1.2.2.tgz#6f682b6a027a4e9ddfa4564cd2589d1d4e669ede" + +number-is-nan@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" + +nwsapi@^2.0.7, nwsapi@^2.1.3: + version "2.1.3" + resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.1.3.tgz#25f3a5cec26c654f7376df6659cdf84b99df9558" + +oauth-sign@~0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" + +object-assign@4.1.1, object-assign@4.x, object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + +object-copy@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/object-copy/-/object-copy-0.1.0.tgz#7e7d858b781bd7c991a41ba975ed3812754e998c" + dependencies: + copy-descriptor "^0.1.0" + define-property "^0.2.5" + kind-of "^3.0.3" + +object-hash@^1.1.4: + version "1.3.1" + resolved "https://registry.yarnpkg.com/object-hash/-/object-hash-1.3.1.tgz#fde452098a951cb145f039bb7d455449ddc126df" + +object-keys@^1.0.11, object-keys@^1.0.12: + version "1.1.1" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" + +object-visit@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/object-visit/-/object-visit-1.0.1.tgz#f79c4493af0c5377b59fe39d395e41042dd045bb" + 
dependencies: + isobject "^3.0.0" + +object.assign@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" + dependencies: + define-properties "^1.1.2" + function-bind "^1.1.1" + has-symbols "^1.0.0" + object-keys "^1.0.11" + +object.fromentries@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/object.fromentries/-/object.fromentries-2.0.0.tgz#49a543d92151f8277b3ac9600f1e930b189d30ab" + dependencies: + define-properties "^1.1.2" + es-abstract "^1.11.0" + function-bind "^1.1.1" + has "^1.0.1" + +object.getownpropertydescriptors@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.0.3.tgz#8758c846f5b407adab0f236e0986f14b051caa16" + dependencies: + define-properties "^1.1.2" + es-abstract "^1.5.1" + +object.pick@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/object.pick/-/object.pick-1.3.0.tgz#87a10ac4c1694bd2e1cbf53591a66141fb5dd747" + dependencies: + isobject "^3.0.1" + +object.values@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.1.0.tgz#bf6810ef5da3e5325790eaaa2be213ea84624da9" + dependencies: + define-properties "^1.1.3" + es-abstract "^1.12.0" + function-bind "^1.1.1" + has "^1.0.3" + +obuf@^1.0.0, obuf@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/obuf/-/obuf-1.1.2.tgz#09bea3343d41859ebd446292d11c9d4db619084e" + +omit.js@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/omit.js/-/omit.js-1.0.2.tgz#91a14f0eba84066dfa015bf30e474c47f30bc858" + dependencies: + babel-runtime "^6.23.0" + +on-finished@~2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947" + dependencies: + ee-first "1.1.1" + +on-headers@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.2.tgz#772b0ae6aaa525c399e489adfad90c403eb3c28f" + +once@^1.3.0, once@^1.3.1, once@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + dependencies: + wrappy "1" + +onetime@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-2.0.1.tgz#067428230fd67443b2794b22bba528b6867962d4" + dependencies: + mimic-fn "^1.0.0" + +opn@5.4.0: + version "5.4.0" + resolved "https://registry.yarnpkg.com/opn/-/opn-5.4.0.tgz#cb545e7aab78562beb11aa3bfabc7042e1761035" + dependencies: + is-wsl "^1.1.0" + +opn@^5.1.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/opn/-/opn-5.5.0.tgz#fc7164fab56d235904c51c3b27da6758ca3b9bfc" + dependencies: + is-wsl "^1.1.0" + +optimist@^0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686" + dependencies: + minimist "~0.0.1" + wordwrap "~0.0.2" + +optimize-css-assets-webpack-plugin@5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.1.tgz#9eb500711d35165b45e7fd60ba2df40cb3eb9159" + dependencies: + cssnano "^4.1.0" + last-call-webpack-plugin "^3.0.0" + +optionator@^0.8.1, optionator@^0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64" + dependencies: + deep-is "~0.1.3" + fast-levenshtein "~2.0.4" + levn "~0.3.0" + prelude-ls 
"~1.1.2" + type-check "~0.3.2" + wordwrap "~1.0.0" + +original@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/original/-/original-1.0.2.tgz#e442a61cffe1c5fd20a65f3261c26663b303f25f" + dependencies: + url-parse "^1.4.3" + +os-browserify@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/os-browserify/-/os-browserify-0.3.0.tgz#854373c7f5c2315914fc9bfc6bd8238fdda1ec27" + +os-homedir@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" + +os-locale@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-3.1.0.tgz#a802a6ee17f24c10483ab9935719cef4ed16bf1a" + dependencies: + execa "^1.0.0" + lcid "^2.0.0" + mem "^4.0.0" + +os-tmpdir@^1.0.0, os-tmpdir@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + +osenv@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410" + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.0" + +p-defer@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-1.0.0.tgz#9f6eb182f6c9aa8cd743004a7d4f96b196b0fb0c" + +p-each-series@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-each-series/-/p-each-series-1.0.0.tgz#930f3d12dd1f50e7434457a22cd6f04ac6ad7f71" + dependencies: + p-reduce "^1.0.0" + +p-finally@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + +p-is-promise@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/p-is-promise/-/p-is-promise-2.1.0.tgz#918cebaea248a62cf7ffab8e3bca8c5f882fc42e" + +p-limit@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8" + dependencies: + p-try "^1.0.0" + +p-limit@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.2.0.tgz#417c9941e6027a9abcba5092dd2904e255b5fbc2" + dependencies: + p-try "^2.0.0" + +p-locate@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" + dependencies: + p-limit "^1.1.0" + +p-locate@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4" + dependencies: + p-limit "^2.0.0" + +p-map@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/p-map/-/p-map-1.2.0.tgz#e4e94f311eabbc8633a1e79908165fca26241b6b" + +p-reduce@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-reduce/-/p-reduce-1.0.0.tgz#18c2b0dd936a4690a529f8231f58a0fdb6a47dfa" + +p-try@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3" + +p-try@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" + +pako@~1.0.5: + version "1.0.10" + resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.10.tgz#4328badb5086a426aa90f541977d4955da5c9732" + +parallel-transform@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/parallel-transform/-/parallel-transform-1.1.0.tgz#d410f065b05da23081fcd10f28854c29bda33b06" + dependencies: + cyclist "~0.2.2" + inherits "^2.0.3" + readable-stream "^2.1.5" + 
+param-case@2.1.x: + version "2.1.1" + resolved "https://registry.yarnpkg.com/param-case/-/param-case-2.1.1.tgz#df94fd8cf6531ecf75e6bef9a0858fbc72be2247" + dependencies: + no-case "^2.2.0" + +parent-module@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" + dependencies: + callsites "^3.0.0" + +parse-asn1@^5.0.0: + version "5.1.4" + resolved "https://registry.yarnpkg.com/parse-asn1/-/parse-asn1-5.1.4.tgz#37f6628f823fbdeb2273b4d540434a22f3ef1fcc" + dependencies: + asn1.js "^4.0.0" + browserify-aes "^1.0.0" + create-hash "^1.1.0" + evp_bytestokey "^1.0.0" + pbkdf2 "^3.0.3" + safe-buffer "^5.1.1" + +parse-json@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9" + dependencies: + error-ex "^1.2.0" + +parse-json@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0" + dependencies: + error-ex "^1.3.1" + json-parse-better-errors "^1.0.1" + +parse5@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-4.0.0.tgz#6d78656e3da8d78b4ec0b906f7c08ef1dfe3f608" + +parse5@5.1.0, parse5@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.0.tgz#c59341c9723f414c452975564c7c00a68d58acd2" + +parseurl@~1.3.2: + version "1.3.3" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" + +pascalcase@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/pascalcase/-/pascalcase-0.1.1.tgz#b363e55e8006ca6fe21784d2db22bd15d7917f14" + +path-browserify@0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/path-browserify/-/path-browserify-0.0.0.tgz#a0b870729aae214005b7d5032ec2cbbb0fb4451a" + +path-dirname@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-dirname/-/path-dirname-1.0.2.tgz#cc33d24d525e099a5388c0336c6e32b9160609e0" + +path-exists@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b" + dependencies: + pinkie-promise "^2.0.0" + +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + +path-is-inside@^1.0.1, path-is-inside@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53" + +path-key@^2.0.0, path-key@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" + +path-parse@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c" + +path-to-regexp@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" + +path-to-regexp@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.7.0.tgz#59fde0f435badacba103a84e9d3bc64e96b9937d" + dependencies: + isarray "0.0.1" + +path-type@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/path-type/-/path-type-2.0.0.tgz#f012ccb8415b7096fc2daa1054c3d72389594c73" + dependencies: + pify "^2.0.0" + +path-type@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-3.0.0.tgz#cef31dc8e0a1a3bb0d105c0cd97cf3bf47f4e36f" + dependencies: + pify "^3.0.0" + +pbkdf2@^3.0.3: + version "3.0.17" + resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.0.17.tgz#976c206530617b14ebb32114239f7b09336e93a6" + dependencies: + create-hash "^1.1.2" + create-hmac "^1.1.4" + ripemd160 "^2.0.1" + safe-buffer "^5.0.1" + sha.js "^2.4.8" + +performance-now@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" + +pify@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + +pify@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" + +pify@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/pify/-/pify-4.0.1.tgz#4b2cd25c50d598735c50292224fd8c6df41e3231" + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + +pirates@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.1.tgz#643a92caf894566f91b2b986d2c66950a8e2fb87" + dependencies: + node-modules-regexp "^1.0.0" + +pkg-dir@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-1.0.0.tgz#7a4b508a8d5bb2d629d447056ff4e9c9314cf3d4" + dependencies: + find-up "^1.0.0" + +pkg-dir@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-2.0.0.tgz#f6d5d1109e19d63edf428e0bd57e12777615334b" + dependencies: + find-up "^2.1.0" + +pkg-dir@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-3.0.0.tgz#2749020f239ed990881b1f71210d51eb6523bea3" + dependencies: + find-up "^3.0.0" + +pkg-up@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/pkg-up/-/pkg-up-2.0.0.tgz#c819ac728059a461cab1c3889a2be3c49a004d7f" + dependencies: + find-up "^2.1.0" + +pn@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/pn/-/pn-1.1.0.tgz#e2f4cef0e219f463c179ab37463e4e1ecdccbafb" + +pnp-webpack-plugin@1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/pnp-webpack-plugin/-/pnp-webpack-plugin-1.2.1.tgz#cd9d698df2a6fcf7255093c1c9511adf65b9421b" + dependencies: + ts-pnp "^1.0.0" + +portfinder@^1.0.9: + version "1.0.20" + resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.20.tgz#bea68632e54b2e13ab7b0c4775e9b41bf270e44a" + dependencies: + async "^1.5.2" + debug "^2.2.0" + mkdirp "0.5.x" + +posix-character-classes@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab" + +postcss-attribute-case-insensitive@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-4.0.1.tgz#b2a721a0d279c2f9103a36331c88981526428cc7" + dependencies: + postcss "^7.0.2" + postcss-selector-parser "^5.0.0" + 
+postcss-browser-comments@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/postcss-browser-comments/-/postcss-browser-comments-2.0.0.tgz#dc48d6a8ddbff188a80a000b7393436cb18aed88" + dependencies: + postcss "^7.0.2" + +postcss-calc@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/postcss-calc/-/postcss-calc-7.0.1.tgz#36d77bab023b0ecbb9789d84dcb23c4941145436" + dependencies: + css-unit-converter "^1.1.1" + postcss "^7.0.5" + postcss-selector-parser "^5.0.0-rc.4" + postcss-value-parser "^3.3.1" + +postcss-color-functional-notation@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/postcss-color-functional-notation/-/postcss-color-functional-notation-2.0.1.tgz#5efd37a88fbabeb00a2966d1e53d98ced93f74e0" + dependencies: + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-color-gray@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/postcss-color-gray/-/postcss-color-gray-5.0.0.tgz#532a31eb909f8da898ceffe296fdc1f864be8547" + dependencies: + "@csstools/convert-colors" "^1.4.0" + postcss "^7.0.5" + postcss-values-parser "^2.0.0" + +postcss-color-hex-alpha@^5.0.2: + version "5.0.3" + resolved "https://registry.yarnpkg.com/postcss-color-hex-alpha/-/postcss-color-hex-alpha-5.0.3.tgz#a8d9ca4c39d497c9661e374b9c51899ef0f87388" + dependencies: + postcss "^7.0.14" + postcss-values-parser "^2.0.1" + +postcss-color-mod-function@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/postcss-color-mod-function/-/postcss-color-mod-function-3.0.3.tgz#816ba145ac11cc3cb6baa905a75a49f903e4d31d" + dependencies: + "@csstools/convert-colors" "^1.4.0" + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-color-rebeccapurple@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-color-rebeccapurple/-/postcss-color-rebeccapurple-4.0.1.tgz#c7a89be872bb74e45b1e3022bfe5748823e6de77" + dependencies: + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-colormin@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-4.0.3.tgz#ae060bce93ed794ac71264f08132d550956bd381" + dependencies: + browserslist "^4.0.0" + color "^3.0.0" + has "^1.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-convert-values@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz#ca3813ed4da0f812f9d43703584e449ebe189a7f" + dependencies: + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-custom-media@^7.0.7: + version "7.0.8" + resolved "https://registry.yarnpkg.com/postcss-custom-media/-/postcss-custom-media-7.0.8.tgz#fffd13ffeffad73621be5f387076a28b00294e0c" + dependencies: + postcss "^7.0.14" + +postcss-custom-properties@^8.0.9: + version "8.0.10" + resolved "https://registry.yarnpkg.com/postcss-custom-properties/-/postcss-custom-properties-8.0.10.tgz#e8dc969e1e15c555f0b836b7f278ef47e3cdeaff" + dependencies: + postcss "^7.0.14" + postcss-values-parser "^2.0.1" + +postcss-custom-selectors@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/postcss-custom-selectors/-/postcss-custom-selectors-5.1.2.tgz#64858c6eb2ecff2fb41d0b28c9dd7b3db4de7fba" + dependencies: + postcss "^7.0.2" + postcss-selector-parser "^5.0.0-rc.3" + +postcss-dir-pseudo-class@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/postcss-dir-pseudo-class/-/postcss-dir-pseudo-class-5.0.0.tgz#6e3a4177d0edb3abcc85fdb6fbb1c26dabaeaba2" + dependencies: + postcss "^7.0.2" + postcss-selector-parser 
"^5.0.0-rc.3" + +postcss-discard-comments@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz#1fbabd2c246bff6aaad7997b2b0918f4d7af4033" + dependencies: + postcss "^7.0.0" + +postcss-discard-duplicates@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz#3fe133cd3c82282e550fc9b239176a9207b784eb" + dependencies: + postcss "^7.0.0" + +postcss-discard-empty@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz#c8c951e9f73ed9428019458444a02ad90bb9f765" + dependencies: + postcss "^7.0.0" + +postcss-discard-overridden@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz#652aef8a96726f029f5e3e00146ee7a4e755ff57" + dependencies: + postcss "^7.0.0" + +postcss-double-position-gradients@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/postcss-double-position-gradients/-/postcss-double-position-gradients-1.0.0.tgz#fc927d52fddc896cb3a2812ebc5df147e110522e" + dependencies: + postcss "^7.0.5" + postcss-values-parser "^2.0.0" + +postcss-env-function@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/postcss-env-function/-/postcss-env-function-2.0.2.tgz#0f3e3d3c57f094a92c2baf4b6241f0b0da5365d7" + dependencies: + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-flexbugs-fixes@4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/postcss-flexbugs-fixes/-/postcss-flexbugs-fixes-4.1.0.tgz#e094a9df1783e2200b7b19f875dcad3b3aff8b20" + dependencies: + postcss "^7.0.0" + +postcss-focus-visible@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-focus-visible/-/postcss-focus-visible-4.0.0.tgz#477d107113ade6024b14128317ade2bd1e17046e" + dependencies: + postcss "^7.0.2" + +postcss-focus-within@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-focus-within/-/postcss-focus-within-3.0.0.tgz#763b8788596cee9b874c999201cdde80659ef680" + dependencies: + postcss "^7.0.2" + +postcss-font-variant@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-font-variant/-/postcss-font-variant-4.0.0.tgz#71dd3c6c10a0d846c5eda07803439617bbbabacc" + dependencies: + postcss "^7.0.2" + +postcss-gap-properties@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/postcss-gap-properties/-/postcss-gap-properties-2.0.0.tgz#431c192ab3ed96a3c3d09f2ff615960f902c1715" + dependencies: + postcss "^7.0.2" + +postcss-image-set-function@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/postcss-image-set-function/-/postcss-image-set-function-3.0.1.tgz#28920a2f29945bed4c3198d7df6496d410d3f288" + dependencies: + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-initial@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-initial/-/postcss-initial-3.0.0.tgz#1772512faf11421b791fb2ca6879df5f68aa0517" + dependencies: + lodash.template "^4.2.4" + postcss "^7.0.2" + +postcss-lab-function@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/postcss-lab-function/-/postcss-lab-function-2.0.1.tgz#bb51a6856cd12289ab4ae20db1e3821ef13d7d2e" + dependencies: + "@csstools/convert-colors" "^1.4.0" + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-load-config@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/postcss-load-config/-/postcss-load-config-2.0.0.tgz#f1312ddbf5912cd747177083c5ef7a19d62ee484" + dependencies: + cosmiconfig "^4.0.0" + import-cwd "^2.0.0" + +postcss-loader@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-loader/-/postcss-loader-3.0.0.tgz#6b97943e47c72d845fa9e03f273773d4e8dd6c2d" + dependencies: + loader-utils "^1.1.0" + postcss "^7.0.0" + postcss-load-config "^2.0.0" + schema-utils "^1.0.0" + +postcss-logical@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-logical/-/postcss-logical-3.0.0.tgz#2495d0f8b82e9f262725f75f9401b34e7b45d5b5" + dependencies: + postcss "^7.0.2" + +postcss-media-minmax@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-media-minmax/-/postcss-media-minmax-4.0.0.tgz#b75bb6cbc217c8ac49433e12f22048814a4f5ed5" + dependencies: + postcss "^7.0.2" + +postcss-merge-longhand@^4.0.11: + version "4.0.11" + resolved "https://registry.yarnpkg.com/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz#62f49a13e4a0ee04e7b98f42bb16062ca2549e24" + dependencies: + css-color-names "0.0.4" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + stylehacks "^4.0.0" + +postcss-merge-rules@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz#362bea4ff5a1f98e4075a713c6cb25aefef9a650" + dependencies: + browserslist "^4.0.0" + caniuse-api "^3.0.0" + cssnano-util-same-parent "^4.0.0" + postcss "^7.0.0" + postcss-selector-parser "^3.0.0" + vendors "^1.0.0" + +postcss-minify-font-values@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz#cd4c344cce474343fac5d82206ab2cbcb8afd5a6" + dependencies: + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-minify-gradients@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz#93b29c2ff5099c535eecda56c4aa6e665a663471" + dependencies: + cssnano-util-get-arguments "^4.0.0" + is-color-stop "^1.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-minify-params@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz#6b9cef030c11e35261f95f618c90036d680db874" + dependencies: + alphanum-sort "^1.0.0" + browserslist "^4.0.0" + cssnano-util-get-arguments "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + uniqs "^2.0.0" + +postcss-minify-selectors@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz#e2e5eb40bfee500d0cd9243500f5f8ea4262fbd8" + dependencies: + alphanum-sort "^1.0.0" + has "^1.0.0" + postcss "^7.0.0" + postcss-selector-parser "^3.0.0" + +postcss-modules-extract-imports@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-2.0.0.tgz#818719a1ae1da325f9832446b01136eeb493cd7e" + dependencies: + postcss "^7.0.5" + +postcss-modules-local-by-default@^2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-2.0.6.tgz#dd9953f6dd476b5fd1ef2d8830c8929760b56e63" + dependencies: + postcss "^7.0.6" + postcss-selector-parser "^6.0.0" + postcss-value-parser "^3.3.1" + +postcss-modules-scope@^2.1.0: + version "2.1.0" + resolved 
"https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-2.1.0.tgz#ad3f5bf7856114f6fcab901b0502e2a2bc39d4eb" + dependencies: + postcss "^7.0.6" + postcss-selector-parser "^6.0.0" + +postcss-modules-values@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-2.0.0.tgz#479b46dc0c5ca3dc7fa5270851836b9ec7152f64" + dependencies: + icss-replace-symbols "^1.1.0" + postcss "^7.0.6" + +postcss-nesting@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/postcss-nesting/-/postcss-nesting-7.0.0.tgz#6e26a770a0c8fcba33782a6b6f350845e1a448f6" + dependencies: + postcss "^7.0.2" + +postcss-normalize-charset@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz#8b35add3aee83a136b0471e0d59be58a50285dd4" + dependencies: + postcss "^7.0.0" + +postcss-normalize-display-values@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz#0dbe04a4ce9063d4667ed2be476bb830c825935a" + dependencies: + cssnano-util-get-match "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-positions@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz#05f757f84f260437378368a91f8932d4b102917f" + dependencies: + cssnano-util-get-arguments "^4.0.0" + has "^1.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-repeat-style@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz#c4ebbc289f3991a028d44751cbdd11918b17910c" + dependencies: + cssnano-util-get-arguments "^4.0.0" + cssnano-util-get-match "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-string@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz#cd44c40ab07a0c7a36dc5e99aace1eca4ec2690c" + dependencies: + has "^1.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-timing-functions@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz#8e009ca2a3949cdaf8ad23e6b6ab99cb5e7d28d9" + dependencies: + cssnano-util-get-match "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-unicode@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz#841bd48fdcf3019ad4baa7493a3d363b52ae1cfb" + dependencies: + browserslist "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-url@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz#10e437f86bc7c7e58f7b9652ed878daaa95faae1" + dependencies: + is-absolute-url "^2.0.0" + normalize-url "^3.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize-whitespace@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz#bf1d4070fe4fcea87d1348e825d8cc0c5faa7d82" + dependencies: + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-normalize@7.0.1: + version "7.0.1" + resolved 
"https://registry.yarnpkg.com/postcss-normalize/-/postcss-normalize-7.0.1.tgz#eb51568d962b8aa61a8318383c8bb7e54332282e" + dependencies: + "@csstools/normalize.css" "^9.0.1" + browserslist "^4.1.1" + postcss "^7.0.2" + postcss-browser-comments "^2.0.0" + +postcss-ordered-values@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz#0cf75c820ec7d5c4d280189559e0b571ebac0eee" + dependencies: + cssnano-util-get-arguments "^4.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + +postcss-overflow-shorthand@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/postcss-overflow-shorthand/-/postcss-overflow-shorthand-2.0.0.tgz#31ecf350e9c6f6ddc250a78f0c3e111f32dd4c30" + dependencies: + postcss "^7.0.2" + +postcss-page-break@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/postcss-page-break/-/postcss-page-break-2.0.0.tgz#add52d0e0a528cabe6afee8b46e2abb277df46bf" + dependencies: + postcss "^7.0.2" + +postcss-place@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-place/-/postcss-place-4.0.1.tgz#e9f39d33d2dc584e46ee1db45adb77ca9d1dcc62" + dependencies: + postcss "^7.0.2" + postcss-values-parser "^2.0.0" + +postcss-preset-env@6.6.0: + version "6.6.0" + resolved "https://registry.yarnpkg.com/postcss-preset-env/-/postcss-preset-env-6.6.0.tgz#642e7d962e2bdc2e355db117c1eb63952690ed5b" + dependencies: + autoprefixer "^9.4.9" + browserslist "^4.4.2" + caniuse-lite "^1.0.30000939" + css-blank-pseudo "^0.1.4" + css-has-pseudo "^0.10.0" + css-prefers-color-scheme "^3.1.1" + cssdb "^4.3.0" + postcss "^7.0.14" + postcss-attribute-case-insensitive "^4.0.1" + postcss-color-functional-notation "^2.0.1" + postcss-color-gray "^5.0.0" + postcss-color-hex-alpha "^5.0.2" + postcss-color-mod-function "^3.0.3" + postcss-color-rebeccapurple "^4.0.1" + postcss-custom-media "^7.0.7" + postcss-custom-properties "^8.0.9" + postcss-custom-selectors "^5.1.2" + postcss-dir-pseudo-class "^5.0.0" + postcss-double-position-gradients "^1.0.0" + postcss-env-function "^2.0.2" + postcss-focus-visible "^4.0.0" + postcss-focus-within "^3.0.0" + postcss-font-variant "^4.0.0" + postcss-gap-properties "^2.0.0" + postcss-image-set-function "^3.0.1" + postcss-initial "^3.0.0" + postcss-lab-function "^2.0.1" + postcss-logical "^3.0.0" + postcss-media-minmax "^4.0.0" + postcss-nesting "^7.0.0" + postcss-overflow-shorthand "^2.0.0" + postcss-page-break "^2.0.0" + postcss-place "^4.0.1" + postcss-pseudo-class-any-link "^6.0.0" + postcss-replace-overflow-wrap "^3.0.0" + postcss-selector-matches "^4.0.0" + postcss-selector-not "^4.0.0" + +postcss-pseudo-class-any-link@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/postcss-pseudo-class-any-link/-/postcss-pseudo-class-any-link-6.0.0.tgz#2ed3eed393b3702879dec4a87032b210daeb04d1" + dependencies: + postcss "^7.0.2" + postcss-selector-parser "^5.0.0-rc.3" + +postcss-reduce-initial@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz#7fd42ebea5e9c814609639e2c2e84ae270ba48df" + dependencies: + browserslist "^4.0.0" + caniuse-api "^3.0.0" + has "^1.0.0" + postcss "^7.0.0" + +postcss-reduce-transforms@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz#17efa405eacc6e07be3414a5ca2d1074681d4e29" + dependencies: + cssnano-util-get-match "^4.0.0" + has "^1.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + 
+postcss-replace-overflow-wrap@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-replace-overflow-wrap/-/postcss-replace-overflow-wrap-3.0.0.tgz#61b360ffdaedca84c7c918d2b0f0d0ea559ab01c" + dependencies: + postcss "^7.0.2" + +postcss-safe-parser@4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-safe-parser/-/postcss-safe-parser-4.0.1.tgz#8756d9e4c36fdce2c72b091bbc8ca176ab1fcdea" + dependencies: + postcss "^7.0.0" + +postcss-selector-matches@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-selector-matches/-/postcss-selector-matches-4.0.0.tgz#71c8248f917ba2cc93037c9637ee09c64436fcff" + dependencies: + balanced-match "^1.0.0" + postcss "^7.0.2" + +postcss-selector-not@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-selector-not/-/postcss-selector-not-4.0.0.tgz#c68ff7ba96527499e832724a2674d65603b645c0" + dependencies: + balanced-match "^1.0.0" + postcss "^7.0.2" + +postcss-selector-parser@^3.0.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz#4f875f4afb0c96573d5cf4d74011aee250a7e865" + dependencies: + dot-prop "^4.1.1" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss-selector-parser@^5.0.0, postcss-selector-parser@^5.0.0-rc.3, postcss-selector-parser@^5.0.0-rc.4: + version "5.0.0" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-5.0.0.tgz#249044356697b33b64f1a8f7c80922dddee7195c" + dependencies: + cssesc "^2.0.0" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss-selector-parser@^6.0.0: + version "6.0.2" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.2.tgz#934cf799d016c83411859e09dcecade01286ec5c" + dependencies: + cssesc "^3.0.0" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss-svgo@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/postcss-svgo/-/postcss-svgo-4.0.2.tgz#17b997bc711b333bab143aaed3b8d3d6e3d38258" + dependencies: + is-svg "^3.0.0" + postcss "^7.0.0" + postcss-value-parser "^3.0.0" + svgo "^1.0.0" + +postcss-unique-selectors@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz#9446911f3289bfd64c6d680f073c03b1f9ee4bac" + dependencies: + alphanum-sort "^1.0.0" + postcss "^7.0.0" + uniqs "^2.0.0" + +postcss-value-parser@^3.0.0, postcss-value-parser@^3.3.0, postcss-value-parser@^3.3.1: + version "3.3.1" + resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz#9ff822547e2893213cf1c30efa51ac5fd1ba8281" + +postcss-values-parser@^2.0.0, postcss-values-parser@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/postcss-values-parser/-/postcss-values-parser-2.0.1.tgz#da8b472d901da1e205b47bdc98637b9e9e550e5f" + dependencies: + flatten "^1.0.2" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.14, postcss@^7.0.2, postcss@^7.0.5, postcss@^7.0.6: + version "7.0.14" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-7.0.14.tgz#4527ed6b1ca0d82c53ce5ec1a2041c2346bbd6e5" + dependencies: + chalk "^2.4.2" + source-map "^0.6.1" + supports-color "^6.1.0" + +prelude-ls@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" + +pretty-bytes@^5.1.0: + version "5.1.0" + resolved 
"https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-5.1.0.tgz#6237ecfbdc6525beaef4de722cc60a58ae0e6c6d" + +pretty-error@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-2.1.1.tgz#5f4f87c8f91e5ae3f3ba87ab4cf5e03b1a17f1a3" + dependencies: + renderkid "^2.0.1" + utila "~0.4" + +pretty-format@^24.7.0: + version "24.7.0" + resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-24.7.0.tgz#d23106bc2edcd776079c2daa5da02bcb12ed0c10" + dependencies: + "@jest/types" "^24.7.0" + ansi-regex "^4.0.0" + ansi-styles "^3.2.0" + react-is "^16.8.4" + +private@^0.1.6: + version "0.1.8" + resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff" + +process-nextick-args@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa" + +process@^0.11.10: + version "0.11.10" + resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" + +progress@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8" + +promise-inflight@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/promise-inflight/-/promise-inflight-1.0.1.tgz#98472870bf228132fcbdd868129bad12c3c029e3" + +promise@8.0.2: + version "8.0.2" + resolved "https://registry.yarnpkg.com/promise/-/promise-8.0.2.tgz#9dcd0672192c589477d56891271bdc27547ae9f0" + dependencies: + asap "~2.0.6" + +promise@^7.1.1: + version "7.3.1" + resolved "https://registry.yarnpkg.com/promise/-/promise-7.3.1.tgz#064b72602b18f90f29192b8b1bc418ffd1ebd3bf" + dependencies: + asap "~2.0.3" + +prompts@^2.0.1: + version "2.0.4" + resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.0.4.tgz#179f9d4db3128b9933aa35f93a800d8fce76a682" + dependencies: + kleur "^3.0.2" + sisteransi "^1.0.0" + +prop-types@15.x, prop-types@^15.5.0, prop-types@^15.5.10, prop-types@^15.5.4, prop-types@^15.5.6, prop-types@^15.5.7, prop-types@^15.5.8, prop-types@^15.5.9, prop-types@^15.6.0, prop-types@^15.6.2: + version "15.7.2" + resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.7.2.tgz#52c41e75b8c87e72b9d9360e0206b99dcbffa6c5" + dependencies: + loose-envify "^1.4.0" + object-assign "^4.1.1" + react-is "^16.8.1" + +property-information@^5.0.0, property-information@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/property-information/-/property-information-5.0.1.tgz#c3b09f4f5750b1634c0b24205adbf78f18bdf94f" + dependencies: + xtend "^4.0.1" + +proxy-addr@~2.0.4: + version "2.0.5" + resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.5.tgz#34cbd64a2d81f4b1fd21e76f9f06c8a45299ee34" + dependencies: + forwarded "~0.1.2" + ipaddr.js "1.9.0" + +prr@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/prr/-/prr-1.0.1.tgz#d3fc114ba06995a45ec6893f484ceb1d78f5f476" + +psl@^1.1.24, psl@^1.1.28: + version "1.1.31" + resolved "https://registry.yarnpkg.com/psl/-/psl-1.1.31.tgz#e9aa86d0101b5b105cbe93ac6b784cd547276184" + +public-encrypt@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/public-encrypt/-/public-encrypt-4.0.3.tgz#4fcc9d77a07e48ba7527e7cbe0de33d0701331e0" + dependencies: + bn.js "^4.1.0" + browserify-rsa "^4.0.0" + create-hash "^1.1.0" + parse-asn1 "^5.0.0" + randombytes "^2.0.1" + safe-buffer "^5.1.2" + +pump@^2.0.0: + version "2.0.1" + resolved 
"https://registry.yarnpkg.com/pump/-/pump-2.0.1.tgz#12399add6e4cf7526d973cbc8b5ce2e2908b3909" + dependencies: + end-of-stream "^1.1.0" + once "^1.3.1" + +pump@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" + dependencies: + end-of-stream "^1.1.0" + once "^1.3.1" + +pumpify@^1.3.3: + version "1.5.1" + resolved "https://registry.yarnpkg.com/pumpify/-/pumpify-1.5.1.tgz#36513be246ab27570b1a374a5ce278bfd74370ce" + dependencies: + duplexify "^3.6.0" + inherits "^2.0.3" + pump "^2.0.0" + +punycode@1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.3.2.tgz#9653a036fb7c1ee42342f2325cceefea3926c48d" + +punycode@2.x.x, punycode@^2.1.0, punycode@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" + +punycode@^1.2.4, punycode@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" + +q@^1.1.2: + version "1.5.1" + resolved "https://registry.yarnpkg.com/q/-/q-1.5.1.tgz#7e32f75b41381291d04611f1bf14109ac00651d7" + +qs@6.5.2, qs@~6.5.2: + version "6.5.2" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.2.tgz#cb3ae806e8740444584ef154ce8ee98d403f3e36" + +querystring-es3@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/querystring-es3/-/querystring-es3-0.2.1.tgz#9ec61f79049875707d69414596fd907a4d711e73" + +querystring@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.0.tgz#b209849203bb25df820da756e747005878521620" + +querystringify@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-2.1.1.tgz#60e5a5fd64a7f8bfa4d2ab2ed6fdf4c85bad154e" + +raf@3.4.1, raf@^3.4.0, raf@^3.4.1: + version "3.4.1" + resolved "https://registry.yarnpkg.com/raf/-/raf-3.4.1.tgz#0742e99a4a6552f445d73e3ee0328af0ff1ede39" + dependencies: + performance-now "^2.1.0" + +randombytes@^2.0.0, randombytes@^2.0.1, randombytes@^2.0.5: + version "2.1.0" + resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a" + dependencies: + safe-buffer "^5.1.0" + +randomfill@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/randomfill/-/randomfill-1.0.4.tgz#c92196fc86ab42be983f1bf31778224931d61458" + dependencies: + randombytes "^2.0.5" + safe-buffer "^5.1.0" + +range-parser@^1.0.3, range-parser@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e" + +raw-body@2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.3.3.tgz#1b324ece6b5706e153855bc1148c65bb7f6ea0c3" + dependencies: + bytes "3.0.0" + http-errors "1.6.3" + iconv-lite "0.4.23" + unpipe "1.0.0" + +rc-align@^2.4.0, rc-align@^2.4.1: + version "2.4.5" + resolved "https://registry.yarnpkg.com/rc-align/-/rc-align-2.4.5.tgz#c941a586f59d1017f23a428f0b468663fb7102ab" + dependencies: + babel-runtime "^6.26.0" + dom-align "^1.7.0" + prop-types "^15.5.8" + rc-util "^4.0.4" + +rc-animate@2.x, rc-animate@^2.3.0, rc-animate@^2.5.4: + version "2.8.2" + resolved "https://registry.yarnpkg.com/rc-animate/-/rc-animate-2.8.2.tgz#1c517728e361745efead6dd8c3c74ecfa8e54cda" + dependencies: + babel-runtime "6.x" + classnames "^2.2.6" + css-animation "^1.3.2" + prop-types "15.x" + raf "^3.4.0" + react-lifecycles-compat "^3.0.4" + 
+rc-animate@^3.0.0-rc.1, rc-animate@^3.0.0-rc.4, rc-animate@^3.0.0-rc.5: + version "3.0.0-rc.6" + resolved "https://registry.yarnpkg.com/rc-animate/-/rc-animate-3.0.0-rc.6.tgz#04288eefa118e0cae214536c8a903ffaac1bc3fb" + dependencies: + babel-runtime "6.x" + classnames "^2.2.5" + component-classes "^1.2.6" + fbjs "^0.8.16" + prop-types "15.x" + raf "^3.4.0" + rc-util "^4.5.0" + react-lifecycles-compat "^3.0.4" + +rc-calendar@~9.12.1: + version "9.12.4" + resolved "https://registry.yarnpkg.com/rc-calendar/-/rc-calendar-9.12.4.tgz#68ee3a857b5341d780d9473541926cfe0b449154" + dependencies: + babel-runtime "6.x" + classnames "2.x" + moment "2.x" + prop-types "^15.5.8" + rc-trigger "^2.2.0" + rc-util "^4.1.1" + react-lifecycles-compat "^3.0.4" + +rc-cascader@~0.17.0: + version "0.17.3" + resolved "https://registry.yarnpkg.com/rc-cascader/-/rc-cascader-0.17.3.tgz#d68ee44fceff11fbf7ab1beb48a89a2c23dcd57a" + dependencies: + array-tree-filter "^2.1.0" + prop-types "^15.5.8" + rc-trigger "^2.2.0" + rc-util "^4.0.4" + react-lifecycles-compat "^3.0.4" + shallow-equal "^1.0.0" + warning "^4.0.1" + +rc-checkbox@~2.1.5: + version "2.1.6" + resolved "https://registry.yarnpkg.com/rc-checkbox/-/rc-checkbox-2.1.6.tgz#5dc00653e5277018c431fec55e38b91c1f976e90" + dependencies: + babel-runtime "^6.23.0" + classnames "2.x" + prop-types "15.x" + rc-util "^4.0.4" + +rc-collapse@~1.11.1: + version "1.11.1" + resolved "https://registry.yarnpkg.com/rc-collapse/-/rc-collapse-1.11.1.tgz#4aa0977adbf5229d7db58205646b13905add72ad" + dependencies: + classnames "2.x" + css-animation "1.x" + prop-types "^15.5.6" + rc-animate "2.x" + react-is "^16.7.0" + shallowequal "^1.1.0" + +rc-dialog@~7.3.0: + version "7.3.1" + resolved "https://registry.yarnpkg.com/rc-dialog/-/rc-dialog-7.3.1.tgz#45041ec35bfc8e337c91b64b52cebef6ea5cd4a2" + dependencies: + babel-runtime "6.x" + rc-animate "2.x" + rc-util "^4.4.0" + +rc-drawer@~1.7.6: + version "1.7.8" + resolved "https://registry.yarnpkg.com/rc-drawer/-/rc-drawer-1.7.8.tgz#e4d0659dc203909e5ffacbac8a68926e0c222fb5" + dependencies: + babel-runtime "6.x" + classnames "^2.2.5" + prop-types "^15.5.0" + rc-util "^4.5.1" + +rc-dropdown@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/rc-dropdown/-/rc-dropdown-2.4.1.tgz#aaef6eb3a5152cdd9982895c2a78d9b5f046cdec" + dependencies: + babel-runtime "^6.26.0" + classnames "^2.2.6" + prop-types "^15.5.8" + rc-trigger "^2.5.1" + react-lifecycles-compat "^3.0.2" + +rc-editor-core@~0.8.3: + version "0.8.9" + resolved "https://registry.yarnpkg.com/rc-editor-core/-/rc-editor-core-0.8.9.tgz#f611952c8eed965e3e348d84ae7be885daeb221c" + dependencies: + babel-runtime "^6.26.0" + classnames "^2.2.5" + draft-js "^0.10.0" + immutable "^3.7.4" + lodash "^4.16.5" + prop-types "^15.5.8" + setimmediate "^1.0.5" + +rc-editor-mention@^1.1.7: + version "1.1.12" + resolved "https://registry.yarnpkg.com/rc-editor-mention/-/rc-editor-mention-1.1.12.tgz#896bcb172112f18812e96fdd33ba603c0fc7306a" + dependencies: + babel-runtime "^6.23.0" + classnames "^2.2.5" + dom-scroll-into-view "^1.2.0" + draft-js "~0.10.0" + immutable "^3.7.4" + prop-types "^15.5.8" + rc-animate "^2.3.0" + rc-editor-core "~0.8.3" + +rc-form@^2.4.0: + version "2.4.4" + resolved "https://registry.yarnpkg.com/rc-form/-/rc-form-2.4.4.tgz#ac0f3cc643724991c742d5ae6742777fbc0bcfbb" + dependencies: + async-validator "~1.8.5" + babel-runtime "6.x" + create-react-class "^15.5.3" + dom-scroll-into-view "1.x" + hoist-non-react-statics "^3.3.0" + lodash "^4.17.4" + warning "^4.0.3" + +rc-hammerjs@~0.6.0: 
+ version "0.6.9" + resolved "https://registry.yarnpkg.com/rc-hammerjs/-/rc-hammerjs-0.6.9.tgz#9a4ddbda1b2ec8f9b9596091a6a989842a243907" + dependencies: + babel-runtime "6.x" + hammerjs "^2.0.8" + prop-types "^15.5.9" + +rc-input-number@~4.4.0: + version "4.4.2" + resolved "https://registry.yarnpkg.com/rc-input-number/-/rc-input-number-4.4.2.tgz#3abeaadd2cbef59eb350f09f7c0f8c743b584f13" + dependencies: + babel-runtime "6.x" + classnames "^2.2.0" + prop-types "^15.5.7" + rc-util "^4.5.1" + rmc-feedback "^2.0.0" + +rc-menu@^7.3.0, rc-menu@~7.4.12: + version "7.4.22" + resolved "https://registry.yarnpkg.com/rc-menu/-/rc-menu-7.4.22.tgz#3305517cc284ba8979a45a0d16271780adafb2ee" + dependencies: + babel-runtime "6.x" + classnames "2.x" + dom-scroll-into-view "1.x" + ismobilejs "^0.5.1" + mini-store "^2.0.0" + mutationobserver-shim "^0.3.2" + prop-types "^15.5.6" + rc-animate "2.x" + rc-trigger "^2.3.0" + rc-util "^4.1.0" + resize-observer-polyfill "^1.5.0" + +rc-notification@~3.3.0: + version "3.3.1" + resolved "https://registry.yarnpkg.com/rc-notification/-/rc-notification-3.3.1.tgz#0baa3e70f8d40ab015ce8fa78c260c490fc7beb4" + dependencies: + babel-runtime "6.x" + classnames "2.x" + prop-types "^15.5.8" + rc-animate "2.x" + rc-util "^4.0.4" + +rc-pagination@~1.17.7: + version "1.17.14" + resolved "https://registry.yarnpkg.com/rc-pagination/-/rc-pagination-1.17.14.tgz#ffb2882fd89d95b3b603938dc5db2fb2c30026d3" + dependencies: + babel-runtime "6.x" + prop-types "^15.5.7" + react-lifecycles-compat "^3.0.4" + +rc-progress@~2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/rc-progress/-/rc-progress-2.3.0.tgz#cfbd07ff9026c450100980de209a92650e24f313" + dependencies: + babel-runtime "6.x" + prop-types "^15.5.8" + +rc-rate@~2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/rc-rate/-/rc-rate-2.5.0.tgz#72d4984a03d0a7a0e6779c7a79efcea27626abf6" + dependencies: + classnames "^2.2.5" + prop-types "^15.5.8" + rc-util "^4.3.0" + react-lifecycles-compat "^3.0.4" + +rc-select@~9.1.0: + version "9.1.1" + resolved "https://registry.yarnpkg.com/rc-select/-/rc-select-9.1.1.tgz#c4218945502a95f77a11a99d380cf163ae1d2b72" + dependencies: + babel-runtime "^6.23.0" + classnames "2.x" + component-classes "1.x" + dom-scroll-into-view "1.x" + prop-types "^15.5.8" + raf "^3.4.0" + rc-animate "2.x" + rc-menu "^7.3.0" + rc-trigger "^2.5.4" + rc-util "^4.0.4" + react-lifecycles-compat "^3.0.2" + warning "^4.0.2" + +rc-slider@~8.6.5: + version "8.6.9" + resolved "https://registry.yarnpkg.com/rc-slider/-/rc-slider-8.6.9.tgz#b32148a498c927c93f20dc1f95e8682c4924bf8e" + dependencies: + babel-runtime "6.x" + classnames "^2.2.5" + prop-types "^15.5.4" + rc-tooltip "^3.7.0" + rc-util "^4.0.4" + shallowequal "^1.0.1" + warning "^4.0.3" + +rc-steps@~3.3.0: + version "3.3.1" + resolved "https://registry.yarnpkg.com/rc-steps/-/rc-steps-3.3.1.tgz#4877e2897331e3bfdb6b789e88aea78f4f15f732" + dependencies: + babel-runtime "^6.23.0" + classnames "^2.2.3" + lodash "^4.17.5" + prop-types "^15.5.7" + +rc-switch@~1.9.0: + version "1.9.0" + resolved "https://registry.yarnpkg.com/rc-switch/-/rc-switch-1.9.0.tgz#ab2b878f2713c681358a453391976c9b95b290f7" + dependencies: + classnames "^2.2.1" + prop-types "^15.5.6" + react-lifecycles-compat "^3.0.4" + +rc-table@~6.5.0: + version "6.5.0" + resolved "https://registry.yarnpkg.com/rc-table/-/rc-table-6.5.0.tgz#0887240342f8f3a764c954638cc46dea45921a14" + dependencies: + babel-runtime "6.x" + classnames "^2.2.5" + component-classes "^1.2.6" + lodash "^4.17.5" + mini-store 
"^2.0.0" + prop-types "^15.5.8" + rc-util "^4.0.4" + react-lifecycles-compat "^3.0.2" + shallowequal "^1.0.2" + warning "^3.0.0" + +rc-tabs@~9.6.0: + version "9.6.3" + resolved "https://registry.yarnpkg.com/rc-tabs/-/rc-tabs-9.6.3.tgz#5ee02816521769f92e28dc0360a2eb1214b53075" + dependencies: + babel-runtime "6.x" + classnames "2.x" + create-react-context "0.2.2" + lodash "^4.17.5" + prop-types "15.x" + raf "^3.4.1" + rc-hammerjs "~0.6.0" + rc-util "^4.0.4" + resize-observer-polyfill "^1.5.1" + warning "^3.0.0" + +rc-time-picker@~3.6.1: + version "3.6.4" + resolved "https://registry.yarnpkg.com/rc-time-picker/-/rc-time-picker-3.6.4.tgz#3feb05ec88f9c91701d4f691025e9bad9085af3c" + dependencies: + classnames "2.x" + moment "2.x" + prop-types "^15.5.8" + rc-trigger "^2.2.0" + +rc-tooltip@^3.7.0, rc-tooltip@~3.7.3: + version "3.7.3" + resolved "https://registry.yarnpkg.com/rc-tooltip/-/rc-tooltip-3.7.3.tgz#280aec6afcaa44e8dff0480fbaff9e87fc00aecc" + dependencies: + babel-runtime "6.x" + prop-types "^15.5.8" + rc-trigger "^2.2.2" + +rc-tree-select@~2.6.0: + version "2.6.3" + resolved "https://registry.yarnpkg.com/rc-tree-select/-/rc-tree-select-2.6.3.tgz#376c368f0bd60d266f2740b99ab80f1a00bd44ad" + dependencies: + classnames "^2.2.1" + dom-scroll-into-view "^1.2.1" + prop-types "^15.5.8" + raf "^3.4.0" + rc-animate "^3.0.0-rc.4" + rc-tree "~1.15.0" + rc-trigger "^3.0.0-rc.2" + rc-util "^4.5.0" + react-lifecycles-compat "^3.0.4" + shallowequal "^1.0.2" + warning "^4.0.1" + +rc-tree@~1.15.0, rc-tree@~1.15.2: + version "1.15.3" + resolved "https://registry.yarnpkg.com/rc-tree/-/rc-tree-1.15.3.tgz#4924814c468dc8095cac212fa38ba2b1cb6cb5c9" + dependencies: + babel-runtime "^6.23.0" + classnames "2.x" + prop-types "^15.5.8" + rc-animate "^3.0.0-rc.5" + rc-util "^4.5.1" + react-lifecycles-compat "^3.0.4" + warning "^3.0.0" + +rc-trigger@^2.2.0, rc-trigger@^2.2.2, rc-trigger@^2.3.0, rc-trigger@^2.5.1, rc-trigger@^2.5.4, rc-trigger@^2.6.2: + version "2.6.2" + resolved "https://registry.yarnpkg.com/rc-trigger/-/rc-trigger-2.6.2.tgz#a9c09ba5fad63af3b2ec46349c7db6cb46657001" + dependencies: + babel-runtime "6.x" + classnames "^2.2.6" + prop-types "15.x" + rc-align "^2.4.0" + rc-animate "2.x" + rc-util "^4.4.0" + +rc-trigger@^3.0.0-rc.2: + version "3.0.0-rc.3" + resolved "https://registry.yarnpkg.com/rc-trigger/-/rc-trigger-3.0.0-rc.3.tgz#35842df1674d25315e1426a44882a4c97652258b" + dependencies: + babel-runtime "6.x" + classnames "^2.2.6" + prop-types "15.x" + raf "^3.4.0" + rc-align "^2.4.1" + rc-animate "^3.0.0-rc.1" + rc-util "^4.4.0" + +rc-upload@~2.6.0: + version "2.6.4" + resolved "https://registry.yarnpkg.com/rc-upload/-/rc-upload-2.6.4.tgz#4c2eb84b83003ca80028fb2411d420377e43846a" + dependencies: + babel-runtime "6.x" + classnames "^2.2.5" + prop-types "^15.5.7" + warning "4.x" + +rc-util@^4.0.4, rc-util@^4.1.0, rc-util@^4.1.1, rc-util@^4.3.0, rc-util@^4.4.0, rc-util@^4.5.0, rc-util@^4.5.1: + version "4.6.0" + resolved "https://registry.yarnpkg.com/rc-util/-/rc-util-4.6.0.tgz#ba33721783192ec4f3afb259e182b04e55deb7f6" + dependencies: + add-dom-event-listener "^1.1.0" + babel-runtime "6.x" + prop-types "^15.5.10" + shallowequal "^0.2.2" + +rc@^1.2.7: + version "1.2.8" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" + dependencies: + deep-extend "^0.6.0" + ini "~1.3.0" + minimist "^1.2.0" + strip-json-comments "~2.0.1" + +react-app-polyfill@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/react-app-polyfill/-/react-app-polyfill-1.0.0.tgz#735c725c1d4261b710cb200c498789c8b80e0f74" + dependencies: + core-js "3.0.1" + object-assign "4.1.1" + promise "8.0.2" + raf "3.4.1" + regenerator-runtime "0.13.2" + whatwg-fetch "3.0.0" + +react-app-rewired@^2.1.3: + version "2.1.3" + resolved "https://registry.yarnpkg.com/react-app-rewired/-/react-app-rewired-2.1.3.tgz#5ae8583ecc9f9f968d40b735d2abbe871378a52f" + dependencies: + cross-spawn "^6.0.5" + dotenv "^6.2.0" + semver "^5.6.0" + +react-dev-utils@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/react-dev-utils/-/react-dev-utils-9.0.0.tgz#356d95db442441c5d4748e0e49f4fd1e71aecbbd" + dependencies: + "@babel/code-frame" "7.0.0" + address "1.0.3" + browserslist "4.5.4" + chalk "2.4.2" + cross-spawn "6.0.5" + detect-port-alt "1.1.6" + escape-string-regexp "1.0.5" + filesize "3.6.1" + find-up "3.0.0" + fork-ts-checker-webpack-plugin "1.0.1" + global-modules "2.0.0" + globby "8.0.2" + gzip-size "5.0.0" + immer "1.10.0" + inquirer "6.2.2" + is-root "2.0.0" + loader-utils "1.2.3" + opn "5.4.0" + pkg-up "2.0.0" + react-error-overlay "^5.1.5" + recursive-readdir "2.2.2" + shell-quote "1.6.1" + sockjs-client "1.3.0" + strip-ansi "5.2.0" + text-table "0.2.0" + +react-dom@^16.8.6: + version "16.8.6" + resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-16.8.6.tgz#71d6303f631e8b0097f56165ef608f051ff6e10f" + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + prop-types "^15.6.2" + scheduler "^0.13.6" + +react-error-overlay@^5.1.5: + version "5.1.5" + resolved "https://registry.yarnpkg.com/react-error-overlay/-/react-error-overlay-5.1.5.tgz#884530fd055476c764eaa8ab13b8ecf1f57bbf2c" + +react-is@^16.6.0, react-is@^16.7.0, react-is@^16.8.1, react-is@^16.8.4: + version "16.8.6" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.8.6.tgz#5bbc1e2d29141c9fbdfed456343fe2bc430a6a16" + +react-lazy-load@^3.0.13: + version "3.0.13" + resolved "https://registry.yarnpkg.com/react-lazy-load/-/react-lazy-load-3.0.13.tgz#3b0a92d336d43d3f0d73cbe6f35b17050b08b824" + dependencies: + eventlistener "0.0.1" + lodash.debounce "^4.0.0" + lodash.throttle "^4.0.0" + prop-types "^15.5.8" + +react-lifecycles-compat@^3.0.2, react-lifecycles-compat@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz#4f1a273afdfc8f3488a8c516bfda78f872352362" + +react-router-dom@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-5.0.0.tgz#542a9b86af269a37f0b87218c4c25ea8dcf0c073" + dependencies: + "@babel/runtime" "^7.1.2" + history "^4.9.0" + loose-envify "^1.3.1" + prop-types "^15.6.2" + react-router "5.0.0" + tiny-invariant "^1.0.2" + tiny-warning "^1.0.0" + +react-router@5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/react-router/-/react-router-5.0.0.tgz#349863f769ffc2fa10ee7331a4296e86bc12879d" + dependencies: + "@babel/runtime" "^7.1.2" + create-react-context "^0.2.2" + history "^4.9.0" + hoist-non-react-statics "^3.1.0" + loose-envify "^1.3.1" + path-to-regexp "^1.7.0" + prop-types "^15.6.2" + react-is "^16.6.0" + tiny-invariant "^1.0.2" + tiny-warning "^1.0.0" + +react-scripts@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/react-scripts/-/react-scripts-3.0.0.tgz#a715613ef3eace025907b409cec8505096e0233e" + dependencies: + "@babel/core" "7.4.3" + "@svgr/webpack" "4.1.0" + "@typescript-eslint/eslint-plugin" "1.6.0" + "@typescript-eslint/parser" "1.6.0" 
+ babel-eslint "10.0.1" + babel-jest "24.7.1" + babel-loader "8.0.5" + babel-plugin-named-asset-import "^0.3.2" + babel-preset-react-app "^8.0.0" + case-sensitive-paths-webpack-plugin "2.2.0" + css-loader "2.1.1" + dotenv "6.2.0" + dotenv-expand "4.2.0" + eslint "^5.16.0" + eslint-config-react-app "^4.0.0" + eslint-loader "2.1.2" + eslint-plugin-flowtype "2.50.1" + eslint-plugin-import "2.16.0" + eslint-plugin-jsx-a11y "6.2.1" + eslint-plugin-react "7.12.4" + eslint-plugin-react-hooks "^1.5.0" + file-loader "3.0.1" + fs-extra "7.0.1" + html-webpack-plugin "4.0.0-beta.5" + identity-obj-proxy "3.0.0" + is-wsl "^1.1.0" + jest "24.7.1" + jest-environment-jsdom-fourteen "0.1.0" + jest-resolve "24.7.1" + jest-watch-typeahead "0.3.0" + mini-css-extract-plugin "0.5.0" + optimize-css-assets-webpack-plugin "5.0.1" + pnp-webpack-plugin "1.2.1" + postcss-flexbugs-fixes "4.1.0" + postcss-loader "3.0.0" + postcss-normalize "7.0.1" + postcss-preset-env "6.6.0" + postcss-safe-parser "4.0.1" + react-app-polyfill "^1.0.0" + react-dev-utils "^9.0.0" + resolve "1.10.0" + sass-loader "7.1.0" + semver "6.0.0" + style-loader "0.23.1" + terser-webpack-plugin "1.2.3" + url-loader "1.1.2" + webpack "4.29.6" + webpack-dev-server "3.2.1" + webpack-manifest-plugin "2.0.4" + workbox-webpack-plugin "4.2.0" + optionalDependencies: + fsevents "2.0.6" + +react-slick@~0.24.0: + version "0.24.0" + resolved "https://registry.yarnpkg.com/react-slick/-/react-slick-0.24.0.tgz#1a4e078a82de4e9458255d9ce26aa6f3b17b168b" + dependencies: + classnames "^2.2.5" + enquire.js "^2.1.6" + json2mq "^0.2.0" + lodash.debounce "^4.0.8" + resize-observer-polyfill "^1.5.0" + +react@^16.8.6: + version "16.8.6" + resolved "https://registry.yarnpkg.com/react/-/react-16.8.6.tgz#ad6c3a9614fd3a4e9ef51117f54d888da01f2bbe" + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + prop-types "^15.6.2" + scheduler "^0.13.6" + +read-pkg-up@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-2.0.0.tgz#6b72a8048984e0c41e79510fd5e9fa99b3b549be" + dependencies: + find-up "^2.0.0" + read-pkg "^2.0.0" + +read-pkg-up@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-4.0.0.tgz#1b221c6088ba7799601c808f91161c66e58f8978" + dependencies: + find-up "^3.0.0" + read-pkg "^3.0.0" + +read-pkg@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-2.0.0.tgz#8ef1c0623c6a6db0dc6713c4bfac46332b2368f8" + dependencies: + load-json-file "^2.0.0" + normalize-package-data "^2.3.2" + path-type "^2.0.0" + +read-pkg@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-3.0.0.tgz#9cbc686978fee65d16c00e2b19c237fcf6e38389" + dependencies: + load-json-file "^4.0.0" + normalize-package-data "^2.3.2" + path-type "^3.0.0" + +"readable-stream@1 || 2", readable-stream@^2.0.0, readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.6, readable-stream@^2.1.5, readable-stream@^2.2.2, readable-stream@^2.3.3, readable-stream@^2.3.6, readable-stream@~2.3.6: + version "2.3.6" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +readable-stream@^3.0.6, readable-stream@^3.1.1: + version "3.3.0" + resolved 
"https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.3.0.tgz#cb8011aad002eb717bf040291feba8569c986fb9" + dependencies: + inherits "^2.0.3" + string_decoder "^1.1.1" + util-deprecate "^1.0.1" + +readdirp@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.2.1.tgz#0e87622a3325aa33e892285caf8b4e846529a525" + dependencies: + graceful-fs "^4.1.11" + micromatch "^3.1.10" + readable-stream "^2.0.2" + +realpath-native@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/realpath-native/-/realpath-native-1.1.0.tgz#2003294fea23fb0672f2476ebe22fcf498a2d65c" + dependencies: + util.promisify "^1.0.0" + +recursive-readdir@2.2.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/recursive-readdir/-/recursive-readdir-2.2.2.tgz#9946fb3274e1628de6e36b2f6714953b4845094f" + dependencies: + minimatch "3.0.4" + +regenerate-unicode-properties@^8.0.2: + version "8.0.2" + resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-8.0.2.tgz#7b38faa296252376d363558cfbda90c9ce709662" + dependencies: + regenerate "^1.4.0" + +regenerate@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.0.tgz#4a856ec4b56e4077c557589cae85e7a4c8869a11" + +regenerator-runtime@0.13.2, regenerator-runtime@^0.13.2: + version "0.13.2" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.2.tgz#32e59c9a6fb9b1a4aff09b4930ca2d4477343447" + +regenerator-runtime@^0.11.0: + version "0.11.1" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz#be05ad7f9bf7d22e056f9726cee5017fbf19e2e9" + +regenerator-transform@^0.13.4: + version "0.13.4" + resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.13.4.tgz#18f6763cf1382c69c36df76c6ce122cc694284fb" + dependencies: + private "^0.1.6" + +regex-not@^1.0.0, regex-not@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/regex-not/-/regex-not-1.0.2.tgz#1f4ece27e00b0b65e0247a6810e6a85d83a5752c" + dependencies: + extend-shallow "^3.0.2" + safe-regex "^1.1.0" + +regexp-tree@^0.1.0: + version "0.1.5" + resolved "https://registry.yarnpkg.com/regexp-tree/-/regexp-tree-0.1.5.tgz#7cd71fca17198d04b4176efd79713f2998009397" + +regexpp@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-2.0.1.tgz#8d19d31cf632482b589049f8281f93dbcba4d07f" + +regexpu-core@^4.5.4: + version "4.5.4" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-4.5.4.tgz#080d9d02289aa87fe1667a4f5136bc98a6aebaae" + dependencies: + regenerate "^1.4.0" + regenerate-unicode-properties "^8.0.2" + regjsgen "^0.5.0" + regjsparser "^0.6.0" + unicode-match-property-ecmascript "^1.0.4" + unicode-match-property-value-ecmascript "^1.1.0" + +regjsgen@^0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.5.0.tgz#a7634dc08f89209c2049adda3525711fb97265dd" + +regjsparser@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.6.0.tgz#f1e6ae8b7da2bae96c99399b868cd6c933a2ba9c" + dependencies: + jsesc "~0.5.0" + +rehype-parse@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/rehype-parse/-/rehype-parse-6.0.0.tgz#f681555f2598165bee2c778b39f9073d17b16bca" + dependencies: + hast-util-from-parse5 "^5.0.0" + parse5 "^5.0.0" + xtend "^4.0.1" + +relateurl@0.2.x: + version "0.2.7" + resolved 
"https://registry.yarnpkg.com/relateurl/-/relateurl-0.2.7.tgz#54dbf377e51440aca90a4cd274600d3ff2d888a9" + +remove-trailing-separator@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef" + +renderkid@^2.0.1: + version "2.0.3" + resolved "https://registry.yarnpkg.com/renderkid/-/renderkid-2.0.3.tgz#380179c2ff5ae1365c522bf2fcfcff01c5b74149" + dependencies: + css-select "^1.1.0" + dom-converter "^0.2" + htmlparser2 "^3.3.0" + strip-ansi "^3.0.0" + utila "^0.4.0" + +repeat-element@^1.1.2: + version "1.1.3" + resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.3.tgz#782e0d825c0c5a3bb39731f84efee6b742e6b1ce" + +repeat-string@^1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" + +replace-ext@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/replace-ext/-/replace-ext-1.0.0.tgz#de63128373fcbf7c3ccfa4de5a480c45a67958eb" + +request-promise-core@1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/request-promise-core/-/request-promise-core-1.1.2.tgz#339f6aababcafdb31c799ff158700336301d3346" + dependencies: + lodash "^4.17.11" + +request-promise-native@^1.0.5: + version "1.0.7" + resolved "https://registry.yarnpkg.com/request-promise-native/-/request-promise-native-1.0.7.tgz#a49868a624bdea5069f1251d0a836e0d89aa2c59" + dependencies: + request-promise-core "1.1.2" + stealthy-require "^1.1.1" + tough-cookie "^2.3.3" + +request@^2.83.0, request@^2.87.0, request@^2.88.0: + version "2.88.0" + resolved "https://registry.yarnpkg.com/request/-/request-2.88.0.tgz#9c2fca4f7d35b592efe57c7f0a55e81052124fef" + dependencies: + aws-sign2 "~0.7.0" + aws4 "^1.8.0" + caseless "~0.12.0" + combined-stream "~1.0.6" + extend "~3.0.2" + forever-agent "~0.6.1" + form-data "~2.3.2" + har-validator "~5.1.0" + http-signature "~1.2.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.19" + oauth-sign "~0.9.0" + performance-now "^2.1.0" + qs "~6.5.2" + safe-buffer "^5.1.2" + tough-cookie "~2.4.3" + tunnel-agent "^0.6.0" + uuid "^3.3.2" + +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + +require-from-string@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" + +require-main-filename@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1" + +require-main-filename@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" + +requireindex@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/requireindex/-/requireindex-1.2.0.tgz#3463cdb22ee151902635aa6c9535d4de9c2ef1ef" + +requires-port@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" + +resize-observer-polyfill@^1.5.0, resize-observer-polyfill@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz#0e9020dd3d21024458d4ebd27e23e40269810464" + 
+resolve-cwd@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-2.0.0.tgz#00a9f7387556e27038eae232caa372a6a59b665a" + dependencies: + resolve-from "^3.0.0" + +resolve-from@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-3.0.0.tgz#b22c7af7d9d6881bc8b6e653335eebcb0a188748" + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + +resolve-pathname@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/resolve-pathname/-/resolve-pathname-2.2.0.tgz#7e9ae21ed815fd63ab189adeee64dc831eefa879" + +resolve-url@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" + +resolve@1.1.7: + version "1.1.7" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.1.7.tgz#203114d82ad2c5ed9e8e0411b3932875e889e97b" + +resolve@1.10.0, resolve@^1.10.0, resolve@^1.3.2, resolve@^1.5.0, resolve@^1.8.1, resolve@^1.9.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.10.0.tgz#3bdaaeaf45cc07f375656dfd2e54ed0810b101ba" + dependencies: + path-parse "^1.0.6" + +restore-cursor@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf" + dependencies: + onetime "^2.0.0" + signal-exit "^3.0.2" + +ret@~0.1.10: + version "0.1.15" + resolved "https://registry.yarnpkg.com/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc" + +rgb-regex@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/rgb-regex/-/rgb-regex-1.0.1.tgz#c0e0d6882df0e23be254a475e8edd41915feaeb1" + +rgba-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/rgba-regex/-/rgba-regex-1.0.0.tgz#43374e2e2ca0968b0ef1523460b7d730ff22eeb3" + +rimraf@2.6.3, rimraf@^2.2.8, rimraf@^2.5.4, rimraf@^2.6.1, rimraf@^2.6.2: + version "2.6.3" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.3.tgz#b2d104fe0d8fb27cf9e0a1cda8262dd3833c6cab" + dependencies: + glob "^7.1.3" + +ripemd160@^2.0.0, ripemd160@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-2.0.2.tgz#a1c1a6f624751577ba5d07914cbc92850585890c" + dependencies: + hash-base "^3.0.0" + inherits "^2.0.1" + +rmc-feedback@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/rmc-feedback/-/rmc-feedback-2.0.0.tgz#cbc6cb3ae63c7a635eef0e25e4fbaf5ac366eeaa" + dependencies: + babel-runtime "6.x" + classnames "^2.2.5" + +rsvp@^4.8.4: + version "4.8.4" + resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-4.8.4.tgz#b50e6b34583f3dd89329a2f23a8a2be072845911" + +run-async@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/run-async/-/run-async-2.3.0.tgz#0371ab4ae0bdd720d4166d7dfda64ff7a445a6c0" + dependencies: + is-promise "^2.1.0" + +run-queue@^1.0.0, run-queue@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/run-queue/-/run-queue-1.0.3.tgz#e848396f057d223f24386924618e25694161ec47" + dependencies: + aproba "^1.1.1" + +rxjs@^6.4.0: + version "6.4.0" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.4.0.tgz#f3bb0fe7bda7fb69deac0c16f17b50b0b8790504" + dependencies: + tslib "^1.9.0" + +safe-buffer@5.1.2, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved 
"https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + +safe-regex@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/safe-regex/-/safe-regex-1.1.0.tgz#40a3669f3b077d1e943d44629e157dd48023bf2e" + dependencies: + ret "~0.1.10" + +"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + +sane@^4.0.3: + version "4.1.0" + resolved "https://registry.yarnpkg.com/sane/-/sane-4.1.0.tgz#ed881fd922733a6c461bc189dc2b6c006f3ffded" + dependencies: + "@cnakazawa/watch" "^1.0.3" + anymatch "^2.0.0" + capture-exit "^2.0.0" + exec-sh "^0.3.2" + execa "^1.0.0" + fb-watchman "^2.0.0" + micromatch "^3.1.4" + minimist "^1.1.1" + walker "~1.0.5" + +sass-loader@7.1.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/sass-loader/-/sass-loader-7.1.0.tgz#16fd5138cb8b424bf8a759528a1972d72aad069d" + dependencies: + clone-deep "^2.0.1" + loader-utils "^1.0.1" + lodash.tail "^4.1.1" + neo-async "^2.5.0" + pify "^3.0.0" + semver "^5.5.0" + +sax@^1.2.4, sax@~1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" + +saxes@^3.1.9: + version "3.1.9" + resolved "https://registry.yarnpkg.com/saxes/-/saxes-3.1.9.tgz#c1c197cd54956d88c09f960254b999e192d7058b" + dependencies: + xmlchars "^1.3.1" + +scheduler@^0.13.6: + version "0.13.6" + resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.13.6.tgz#466a4ec332467b31a91b9bf74e5347072e4cd889" + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + +schema-utils@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-1.0.0.tgz#0b79a93204d7b600d4b2850d1f66c2a34951c770" + dependencies: + ajv "^6.1.0" + ajv-errors "^1.0.0" + ajv-keywords "^3.1.0" + +select-hose@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/select-hose/-/select-hose-2.0.0.tgz#625d8658f865af43ec962bfc376a37359a4994ca" + +selfsigned@^1.9.1: + version "1.10.4" + resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-1.10.4.tgz#cdd7eccfca4ed7635d47a08bf2d5d3074092e2cd" + dependencies: + node-forge "0.7.5" + +"semver@2 || 3 || 4 || 5", semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.5.1, semver@^5.6.0: + version "5.7.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.0.tgz#790a7cf6fea5459bac96110b29b60412dc8ff96b" + +semver@5.5.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.5.0.tgz#dc4bbc7a6ca9d916dee5d43516f0092b58f7b8ab" + +semver@6.0.0, semver@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.0.0.tgz#05e359ee571e5ad7ed641a6eec1e547ba52dea65" + +send@0.16.2: + version "0.16.2" + resolved "https://registry.yarnpkg.com/send/-/send-0.16.2.tgz#6ecca1e0f8c156d141597559848df64730a6bbc1" + dependencies: + debug "2.6.9" + depd "~1.1.2" + destroy "~1.0.4" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + fresh "0.5.2" + http-errors "~1.6.2" + mime "1.4.1" + ms "2.0.0" + on-finished "~2.3.0" + range-parser "~1.2.0" + statuses "~1.4.0" + +serialize-javascript@^1.4.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-1.7.0.tgz#d6e0dfb2a3832a8c94468e6eb1db97e55a192a65" + +serve-index@^1.7.2: + version "1.9.1" + resolved 
"https://registry.yarnpkg.com/serve-index/-/serve-index-1.9.1.tgz#d3768d69b1e7d82e5ce050fff5b453bea12a9239" + dependencies: + accepts "~1.3.4" + batch "0.6.1" + debug "2.6.9" + escape-html "~1.0.3" + http-errors "~1.6.2" + mime-types "~2.1.17" + parseurl "~1.3.2" + +serve-static@1.13.2: + version "1.13.2" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.13.2.tgz#095e8472fd5b46237db50ce486a43f4b86c6cec1" + dependencies: + encodeurl "~1.0.2" + escape-html "~1.0.3" + parseurl "~1.3.2" + send "0.16.2" + +set-blocking@^2.0.0, set-blocking@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + +set-value@^0.4.3: + version "0.4.3" + resolved "https://registry.yarnpkg.com/set-value/-/set-value-0.4.3.tgz#7db08f9d3d22dc7f78e53af3c3bf4666ecdfccf1" + dependencies: + extend-shallow "^2.0.1" + is-extendable "^0.1.1" + is-plain-object "^2.0.1" + to-object-path "^0.3.0" + +set-value@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-value/-/set-value-2.0.0.tgz#71ae4a88f0feefbbf52d1ea604f3fb315ebb6274" + dependencies: + extend-shallow "^2.0.1" + is-extendable "^0.1.1" + is-plain-object "^2.0.3" + split-string "^3.0.1" + +setimmediate@^1.0.4, setimmediate@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" + +setprototypeof@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.0.tgz#d0bd85536887b6fe7c0d818cb962d9d91c54e656" + +sha.js@^2.4.0, sha.js@^2.4.8: + version "2.4.11" + resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7" + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +shallow-clone@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-0.1.2.tgz#5909e874ba77106d73ac414cfec1ffca87d97060" + dependencies: + is-extendable "^0.1.1" + kind-of "^2.0.1" + lazy-cache "^0.2.3" + mixin-object "^2.0.1" + +shallow-clone@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-1.0.0.tgz#4480cd06e882ef68b2ad88a3ea54832e2c48b571" + dependencies: + is-extendable "^0.1.1" + kind-of "^5.0.0" + mixin-object "^2.0.1" + +shallow-equal@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/shallow-equal/-/shallow-equal-1.1.0.tgz#cc022f030dcba0d1c198abf658a3c6c744e171ca" + +shallowequal@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/shallowequal/-/shallowequal-0.2.2.tgz#1e32fd5bcab6ad688a4812cb0cc04efc75c7014e" + dependencies: + lodash.keys "^3.1.2" + +shallowequal@^1.0.1, shallowequal@^1.0.2, shallowequal@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/shallowequal/-/shallowequal-1.1.0.tgz#188d521de95b9087404fd4dcb68b13df0ae4e7f8" + +shebang-command@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" + dependencies: + shebang-regex "^1.0.0" + +shebang-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" + +shell-quote@1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.6.1.tgz#f4781949cce402697127430ea3b3c5476f481767" + dependencies: + array-filter "~0.0.0" + array-map "~0.0.0" + array-reduce "~0.0.0" + 
jsonify "~0.0.0" + +shellwords@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/shellwords/-/shellwords-0.1.1.tgz#d6b9181c1a48d397324c84871efbcfc73fc0654b" + +signal-exit@^3.0.0, signal-exit@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" + +simple-swizzle@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/simple-swizzle/-/simple-swizzle-0.2.2.tgz#a4da6b635ffcccca33f70d17cb92592de95e557a" + dependencies: + is-arrayish "^0.3.1" + +sisteransi@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.0.tgz#77d9622ff909080f1c19e5f4a1df0c1b0a27b88c" + +slash@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55" + +slash@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-2.0.0.tgz#de552851a1759df3a8f206535442f5ec4ddeab44" + +slice-ansi@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-2.1.0.tgz#cacd7693461a637a5788d92a7dd4fba068e81636" + dependencies: + ansi-styles "^3.2.0" + astral-regex "^1.0.0" + is-fullwidth-code-point "^2.0.0" + +snapdragon-node@^2.0.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/snapdragon-node/-/snapdragon-node-2.1.1.tgz#6c175f86ff14bdb0724563e8f3c1b021a286853b" + dependencies: + define-property "^1.0.0" + isobject "^3.0.0" + snapdragon-util "^3.0.1" + +snapdragon-util@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/snapdragon-util/-/snapdragon-util-3.0.1.tgz#f956479486f2acd79700693f6f7b805e45ab56e2" + dependencies: + kind-of "^3.2.0" + +snapdragon@^0.8.1: + version "0.8.2" + resolved "https://registry.yarnpkg.com/snapdragon/-/snapdragon-0.8.2.tgz#64922e7c565b0e14204ba1aa7d6964278d25182d" + dependencies: + base "^0.11.1" + debug "^2.2.0" + define-property "^0.2.5" + extend-shallow "^2.0.1" + map-cache "^0.2.2" + source-map "^0.5.6" + source-map-resolve "^0.5.0" + use "^3.1.0" + +sockjs-client@1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/sockjs-client/-/sockjs-client-1.3.0.tgz#12fc9d6cb663da5739d3dc5fb6e8687da95cb177" + dependencies: + debug "^3.2.5" + eventsource "^1.0.7" + faye-websocket "~0.11.1" + inherits "^2.0.3" + json3 "^3.3.2" + url-parse "^1.4.3" + +sockjs@0.3.19: + version "0.3.19" + resolved "https://registry.yarnpkg.com/sockjs/-/sockjs-0.3.19.tgz#d976bbe800af7bd20ae08598d582393508993c0d" + dependencies: + faye-websocket "^0.10.0" + uuid "^3.0.1" + +source-list-map@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/source-list-map/-/source-list-map-2.0.1.tgz#3993bd873bfc48479cca9ea3a547835c7c154b34" + +source-map-resolve@^0.5.0: + version "0.5.2" + resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.2.tgz#72e2cc34095543e43b2c62b2c4c10d4a9054f259" + dependencies: + atob "^2.1.1" + decode-uri-component "^0.2.0" + resolve-url "^0.2.1" + source-map-url "^0.4.0" + urix "^0.1.0" + +source-map-support@^0.5.6, source-map-support@~0.5.10: + version "0.5.12" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.12.tgz#b4f3b10d51857a5af0138d3ce8003b201613d599" + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + +source-map-url@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.0.tgz#3e935d7ddd73631b97659956d55128e87b5084a3" + +source-map@^0.5.0, source-map@^0.5.3, 
source-map@^0.5.6: + version "0.5.7" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + +source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0, source-map@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + +space-separated-tokens@^1.0.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/space-separated-tokens/-/space-separated-tokens-1.1.3.tgz#bc6500e116d13285a94b59b58c44c7f045fe6124" + +spdx-correct@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.0.tgz#fb83e504445268f154b074e218c87c003cd31df4" + dependencies: + spdx-expression-parse "^3.0.0" + spdx-license-ids "^3.0.0" + +spdx-exceptions@^2.1.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.2.0.tgz#2ea450aee74f2a89bfb94519c07fcd6f41322977" + +spdx-expression-parse@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz#99e119b7a5da00e05491c9fa338b7904823b41d0" + dependencies: + spdx-exceptions "^2.1.0" + spdx-license-ids "^3.0.0" + +spdx-license-ids@^3.0.0: + version "3.0.4" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.4.tgz#75ecd1a88de8c184ef015eafb51b5b48bfd11bb1" + +spdy-transport@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdy-transport/-/spdy-transport-3.0.0.tgz#00d4863a6400ad75df93361a1608605e5dcdcf31" + dependencies: + debug "^4.1.0" + detect-node "^2.0.4" + hpack.js "^2.1.6" + obuf "^1.1.2" + readable-stream "^3.0.6" + wbuf "^1.7.3" + +spdy@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/spdy/-/spdy-4.0.0.tgz#81f222b5a743a329aa12cea6a390e60e9b613c52" + dependencies: + debug "^4.1.0" + handle-thing "^2.0.0" + http-deceiver "^1.2.7" + select-hose "^2.0.0" + spdy-transport "^3.0.0" + +split-string@^3.0.1, split-string@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/split-string/-/split-string-3.1.0.tgz#7cb09dda3a86585705c64b39a6466038682e8fe2" + dependencies: + extend-shallow "^3.0.0" + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + +sshpk@^1.7.0: + version "1.16.1" + resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.16.1.tgz#fb661c0bef29b39db40769ee39fa70093d6f6877" + dependencies: + asn1 "~0.2.3" + assert-plus "^1.0.0" + bcrypt-pbkdf "^1.0.0" + dashdash "^1.12.0" + ecc-jsbn "~0.1.1" + getpass "^0.1.1" + jsbn "~0.1.0" + safer-buffer "^2.0.2" + tweetnacl "~0.14.0" + +ssri@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/ssri/-/ssri-6.0.1.tgz#2a3c41b28dd45b62b63676ecb74001265ae9edd8" + dependencies: + figgy-pudding "^3.5.1" + +stable@^0.1.8: + version "0.1.8" + resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf" + +stack-utils@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-1.0.2.tgz#33eba3897788558bebfc2db059dc158ec36cebb8" + +static-extend@^0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/static-extend/-/static-extend-0.1.2.tgz#60809c39cbff55337226fd5e0b520f341f1fb5c6" + dependencies: + define-property "^0.2.5" + object-copy "^0.1.0" + +"statuses@>= 1.4.0 < 2": + version "1.5.0" + resolved 
"https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" + +statuses@~1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.4.0.tgz#bb73d446da2796106efcc1b601a253d6c46bd087" + +stealthy-require@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/stealthy-require/-/stealthy-require-1.1.1.tgz#35b09875b4ff49f26a777e509b3090a3226bf24b" + +stream-browserify@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/stream-browserify/-/stream-browserify-2.0.2.tgz#87521d38a44aa7ee91ce1cd2a47df0cb49dd660b" + dependencies: + inherits "~2.0.1" + readable-stream "^2.0.2" + +stream-each@^1.1.0: + version "1.2.3" + resolved "https://registry.yarnpkg.com/stream-each/-/stream-each-1.2.3.tgz#ebe27a0c389b04fbcc233642952e10731afa9bae" + dependencies: + end-of-stream "^1.1.0" + stream-shift "^1.0.0" + +stream-http@^2.7.2: + version "2.8.3" + resolved "https://registry.yarnpkg.com/stream-http/-/stream-http-2.8.3.tgz#b2d242469288a5a27ec4fe8933acf623de6514fc" + dependencies: + builtin-status-codes "^3.0.0" + inherits "^2.0.1" + readable-stream "^2.3.6" + to-arraybuffer "^1.0.0" + xtend "^4.0.0" + +stream-shift@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/stream-shift/-/stream-shift-1.0.0.tgz#d5c752825e5367e786f78e18e445ea223a155952" + +string-convert@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/string-convert/-/string-convert-0.2.1.tgz#6982cc3049fbb4cd85f8b24568b9d9bf39eeff97" + +string-length@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/string-length/-/string-length-2.0.0.tgz#d40dbb686a3ace960c1cffca562bf2c45f8363ed" + dependencies: + astral-regex "^1.0.0" + strip-ansi "^4.0.0" + +string-width@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" + dependencies: + code-point-at "^1.0.0" + is-fullwidth-code-point "^1.0.0" + strip-ansi "^3.0.0" + +"string-width@^1.0.2 || 2", string-width@^2.0.0, string-width@^2.1.0, string-width@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +string-width@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961" + dependencies: + emoji-regex "^7.0.1" + is-fullwidth-code-point "^2.0.0" + strip-ansi "^5.1.0" + +string_decoder@^1.0.0, string_decoder@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.2.0.tgz#fe86e738b19544afe70469243b2a1ee9240eae8d" + dependencies: + safe-buffer "~5.1.0" + +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + dependencies: + safe-buffer "~5.1.0" + +stringify-object@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/stringify-object/-/stringify-object-3.3.0.tgz#703065aefca19300d3ce88af4f5b3956d7556629" + dependencies: + get-own-enumerable-property-symbols "^3.0.0" + is-obj "^1.0.1" + is-regexp "^1.0.0" + +strip-ansi@5.2.0, strip-ansi@^5.0.0, strip-ansi@^5.1.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae" + dependencies: + ansi-regex "^4.1.0" + +strip-ansi@^3.0.0, 
strip-ansi@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + dependencies: + ansi-regex "^2.0.0" + +strip-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" + dependencies: + ansi-regex "^3.0.0" + +strip-bom@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" + +strip-comments@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/strip-comments/-/strip-comments-1.0.2.tgz#82b9c45e7f05873bee53f37168af930aa368679d" + dependencies: + babel-extract-comments "^1.0.0" + babel-plugin-transform-object-rest-spread "^6.26.0" + +strip-eof@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf" + +strip-json-comments@^2.0.1, strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + +style-loader@0.23.1: + version "0.23.1" + resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-0.23.1.tgz#cb9154606f3e771ab6c4ab637026a1049174d925" + dependencies: + loader-utils "^1.1.0" + schema-utils "^1.0.0" + +stylehacks@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/stylehacks/-/stylehacks-4.0.3.tgz#6718fcaf4d1e07d8a1318690881e8d96726a71d5" + dependencies: + browserslist "^4.0.0" + postcss "^7.0.0" + postcss-selector-parser "^3.0.0" + +supports-color@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + +supports-color@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + dependencies: + has-flag "^3.0.0" + +supports-color@^6.0.0, supports-color@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-6.1.0.tgz#0764abc69c63d5ac842dd4867e8d025e880df8f3" + dependencies: + has-flag "^3.0.0" + +svgo@^1.0.0, svgo@^1.2.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/svgo/-/svgo-1.2.2.tgz#0253d34eccf2aed4ad4f283e11ee75198f9d7316" + dependencies: + chalk "^2.4.1" + coa "^2.0.2" + css-select "^2.0.0" + css-select-base-adapter "^0.1.1" + css-tree "1.0.0-alpha.28" + css-url-regex "^1.1.0" + csso "^3.5.1" + js-yaml "^3.13.1" + mkdirp "~0.5.1" + object.values "^1.1.0" + sax "~1.2.4" + stable "^0.1.8" + unquote "~1.1.1" + util.promisify "~1.0.0" + +symbol-tree@^3.2.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.2.tgz#ae27db38f660a7ae2e1c3b7d1bc290819b8519e6" + +table@^5.2.3: + version "5.2.3" + resolved "https://registry.yarnpkg.com/table/-/table-5.2.3.tgz#cde0cc6eb06751c009efab27e8c820ca5b67b7f2" + dependencies: + ajv "^6.9.1" + lodash "^4.17.11" + slice-ansi "^2.1.0" + string-width "^3.0.0" + +tapable@^1.0.0, tapable@^1.1.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/tapable/-/tapable-1.1.3.tgz#a1fccc06b58db61fd7a45da2da44f5f3a3e67ba2" + +tar@^4: + version "4.4.8" + resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.8.tgz#b19eec3fde2a96e64666df9fdb40c5ca1bc3747d" + dependencies: + chownr "^1.1.1" + fs-minipass "^1.2.5" + minipass "^2.3.4" + minizlib "^1.1.1" + mkdirp "^0.5.0" + safe-buffer 
"^5.1.2" + yallist "^3.0.2" + +terser-webpack-plugin@1.2.3, terser-webpack-plugin@^1.1.0: + version "1.2.3" + resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-1.2.3.tgz#3f98bc902fac3e5d0de730869f50668561262ec8" + dependencies: + cacache "^11.0.2" + find-cache-dir "^2.0.0" + schema-utils "^1.0.0" + serialize-javascript "^1.4.0" + source-map "^0.6.1" + terser "^3.16.1" + webpack-sources "^1.1.0" + worker-farm "^1.5.2" + +terser@^3.16.1: + version "3.17.0" + resolved "https://registry.yarnpkg.com/terser/-/terser-3.17.0.tgz#f88ffbeda0deb5637f9d24b0da66f4e15ab10cb2" + dependencies: + commander "^2.19.0" + source-map "~0.6.1" + source-map-support "~0.5.10" + +test-exclude@^5.2.2: + version "5.2.2" + resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-5.2.2.tgz#7322f8ab037b0b93ad2aab35fe9068baf997a4c4" + dependencies: + glob "^7.1.3" + minimatch "^3.0.4" + read-pkg-up "^4.0.0" + require-main-filename "^2.0.0" + +text-table@0.2.0, text-table@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + +throat@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/throat/-/throat-4.1.0.tgz#89037cbc92c56ab18926e6ba4cbb200e15672a6a" + +through2@^2.0.0: + version "2.0.5" + resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd" + dependencies: + readable-stream "~2.3.6" + xtend "~4.0.1" + +through@^2.3.6: + version "2.3.8" + resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + +thunky@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/thunky/-/thunky-1.0.3.tgz#f5df732453407b09191dae73e2a8cc73f381a826" + +timers-browserify@^2.0.4: + version "2.0.10" + resolved "https://registry.yarnpkg.com/timers-browserify/-/timers-browserify-2.0.10.tgz#1d28e3d2aadf1d5a5996c4e9f95601cd053480ae" + dependencies: + setimmediate "^1.0.4" + +timsort@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/timsort/-/timsort-0.3.0.tgz#405411a8e7e6339fe64db9a234de11dc31e02bd4" + +tiny-invariant@^1.0.2: + version "1.0.4" + resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.0.4.tgz#346b5415fd93cb696b0c4e8a96697ff590f92463" + +tiny-warning@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/tiny-warning/-/tiny-warning-1.0.2.tgz#1dfae771ee1a04396bdfde27a3adcebc6b648b28" + +tinycolor2@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/tinycolor2/-/tinycolor2-1.4.1.tgz#f4fad333447bc0b07d4dc8e9209d8f39a8ac77e8" + +tmp@^0.0.33: + version "0.0.33" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" + dependencies: + os-tmpdir "~1.0.2" + +tmpl@1.0.x: + version "1.0.4" + resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.4.tgz#23640dd7b42d00433911140820e5cf440e521dd1" + +to-arraybuffer@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz#7d229b1fcc637e466ca081180836a7aabff83f43" + +to-fast-properties@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" + +to-object-path@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/to-object-path/-/to-object-path-0.3.0.tgz#297588b7b0e7e0ac08e04e672f85c1f4999e17af" + dependencies: + kind-of "^3.0.2" + +to-regex-range@^2.1.0: + version 
"2.1.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-2.1.1.tgz#7c80c17b9dfebe599e27367e0d4dd5590141db38" + dependencies: + is-number "^3.0.0" + repeat-string "^1.6.1" + +to-regex@^3.0.1, to-regex@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/to-regex/-/to-regex-3.0.2.tgz#13cfdd9b336552f30b51f33a8ae1b42a7a7599ce" + dependencies: + define-property "^2.0.2" + extend-shallow "^3.0.2" + regex-not "^1.0.2" + safe-regex "^1.1.0" + +toggle-selection@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/toggle-selection/-/toggle-selection-1.0.6.tgz#6e45b1263f2017fa0acc7d89d78b15b8bf77da32" + +topo@3.x.x: + version "3.0.3" + resolved "https://registry.yarnpkg.com/topo/-/topo-3.0.3.tgz#d5a67fb2e69307ebeeb08402ec2a2a6f5f7ad95c" + dependencies: + hoek "6.x.x" + +tough-cookie@^2.3.3, tough-cookie@^2.3.4, tough-cookie@^2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" + dependencies: + psl "^1.1.28" + punycode "^2.1.1" + +tough-cookie@~2.4.3: + version "2.4.3" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.4.3.tgz#53f36da3f47783b0925afa06ff9f3b165280f781" + dependencies: + psl "^1.1.24" + punycode "^1.4.1" + +tr46@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/tr46/-/tr46-1.0.1.tgz#a8b13fd6bfd2489519674ccde55ba3693b706d09" + dependencies: + punycode "^2.1.0" + +trim-right@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003" + +trough@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.3.tgz#e29bd1614c6458d44869fc28b255ab7857ef7c24" + +ts-pnp@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/ts-pnp/-/ts-pnp-1.1.2.tgz#be8e4bfce5d00f0f58e0666a82260c34a57af552" + +tslib@^1.8.1, tslib@^1.9.0: + version "1.9.3" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.9.3.tgz#d7e4dd79245d85428c4d7e4822a79917954ca286" + +tsutils@^3.7.0: + version "3.10.0" + resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-3.10.0.tgz#6f1c95c94606e098592b0dff06590cf9659227d6" + dependencies: + tslib "^1.8.1" + +tty-browserify@0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/tty-browserify/-/tty-browserify-0.0.0.tgz#a157ba402da24e9bf957f9aa69d524eed42901a6" + +tunnel-agent@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + dependencies: + safe-buffer "^5.0.1" + +tweetnacl@^0.14.3, tweetnacl@~0.14.0: + version "0.14.5" + resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" + +type-check@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" + dependencies: + prelude-ls "~1.1.2" + +type-is@~1.6.16: + version "1.6.16" + resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.16.tgz#f89ce341541c672b25ee7ae3c73dee3b2be50194" + dependencies: + media-typer "0.3.0" + mime-types "~2.1.18" + +typedarray@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" + +typescript@3.4.5: + version "3.4.5" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-3.4.5.tgz#2d2618d10bb566572b8d7aad5180d84257d70a99" + +ua-parser-js@^0.7.18: + version 
"0.7.19" + resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.19.tgz#94151be4c0a7fb1d001af7022fdaca4642659e4b" + +uglify-js@3.4.x: + version "3.4.10" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.4.10.tgz#9ad9563d8eb3acdfb8d38597d2af1d815f6a755f" + dependencies: + commander "~2.19.0" + source-map "~0.6.1" + +uglify-js@^3.1.4: + version "3.5.6" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.5.6.tgz#8a5f8a06ee7415ac1fa302f4623bc7344b553da4" + dependencies: + commander "~2.20.0" + source-map "~0.6.1" + +unicode-canonical-property-names-ecmascript@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz#2619800c4c825800efdd8343af7dd9933cbe2818" + +unicode-match-property-ecmascript@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz#8ed2a32569961bce9227d09cd3ffbb8fed5f020c" + dependencies: + unicode-canonical-property-names-ecmascript "^1.0.4" + unicode-property-aliases-ecmascript "^1.0.4" + +unicode-match-property-value-ecmascript@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.1.0.tgz#5b4b426e08d13a80365e0d657ac7a6c1ec46a277" + +unicode-property-aliases-ecmascript@^1.0.4: + version "1.0.5" + resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.0.5.tgz#a9cc6cc7ce63a0a3023fc99e341b94431d405a57" + +unified@^7.1.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/unified/-/unified-7.1.0.tgz#5032f1c1ee3364bd09da12e27fdd4a7553c7be13" + dependencies: + "@types/unist" "^2.0.0" + "@types/vfile" "^3.0.0" + bail "^1.0.0" + extend "^3.0.0" + is-plain-obj "^1.1.0" + trough "^1.0.0" + vfile "^3.0.0" + x-is-string "^0.1.0" + +union-value@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/union-value/-/union-value-1.0.0.tgz#5c71c34cb5bad5dcebe3ea0cd08207ba5aa1aea4" + dependencies: + arr-union "^3.1.0" + get-value "^2.0.6" + is-extendable "^0.1.1" + set-value "^0.4.3" + +uniq@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/uniq/-/uniq-1.0.1.tgz#b31c5ae8254844a3a8281541ce2b04b865a734ff" + +uniqs@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/uniqs/-/uniqs-2.0.0.tgz#ffede4b36b25290696e6e165d4a59edb998e6b02" + +unique-filename@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-1.1.1.tgz#1d69769369ada0583103a1e6ae87681b56573230" + dependencies: + unique-slug "^2.0.0" + +unique-slug@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-2.0.1.tgz#5e9edc6d1ce8fb264db18a507ef9bd8544451ca6" + dependencies: + imurmurhash "^0.1.4" + +unist-util-stringify-position@^1.0.0, unist-util-stringify-position@^1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-1.1.2.tgz#3f37fcf351279dcbca7480ab5889bb8a832ee1c6" + +unist-util-stringify-position@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-2.0.0.tgz#4c452c0dbcbc509f7bcd366e9a8afd646f9d51ae" + dependencies: + "@types/unist" "^2.0.2" + +universalify@^0.1.0: + version "0.1.2" + resolved 
"https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66" + +unpipe@1.0.0, unpipe@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" + +unquote@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/unquote/-/unquote-1.1.1.tgz#8fded7324ec6e88a0ff8b905e7c098cdc086d544" + +unset-value@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unset-value/-/unset-value-1.0.0.tgz#8376873f7d2335179ffb1e6fc3a8ed0dfc8ab559" + dependencies: + has-value "^0.3.1" + isobject "^3.0.0" + +upath@^1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/upath/-/upath-1.1.2.tgz#3db658600edaeeccbe6db5e684d67ee8c2acd068" + +upper-case@^1.1.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/upper-case/-/upper-case-1.1.3.tgz#f6b4501c2ec4cdd26ba78be7222961de77621598" + +uri-js@^4.2.2: + version "4.2.2" + resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.2.2.tgz#94c540e1ff772956e2299507c010aea6c8838eb0" + dependencies: + punycode "^2.1.0" + +urix@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72" + +url-loader@1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/url-loader/-/url-loader-1.1.2.tgz#b971d191b83af693c5e3fea4064be9e1f2d7f8d8" + dependencies: + loader-utils "^1.1.0" + mime "^2.0.3" + schema-utils "^1.0.0" + +url-parse@^1.4.3: + version "1.4.6" + resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.4.6.tgz#baf91d6e6783c8a795eb476892ffef2737fc0456" + dependencies: + querystringify "^2.0.0" + requires-port "^1.0.0" + +url@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/url/-/url-0.11.0.tgz#3838e97cfc60521eb73c525a8e55bfdd9e2e28f1" + dependencies: + punycode "1.3.2" + querystring "0.2.0" + +use@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/use/-/use-3.1.1.tgz#d50c8cac79a19fbc20f2911f56eb973f4e10070f" + +util-deprecate@^1.0.1, util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + +util.promisify@1.0.0, util.promisify@^1.0.0, util.promisify@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/util.promisify/-/util.promisify-1.0.0.tgz#440f7165a459c9a16dc145eb8e72f35687097030" + dependencies: + define-properties "^1.1.2" + object.getownpropertydescriptors "^2.0.3" + +util@0.10.3: + version "0.10.3" + resolved "https://registry.yarnpkg.com/util/-/util-0.10.3.tgz#7afb1afe50805246489e3db7fe0ed379336ac0f9" + dependencies: + inherits "2.0.1" + +util@^0.11.0: + version "0.11.1" + resolved "https://registry.yarnpkg.com/util/-/util-0.11.1.tgz#3236733720ec64bb27f6e26f421aaa2e1b588d61" + dependencies: + inherits "2.0.3" + +utila@^0.4.0, utila@~0.4: + version "0.4.0" + resolved "https://registry.yarnpkg.com/utila/-/utila-0.4.0.tgz#8a16a05d445657a3aea5eecc5b12a4fa5379772c" + +utils-merge@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" + +uuid@^3.0.1, uuid@^3.3.2: + version "3.3.2" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" + +validate-npm-package-license@^3.0.1: + version "3.0.4" + resolved 
"https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" + dependencies: + spdx-correct "^3.0.0" + spdx-expression-parse "^3.0.0" + +value-equal@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/value-equal/-/value-equal-0.4.0.tgz#c5bdd2f54ee093c04839d71ce2e4758a6890abc7" + +vary@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" + +vendors@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/vendors/-/vendors-1.0.2.tgz#7fcb5eef9f5623b156bcea89ec37d63676f21801" + +verror@1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" + dependencies: + assert-plus "^1.0.0" + core-util-is "1.0.2" + extsprintf "^1.2.0" + +vfile-message@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-1.1.1.tgz#5833ae078a1dfa2d96e9647886cd32993ab313e1" + dependencies: + unist-util-stringify-position "^1.1.1" + +vfile-message@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-2.0.0.tgz#750bbb86fe545988a67e899b329bbcabb73edef6" + dependencies: + "@types/unist" "^2.0.2" + unist-util-stringify-position "^1.1.1" + +vfile@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/vfile/-/vfile-3.0.1.tgz#47331d2abe3282424f4a4bb6acd20a44c4121803" + dependencies: + is-buffer "^2.0.0" + replace-ext "1.0.0" + unist-util-stringify-position "^1.0.0" + vfile-message "^1.0.0" + +vfile@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/vfile/-/vfile-4.0.0.tgz#ebf3b48af9fcde524d5e08d5f75812058a5f78ad" + dependencies: + "@types/unist" "^2.0.2" + is-buffer "^2.0.0" + replace-ext "1.0.0" + unist-util-stringify-position "^2.0.0" + vfile-message "^2.0.0" + +vm-browserify@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/vm-browserify/-/vm-browserify-0.0.4.tgz#5d7ea45bbef9e4a6ff65f95438e0a87c357d5a73" + dependencies: + indexof "0.0.1" + +w3c-hr-time@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/w3c-hr-time/-/w3c-hr-time-1.0.1.tgz#82ac2bff63d950ea9e3189a58a65625fedf19045" + dependencies: + browser-process-hrtime "^0.1.2" + +w3c-xmlserializer@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/w3c-xmlserializer/-/w3c-xmlserializer-1.1.2.tgz#30485ca7d70a6fd052420a3d12fd90e6339ce794" + dependencies: + domexception "^1.0.1" + webidl-conversions "^4.0.2" + xml-name-validator "^3.0.0" + +walker@^1.0.7, walker@~1.0.5: + version "1.0.7" + resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.7.tgz#2f7f9b8fd10d677262b18a884e28d19618e028fb" + dependencies: + makeerror "1.0.x" + +warning@4.x, warning@^4.0.1, warning@^4.0.2, warning@^4.0.3, warning@~4.0.2: + version "4.0.3" + resolved "https://registry.yarnpkg.com/warning/-/warning-4.0.3.tgz#16e9e077eb8a86d6af7d64aa1e05fd85b4678ca3" + dependencies: + loose-envify "^1.0.0" + +warning@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/warning/-/warning-3.0.0.tgz#32e5377cb572de4ab04753bdf8821c01ed605b7c" + dependencies: + loose-envify "^1.0.0" + +watchpack@^1.5.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-1.6.0.tgz#4bc12c2ebe8aa277a71f1d3f14d685c7b446cd00" + dependencies: + chokidar "^2.0.2" + graceful-fs "^4.1.2" + neo-async "^2.5.0" + +wbuf@^1.1.0, wbuf@^1.7.3: + version "1.7.3" + resolved 
"https://registry.yarnpkg.com/wbuf/-/wbuf-1.7.3.tgz#c1d8d149316d3ea852848895cb6a0bfe887b87df" + dependencies: + minimalistic-assert "^1.0.0" + +web-namespaces@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/web-namespaces/-/web-namespaces-1.1.2.tgz#c8dc267ab639505276bae19e129dbd6ae72b22b4" + +webidl-conversions@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.2.tgz#a855980b1f0b6b359ba1d5d9fb39ae941faa63ad" + +webpack-dev-middleware@^3.5.1: + version "3.6.2" + resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-3.6.2.tgz#f37a27ad7c09cd7dc67cd97655413abaa1f55942" + dependencies: + memory-fs "^0.4.1" + mime "^2.3.1" + range-parser "^1.0.3" + webpack-log "^2.0.0" + +webpack-dev-server@3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-3.2.1.tgz#1b45ce3ecfc55b6ebe5e36dab2777c02bc508c4e" + dependencies: + ansi-html "0.0.7" + bonjour "^3.5.0" + chokidar "^2.0.0" + compression "^1.5.2" + connect-history-api-fallback "^1.3.0" + debug "^4.1.1" + del "^3.0.0" + express "^4.16.2" + html-entities "^1.2.0" + http-proxy-middleware "^0.19.1" + import-local "^2.0.0" + internal-ip "^4.2.0" + ip "^1.1.5" + killable "^1.0.0" + loglevel "^1.4.1" + opn "^5.1.0" + portfinder "^1.0.9" + schema-utils "^1.0.0" + selfsigned "^1.9.1" + semver "^5.6.0" + serve-index "^1.7.2" + sockjs "0.3.19" + sockjs-client "1.3.0" + spdy "^4.0.0" + strip-ansi "^3.0.0" + supports-color "^6.1.0" + url "^0.11.0" + webpack-dev-middleware "^3.5.1" + webpack-log "^2.0.0" + yargs "12.0.2" + +webpack-log@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/webpack-log/-/webpack-log-2.0.0.tgz#5b7928e0637593f119d32f6227c1e0ac31e1b47f" + dependencies: + ansi-colors "^3.0.0" + uuid "^3.3.2" + +webpack-manifest-plugin@2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/webpack-manifest-plugin/-/webpack-manifest-plugin-2.0.4.tgz#e4ca2999b09557716b8ba4475fb79fab5986f0cd" + dependencies: + fs-extra "^7.0.0" + lodash ">=3.5 <5" + tapable "^1.0.0" + +webpack-sources@^1.1.0, webpack-sources@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-1.3.0.tgz#2a28dcb9f1f45fe960d8f1493252b5ee6530fa85" + dependencies: + source-list-map "^2.0.0" + source-map "~0.6.1" + +webpack@4.29.6: + version "4.29.6" + resolved "https://registry.yarnpkg.com/webpack/-/webpack-4.29.6.tgz#66bf0ec8beee4d469f8b598d3988ff9d8d90e955" + dependencies: + "@webassemblyjs/ast" "1.8.5" + "@webassemblyjs/helper-module-context" "1.8.5" + "@webassemblyjs/wasm-edit" "1.8.5" + "@webassemblyjs/wasm-parser" "1.8.5" + acorn "^6.0.5" + acorn-dynamic-import "^4.0.0" + ajv "^6.1.0" + ajv-keywords "^3.1.0" + chrome-trace-event "^1.0.0" + enhanced-resolve "^4.1.0" + eslint-scope "^4.0.0" + json-parse-better-errors "^1.0.2" + loader-runner "^2.3.0" + loader-utils "^1.1.0" + memory-fs "~0.4.1" + micromatch "^3.1.8" + mkdirp "~0.5.0" + neo-async "^2.5.0" + node-libs-browser "^2.0.0" + schema-utils "^1.0.0" + tapable "^1.1.0" + terser-webpack-plugin "^1.1.0" + watchpack "^1.5.0" + webpack-sources "^1.3.0" + +websocket-driver@>=0.5.1: + version "0.7.0" + resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.7.0.tgz#0caf9d2d755d93aee049d4bdd0d3fe2cca2a24eb" + dependencies: + http-parser-js ">=0.4.0" + websocket-extensions ">=0.1.1" + +websocket-extensions@>=0.1.1: + version "0.1.3" + resolved 
"https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.3.tgz#5d2ff22977003ec687a4b87073dfbbac146ccf29" + +whatwg-encoding@^1.0.1, whatwg-encoding@^1.0.3, whatwg-encoding@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-1.0.5.tgz#5abacf777c32166a51d085d6b4f3e7d27113ddb0" + dependencies: + iconv-lite "0.4.24" + +whatwg-fetch@3.0.0, whatwg-fetch@>=0.10.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-3.0.0.tgz#fc804e458cc460009b1a2b966bc8817d2578aefb" + +whatwg-mimetype@^2.1.0, whatwg-mimetype@^2.2.0, whatwg-mimetype@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz#3d4b1e0312d2079879f826aff18dbeeca5960fbf" + +whatwg-url@^6.4.1: + version "6.5.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-6.5.0.tgz#f2df02bff176fd65070df74ad5ccbb5a199965a8" + dependencies: + lodash.sortby "^4.7.0" + tr46 "^1.0.1" + webidl-conversions "^4.0.2" + +whatwg-url@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-7.0.0.tgz#fde926fa54a599f3adf82dff25a9f7be02dc6edd" + dependencies: + lodash.sortby "^4.7.0" + tr46 "^1.0.1" + webidl-conversions "^4.0.2" + +which-module@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" + +which@^1.2.9, which@^1.3.0, which@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" + dependencies: + isexe "^2.0.0" + +wide-align@^1.1.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" + dependencies: + string-width "^1.0.2 || 2" + +wordwrap@~0.0.2: + version "0.0.3" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107" + +wordwrap@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" + +workbox-background-sync@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-background-sync/-/workbox-background-sync-4.3.0.tgz#56d6da78c6813fcf5671327cf732265c02c8677e" + dependencies: + workbox-core "^4.3.0" + +workbox-broadcast-update@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-broadcast-update/-/workbox-broadcast-update-4.3.0.tgz#67d4e0fbafbc79a9707d72dda0f99769e9e545bb" + dependencies: + workbox-core "^4.3.0" + +workbox-build@^4.2.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-build/-/workbox-build-4.3.0.tgz#281fead712ecd2dce3a4c293b107e8ec46c21b98" + dependencies: + "@babel/runtime" "^7.3.4" + common-tags "^1.8.0" + fs-extra "^4.0.2" + glob "^7.1.3" + joi "^14.3.1" + lodash.template "^4.4.0" + pretty-bytes "^5.1.0" + stringify-object "^3.3.0" + strip-comments "^1.0.2" + workbox-background-sync "^4.3.0" + workbox-broadcast-update "^4.3.0" + workbox-cacheable-response "^4.3.0" + workbox-core "^4.3.0" + workbox-expiration "^4.3.0" + workbox-google-analytics "^4.3.0" + workbox-navigation-preload "^4.3.0" + workbox-precaching "^4.3.0" + workbox-range-requests "^4.3.0" + workbox-routing "^4.3.0" + workbox-strategies "^4.3.0" + workbox-streams "^4.3.0" + workbox-sw "^4.3.0" + workbox-window "^4.3.0" + +workbox-cacheable-response@^4.3.0: + version "4.3.0" + resolved 
"https://registry.yarnpkg.com/workbox-cacheable-response/-/workbox-cacheable-response-4.3.0.tgz#132c1cbb15c36d24d502b1c768558e46c912e7f3" + dependencies: + workbox-core "^4.3.0" + +workbox-core@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-core/-/workbox-core-4.3.0.tgz#1981c98af8e7da6d16dc8837c4f736f8167cc75d" + +workbox-expiration@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-expiration/-/workbox-expiration-4.3.0.tgz#aaa1cc8ab21134f2f46eeb447b0532b56e86330d" + dependencies: + workbox-core "^4.3.0" + +workbox-google-analytics@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-google-analytics/-/workbox-google-analytics-4.3.0.tgz#ef47cb9fe959bfbc16bd123210ff88891c4afeeb" + dependencies: + workbox-background-sync "^4.3.0" + workbox-core "^4.3.0" + workbox-routing "^4.3.0" + workbox-strategies "^4.3.0" + +workbox-navigation-preload@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-navigation-preload/-/workbox-navigation-preload-4.3.0.tgz#b4d32404921b10cb4a916a90e660ffc76877b09f" + dependencies: + workbox-core "^4.3.0" + +workbox-precaching@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-precaching/-/workbox-precaching-4.3.0.tgz#1aa386fb81bbb7cc1e0c484f659e5ac9f6820005" + dependencies: + workbox-core "^4.3.0" + +workbox-range-requests@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-range-requests/-/workbox-range-requests-4.3.0.tgz#ae25e33918701ffa2c5dd2d86bdfa1cf779527e8" + dependencies: + workbox-core "^4.3.0" + +workbox-routing@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-routing/-/workbox-routing-4.3.0.tgz#6bf838d0dc5be43cb5bcba9010874971ab642067" + dependencies: + workbox-core "^4.3.0" + +workbox-strategies@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-strategies/-/workbox-strategies-4.3.0.tgz#96eeb2d39e99d549d914e017cbe2bfd0a0d1a8b4" + dependencies: + workbox-core "^4.3.0" + +workbox-streams@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-streams/-/workbox-streams-4.3.0.tgz#836e27e302b90167da2ca47cd261eeb3369fb9ff" + dependencies: + workbox-core "^4.3.0" + +workbox-sw@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-sw/-/workbox-sw-4.3.0.tgz#07a8b0df4e3a4ad05bb56f5d5686723923738e56" + +workbox-webpack-plugin@4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/workbox-webpack-plugin/-/workbox-webpack-plugin-4.2.0.tgz#c94c3f69ff39c8a5b0c7e6bebc382cb53410a63d" + dependencies: + "@babel/runtime" "^7.0.0" + json-stable-stringify "^1.0.1" + workbox-build "^4.2.0" + +workbox-window@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/workbox-window/-/workbox-window-4.3.0.tgz#22636919ec408292e2a68b7da818f3fe81932d18" + dependencies: + workbox-core "^4.3.0" + +worker-farm@^1.5.2: + version "1.6.0" + resolved "https://registry.yarnpkg.com/worker-farm/-/worker-farm-1.6.0.tgz#aecc405976fab5a95526180846f0dba288f3a4a0" + dependencies: + errno "~0.1.7" + +wrap-ansi@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85" + dependencies: + string-width "^1.0.1" + strip-ansi "^3.0.1" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + +write-file-atomic@2.4.1: + version "2.4.1" + resolved 
"https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-2.4.1.tgz#d0b05463c188ae804396fd5ab2a370062af87529" + dependencies: + graceful-fs "^4.1.11" + imurmurhash "^0.1.4" + signal-exit "^3.0.2" + +write@1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/write/-/write-1.0.3.tgz#0800e14523b923a387e415123c865616aae0f5c3" + dependencies: + mkdirp "^0.5.1" + +ws@^5.2.0: + version "5.2.2" + resolved "https://registry.yarnpkg.com/ws/-/ws-5.2.2.tgz#dffef14866b8e8dc9133582514d1befaf96e980f" + dependencies: + async-limiter "~1.0.0" + +ws@^6.1.2: + version "6.2.1" + resolved "https://registry.yarnpkg.com/ws/-/ws-6.2.1.tgz#442fdf0a47ed64f59b6a5d8ff130f4748ed524fb" + dependencies: + async-limiter "~1.0.0" + +x-is-string@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/x-is-string/-/x-is-string-0.1.0.tgz#474b50865af3a49a9c4657f05acd145458f77d82" + +xml-name-validator@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz#6ae73e06de4d8c6e47f9fb181f78d648ad457c6a" + +xmlchars@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/xmlchars/-/xmlchars-1.3.1.tgz#1dda035f833dbb4f86a0c28eaa6ca769214793cf" + +xregexp@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/xregexp/-/xregexp-4.0.0.tgz#e698189de49dd2a18cc5687b05e17c8e43943020" + +xtend@^4.0.0, xtend@^4.0.1, xtend@~4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af" + +"y18n@^3.2.1 || ^4.0.0", y18n@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.0.tgz#95ef94f85ecc81d007c264e190a120f0a3c8566b" + +yallist@^3.0.0, yallist@^3.0.2: + version "3.0.3" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.0.3.tgz#b4b049e314be545e3ce802236d6cd22cd91c3de9" + +yargs-parser@^10.1.0: + version "10.1.0" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-10.1.0.tgz#7202265b89f7e9e9f2e5765e0fe735a905edbaa8" + dependencies: + camelcase "^4.1.0" + +yargs-parser@^11.1.1: + version "11.1.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-11.1.1.tgz#879a0865973bca9f6bab5cbdf3b1c67ec7d3bcf4" + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs@12.0.2: + version "12.0.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-12.0.2.tgz#fe58234369392af33ecbef53819171eff0f5aadc" + dependencies: + cliui "^4.0.0" + decamelize "^2.0.0" + find-up "^3.0.0" + get-caller-file "^1.0.1" + os-locale "^3.0.0" + require-directory "^2.1.1" + require-main-filename "^1.0.1" + set-blocking "^2.0.0" + string-width "^2.0.0" + which-module "^2.0.0" + y18n "^3.2.1 || ^4.0.0" + yargs-parser "^10.1.0" + +yargs@^12.0.2: + version "12.0.5" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-12.0.5.tgz#05f5997b609647b64f66b81e3b4b10a368e7ad13" + dependencies: + cliui "^4.0.0" + decamelize "^1.2.0" + find-up "^3.0.0" + get-caller-file "^1.0.1" + os-locale "^3.0.0" + require-directory "^2.1.1" + require-main-filename "^1.0.1" + set-blocking "^2.0.0" + string-width "^2.0.0" + which-module "^2.0.0" + y18n "^3.2.1 || ^4.0.0" + yargs-parser "^11.1.1" diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index f56ce80bf87a6..050d0a665adaa 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -273,6 +273,9 @@ webapps/static/nvd3-1.8.5.min.js.map webapps/static/nvd3-1.8.5.min.js **/dependency-reduced-pom.xml + **/node_modules/** + **/yarn.lock + **/ozone-recon-web/build/** From 
1cb2eb0df30d4fbaa090c68022833063f3d225cc Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 20 May 2019 17:22:58 -0700 Subject: [PATCH 0006/1308] HDFS-14353. Erasure Coding: metrics xmitsInProgress become to negative. Contributed by maobaolong. --- .../server/datanode/erasurecode/ErasureCodingWorker.java | 4 ++++ .../datanode/erasurecode/StripedBlockReconstructor.java | 6 +++++- .../server/datanode/erasurecode/StripedReconstructor.java | 4 ++++ .../org/apache/hadoop/hdfs/TestReconstructStripedFile.java | 6 ++++++ 4 files changed, 19 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java index f9063b7a8929f..f4506cf470719 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java @@ -170,4 +170,8 @@ public void shutDown() { stripedReconstructionPool.shutdown(); stripedReadPool.shutdown(); } + + public float getXmitWeight() { + return xmitWeight; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java index 29c0078e95710..1af2380886ac3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java @@ -67,7 +67,11 @@ public void run() { LOG.warn("Failed to reconstruct striped block: {}", getBlockGroup(), e); getDatanode().getMetrics().incrECFailedReconstructionTasks(); } finally { - getDatanode().decrementXmitsInProgress(getXmits()); + float xmitWeight = getErasureCodingWorker().getXmitWeight(); + // if the xmits is smaller than 1, the xmitsSubmitted should be set to 1 + // because if it set to zero, we cannot to measure the xmits submitted + int xmitsSubmitted = Math.max((int) (getXmits() * xmitWeight), 1); + getDatanode().decrementXmitsInProgress(xmitsSubmitted); final DataNodeMetrics metrics = getDatanode().getMetrics(); metrics.incrECReconstructionTasks(); metrics.incrECReconstructionBytesRead(getBytesRead()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java index a1f4c7ff55e37..4c8be827f4354 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java @@ -275,4 +275,8 @@ Configuration getConf() { DataNode getDatanode() { return datanode; } + + public ErasureCodingWorker getErasureCodingWorker() { + return erasureCodingWorker; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java 
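The finally block in StripedBlockReconstructor above scales the task's raw xmits by the worker's xmit weight and floors the result at 1, so the value decremented on completion stays consistent with what was counted when the task was submitted and xmitsInProgress can no longer drift negative. A minimal standalone sketch of that arithmetic; the class name and sample values are illustrative only:

public final class XmitAccountingSketch {
  /** Mirror of the decrement logic: scale raw xmits by the weight, floor at 1. */
  static int xmitsToDecrement(int rawXmits, float xmitWeight) {
    return Math.max((int) (rawXmits * xmitWeight), 1);
  }

  public static void main(String[] args) {
    System.out.println(xmitsToDecrement(6, 0.5f)); // 3: six raw xmits weighted at 0.5
    System.out.println(xmitsToDecrement(6, 1f));   // 6: weight 1 leaves the count unchanged
    System.out.println(xmitsToDecrement(6, 0f));   // 1: the floor keeps the metric measurable
  }
}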
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index 2abfff7876c13..0b490b5c3bcc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -514,6 +514,8 @@ private void testNNSendsErasureCodingTasks(int deadDN) throws Exception { @Test(timeout = 180000) public void testErasureCodingWorkerXmitsWeight() throws Exception { + testErasureCodingWorkerXmitsWeight(0.5f, + (int) (ecPolicy.getNumDataUnits() * 0.5f)); testErasureCodingWorkerXmitsWeight(1f, ecPolicy.getNumDataUnits()); testErasureCodingWorkerXmitsWeight(0f, 1); testErasureCodingWorkerXmitsWeight(10f, 10 * ecPolicy.getNumDataUnits()); @@ -567,6 +569,10 @@ public void stripedBlockReconstruction() throws IOException { } finally { barrier.await(); DataNodeFaultInjector.set(oldInjector); + for (final DataNode curDn : cluster.getDataNodes()) { + GenericTestUtils.waitFor(() -> curDn.getXceiverCount() > 1, 10, 60000); + assertEquals(0, curDn.getXmitsInProgress()); + } } } } From c1d7d68c78aed3ef800e877c2d3a926a68539f16 Mon Sep 17 00:00:00 2001 From: Lokesh Jain Date: Tue, 21 May 2019 14:47:48 +0530 Subject: [PATCH 0007/1308] HDDS-1461. Optimize listStatus api in OzoneFileSystem (#782) --- .../org/apache/hadoop/utils/db/RDBStore.java | 4 + .../hadoop/ozone/client/OzoneBucket.java | 17 ++ .../ozone/client/protocol/ClientProtocol.java | 19 +- .../hadoop/ozone/client/rest/RestClient.java | 8 + .../hadoop/ozone/client/rpc/RpcClient.java | 13 ++ .../java/org/apache/hadoop/ozone/OmUtils.java | 1 + .../apache/hadoop/ozone/audit/OMAction.java | 3 +- .../hadoop/ozone/om/helpers/OzoneFSUtils.java | 89 ++++++++ .../ozone/om/helpers/OzoneFileStatus.java | 11 +- .../om/protocol/OzoneManagerProtocol.java | 14 ++ ...ManagerProtocolClientSideTranslatorPB.java | 47 +++- .../src/main/proto/OzoneManagerProtocol.proto | 20 +- .../hadoop/ozone/om/KeyManagerImpl.java | 179 +++++++++++---- .../org/apache/hadoop/ozone/om/OMMetrics.java | 17 ++ .../ozone/om/OmMetadataManagerImpl.java | 5 +- .../apache/hadoop/ozone/om/OzoneManager.java | 25 +++ .../hadoop/ozone/om/fs/OzoneManagerFS.java | 4 + .../OzoneManagerRequestHandler.java | 26 +++ .../hadoop/ozone/om/TestKeyManagerImpl.java | 211 ++++++++++++++++++ .../fs/ozone/BasicOzoneClientAdapterImpl.java | 14 ++ .../hadoop/fs/ozone/BasicOzoneFileSystem.java | 150 +++---------- .../hadoop/fs/ozone/OzoneClientAdapter.java | 4 + .../fs/ozone/TestOzoneFileInterfaces.java | 42 +++- .../hadoop/fs/ozone/TestOzoneFileSystem.java | 29 ++- 24 files changed, 773 insertions(+), 179 deletions(-) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java index 5bb0fa41399ba..07d74c4f465db 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java @@ -298,4 +298,8 @@ public DBCheckpoint getCheckpoint(boolean flush) { public File getDbLocation() { return dbLocation; } + + public CodecRegistry getCodecRegistry() { + return codecRegistry; + } } \ No newline at end of file diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 8b3f8befce8cd..7843ad36038b3 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -528,6 +528,23 @@ public OzoneOutputStream createFile(String keyName, long size, recursive); } + /** + * List the status for a file or a directory and its contents. + * + * @param keyName Absolute path of the entry to be listed + * @param recursive For a directory if true all the descendants of a + * particular directory are listed + * @param startKey Key from which listing needs to start. If startKey exists + * its status is included in the final list. + * @param numEntries Number of entries to list from the start key + * @return list of file status + */ + public List listStatus(String keyName, boolean recursive, + String startKey, long numEntries) throws IOException { + return proxy + .listStatus(volumeName, name, keyName, recursive, startKey, numEntries); + } + /** * An Iterator to iterate over {@link OzoneKey} list. */ diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index cb9cb309e738c..c7607ef53dd30 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -604,4 +604,21 @@ OzoneInputStream readFile(String volumeName, String bucketName, OzoneOutputStream createFile(String volumeName, String bucketName, String keyName, long size, ReplicationType type, ReplicationFactor factor, boolean overWrite, boolean recursive) throws IOException; + + /** + * List the status for a file or a directory and its contents. + * + * @param volumeName Volume name + * @param bucketName Bucket name + * @param keyName Absolute path of the entry to be listed + * @param recursive For a directory if true all the descendants of a + * particular directory are listed + * @param startKey Key from which listing needs to start. If startKey exists + * its status is included in the final list. 
+ * @param numEntries Number of entries to list from the start key + * @return list of file status + */ + List listStatus(String volumeName, String bucketName, + String keyName, boolean recursive, String startKey, long numEntries) + throws IOException; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java index 80e81fea6bb37..2f37713159bc6 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java @@ -1113,4 +1113,12 @@ public OzoneOutputStream createFile(String volumeName, String bucketName, throw new UnsupportedOperationException( "Ozone REST protocol does not " + "support this operation."); } + + @Override + public List listStatus(String volumeName, String bucketName, + String keyName, boolean recursive, String startKey, long numEntries) + throws IOException { + throw new UnsupportedOperationException( + "Ozone REST protocol does not " + "support this operation."); + } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 66d9eac794bf1..ab40c5231a64d 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -993,6 +993,19 @@ public OzoneOutputStream createFile(String volumeName, String bucketName, factor); } + @Override + public List listStatus(String volumeName, String bucketName, + String keyName, boolean recursive, String startKey, long numEntries) + throws IOException { + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .build(); + return ozoneManagerClient + .listStatus(keyArgs, recursive, startKey, numEntries); + } + private OzoneInputStream createInputStream(OmKeyInfo keyInfo, String requestId) throws IOException { LengthInputStream lengthInputStream = KeyInputStream diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 2ceccbb214952..f060735296c0b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -189,6 +189,7 @@ public static boolean isReadOnly( case ListMultiPartUploadParts: case GetFileStatus: case LookupFile: + case ListStatus: return true; case CreateVolume: case SetVolumeProperty: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index e1845020c763c..043b9041f5c6a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -58,7 +58,8 @@ public enum OMAction implements AuditAction { GET_FILE_STATUS, CREATE_DIRECTORY, CREATE_FILE, - LOOKUP_FILE; + LOOKUP_FILE, + LIST_STATUS; @Override public String getAction() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java new file mode 100644 
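For the listStatus API that HDDS-1461 introduces above, a hedged client-side usage sketch; the volume, bucket, key name and page size are made-up examples, and the client is assumed to be obtained through OzoneClientFactory:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;

public final class ListStatusExample {
  public static void main(String[] args) throws IOException {
    try (OzoneClient client =
        OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
      OzoneBucket bucket = client.getObjectStore()
          .getVolume("vol1").getBucket("bucket1");
      // Immediate children of dir1: non-recursive, no start key, at most 100 entries.
      List<OzoneFileStatus> statuses = bucket.listStatus("dir1", false, "", 100);
      for (OzoneFileStatus status : statuses) {
        System.out.println(status.getPath() + " isDirectory=" + status.isDirectory());
      }
    }
  }
}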
index 0000000000000..07f3194c14b2f --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.helpers; + +import org.apache.hadoop.fs.Path; + +import java.nio.file.Paths; + +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; + +/** + * Utility class for OzoneFileSystem. + */ +public final class OzoneFSUtils { + + private OzoneFSUtils() {} + + /** + * Returns string representation of path after removing the leading slash. + */ + public static String pathToKey(Path path) { + return path.toString().substring(1); + } + + /** + * Returns string representation of the input path parent. The function adds + * a trailing slash if it does not exist and returns an empty string if the + * parent is root. + */ + public static String getParent(String keyName) { + java.nio.file.Path parentDir = Paths.get(keyName).getParent(); + if (parentDir == null) { + return ""; + } + return addTrailingSlashIfNeeded(parentDir.toString()); + } + + /** + * The function returns immediate child of given ancestor in a particular + * descendant. For example if ancestor is /a/b and descendant is /a/b/c/d/e + * the function should return /a/b/c/. If the descendant itself is the + * immediate child then it is returned as is without adding a trailing slash. + * This is done to distinguish files from a directory as in ozone files do + * not carry a trailing slash. + */ + public static String getImmediateChild(String descendant, String ancestor) { + ancestor = + !ancestor.isEmpty() ? addTrailingSlashIfNeeded(ancestor) : ancestor; + if (!descendant.startsWith(ancestor)) { + return null; + } + java.nio.file.Path descendantPath = Paths.get(descendant); + java.nio.file.Path ancestorPath = Paths.get(ancestor); + int ancestorPathNameCount = + ancestor.isEmpty() ? 
0 : ancestorPath.getNameCount(); + if (descendantPath.getNameCount() - ancestorPathNameCount > 1) { + return addTrailingSlashIfNeeded( + ancestor + descendantPath.getName(ancestorPathNameCount)); + } + return descendant; + } + + public static String addTrailingSlashIfNeeded(String key) { + if (!key.endsWith(OZONE_URI_DELIMITER)) { + return key + OZONE_URI_DELIMITER; + } else { + return key; + } + } + + public static boolean isFile(String keyName) { + return !keyName.endsWith(OZONE_URI_DELIMITER); + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java index 462463d1c8115..871794651214b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java @@ -18,11 +18,11 @@ package org.apache.hadoop.ozone.om.helpers; -import org.apache.hadoop.fs.FSProtos.FileStatusProto; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.protocolPB.PBHelper; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto; import java.io.IOException; import java.net.URI; @@ -53,13 +53,14 @@ public OzoneFileStatus(String keyName) { super(0, true, 0, 0, 0, getPath(keyName)); } - public FileStatusProto getProtobuf() throws IOException { - return PBHelper.convert(this); + public OzoneFileStatusProto getProtobuf() throws IOException { + return OzoneFileStatusProto.newBuilder().setStatus(PBHelper.convert(this)) + .build(); } - public static OzoneFileStatus getFromProtobuf(FileStatusProto response) + public static OzoneFileStatus getFromProtobuf(OzoneFileStatusProto response) throws IOException { - return new OzoneFileStatus(PBHelper.convert(response)); + return new OzoneFileStatus(PBHelper.convert(response.getStatus())); } public static Path getPath(String keyName) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index b3dc9c8526485..0a7d6fd0ada74 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -450,5 +450,19 @@ OpenKeySession createFile(OmKeyArgs keyArgs, boolean overWrite, * invalid arguments */ OmKeyInfo lookupFile(OmKeyArgs keyArgs) throws IOException; + + /** + * List the status for a file or a directory and its contents. + * + * @param keyArgs Key args + * @param recursive For a directory if true all the descendants of a + * particular directory are listed + * @param startKey Key from which listing needs to start. If startKey exists + * its status is included in the final list. 
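The behaviour of the new OzoneFSUtils helpers above is easiest to read off concrete inputs; a small sketch using keys without the leading slash, the form pathToKey produces:

import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;

public final class OzoneFSUtilsSketch {
  public static void main(String[] args) {
    // Parent of a key, with the trailing slash the key table uses for directories.
    System.out.println(OzoneFSUtils.getParent("a/b/c"));                    // a/b/
    // Immediate child of ancestor a/b inside descendant a/b/c/d/e is the
    // directory a/b/c/ (the trailing slash marks it as a directory).
    System.out.println(OzoneFSUtils.getImmediateChild("a/b/c/d/e", "a/b")); // a/b/c/
    // If the descendant is itself the immediate child it comes back unchanged,
    // so files (no trailing slash) stay distinguishable from directories.
    System.out.println(OzoneFSUtils.getImmediateChild("a/b/c", "a/b"));     // a/b/c
  }
}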
+ * @param numEntries Number of entries to list from the start key + * @return list of file status + */ + List listStatus(OmKeyArgs keyArgs, boolean recursive, + String startKey, long numEntries) throws IOException; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 7e3b33f5c74cf..48d19aea89ea0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -55,7 +55,13 @@ import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest; @@ -1281,14 +1287,13 @@ public OmKeyInfo lookupFile(OmKeyArgs args) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) .build(); - OzoneManagerProtocolProtos.LookupFileRequest lookupFileRequest = - OzoneManagerProtocolProtos.LookupFileRequest.newBuilder() + LookupFileRequest lookupFileRequest = LookupFileRequest.newBuilder() .setKeyArgs(keyArgs) .build(); OMRequest omRequest = createOMRequest(Type.LookupFile) .setLookupFileRequest(lookupFileRequest) .build(); - OzoneManagerProtocolProtos.LookupFileResponse resp = + LookupFileResponse resp = handleError(submitRequest(omRequest)).getLookupFileResponse(); return OmKeyInfo.getFromProtobuf(resp.getKeyInfo()); } @@ -1304,8 +1309,7 @@ public OpenKeySession createFile(OmKeyArgs args, .setType(args.getType()) .setFactor(args.getFactor()) .build(); - OzoneManagerProtocolProtos.CreateFileRequest createFileRequest = - OzoneManagerProtocolProtos.CreateFileRequest.newBuilder() + CreateFileRequest createFileRequest = CreateFileRequest.newBuilder() .setKeyArgs(keyArgs) .setIsOverwrite(overWrite) .setIsRecursive(recursive) @@ -1313,9 +1317,38 @@ public OpenKeySession createFile(OmKeyArgs args, OMRequest omRequest = createOMRequest(Type.CreateFile) .setCreateFileRequest(createFileRequest) .build(); - OzoneManagerProtocolProtos.CreateFileResponse resp = + CreateFileResponse resp = handleError(submitRequest(omRequest)).getCreateFileResponse(); return new OpenKeySession(resp.getID(), OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion()); } + + @Override + public List 
listStatus(OmKeyArgs args, boolean recursive, + String startKey, long numEntries) throws IOException { + KeyArgs keyArgs = KeyArgs.newBuilder() + .setVolumeName(args.getVolumeName()) + .setBucketName(args.getBucketName()) + .setKeyName(args.getKeyName()) + .build(); + ListStatusRequest listStatusRequest = + ListStatusRequest.newBuilder() + .setKeyArgs(keyArgs) + .setRecursive(recursive) + .setStartKey(startKey) + .setNumEntries(numEntries) + .build(); + OMRequest omRequest = createOMRequest(Type.ListStatus) + .setListStatusRequest(listStatusRequest) + .build(); + ListStatusResponse listStatusResponse = + handleError(submitRequest(omRequest)).getListStatusResponse(); + List statusList = + new ArrayList<>(listStatusResponse.getStatusesCount()); + for (OzoneFileStatusProto fileStatus : listStatusResponse + .getStatusesList()) { + statusList.add(OzoneFileStatus.getFromProtobuf(fileStatus)); + } + return statusList; + } } diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index 0f50a78df147d..50751ed2c4cc3 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -85,6 +85,7 @@ enum Type { CreateDirectory = 71; CreateFile = 72; LookupFile = 73; + ListStatus = 74; } message OMRequest { @@ -141,6 +142,7 @@ message OMRequest { optional CreateDirectoryRequest createDirectoryRequest = 71; optional CreateFileRequest createFileRequest = 72; optional LookupFileRequest lookupFileRequest = 73; + optional ListStatusRequest listStatusRequest = 74; } message OMResponse { @@ -200,6 +202,7 @@ message OMResponse { optional CreateDirectoryResponse createDirectoryResponse = 71; optional CreateFileResponse createFileResponse = 72; optional LookupFileResponse lookupFileResponse = 73; + optional ListStatusResponse listStatusResponse = 74; } enum Status { @@ -561,12 +564,16 @@ message KeyInfo { optional FileEncryptionInfoProto fileEncryptionInfo = 12; } +message OzoneFileStatusProto { + required hadoop.fs.FileStatusProto status = 1; +} + message GetFileStatusRequest { required KeyArgs keyArgs = 1; } message GetFileStatusResponse { - required hadoop.fs.FileStatusProto status = 1; + required OzoneFileStatusProto status = 1; } message CreateDirectoryRequest { @@ -599,6 +606,17 @@ message LookupFileResponse { optional KeyInfo keyInfo = 1; } +message ListStatusRequest { + required KeyArgs keyArgs = 1; + required bool recursive = 2; + required string startKey = 3; + required uint64 numEntries = 4; +} + +message ListStatusResponse { + repeated OzoneFileStatusProto statuses = 1; +} + message CreateKeyRequest { required KeyArgs keyArgs = 1; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index c8df904a5ce60..895a47ae37582 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -32,8 +32,8 @@ import java.security.GeneralSecurityException; import java.security.PrivilegedExceptionAction; +import com.google.common.base.Strings; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; @@ -67,6 +67,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.helpers.OmPartInfo; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -85,6 +86,10 @@ import org.apache.hadoop.utils.BackgroundService; import org.apache.hadoop.utils.db.BatchOperation; import org.apache.hadoop.utils.db.DBStore; +import org.apache.hadoop.utils.db.CodecRegistry; +import org.apache.hadoop.utils.db.RDBStore; +import org.apache.hadoop.utils.db.TableIterator; +import org.apache.hadoop.utils.db.Table; import com.google.common.base.Preconditions; @@ -1360,10 +1365,10 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { // Check if this is the root of the filesystem. if (keyName.length() == 0) { validateBucket(volumeName, bucketName); - return new OzoneFileStatus(keyName); + return new OzoneFileStatus(OZONE_URI_DELIMITER); } - //Check if the key is a file. + // Check if the key is a file. String fileKeyBytes = metadataManager.getOzoneKey( volumeName, bucketName, keyName); OmKeyInfo fileKeyInfo = metadataManager.getKeyTable().get(fileKeyBytes); @@ -1372,7 +1377,7 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false); } - String dirKey = addTrailingSlashIfNeeded(keyName); + String dirKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); String dirKeyBytes = metadataManager.getOzoneKey( volumeName, bucketName, dirKey); OmKeyInfo dirKeyInfo = metadataManager.getKeyTable().get(dirKeyBytes); @@ -1390,7 +1395,7 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { " bucket:" + bucketName + " key:" + keyName + " with error no " + "such file exists:"); throw new OMException("Unable to get file status: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, + volumeName + " bucket: " + bucketName + " key: " + keyName, ResultCodes.FILE_NOT_FOUND); } finally { metadataManager.getLock().releaseBucketLock(volumeName, bucketName); @@ -1413,47 +1418,50 @@ public void createDirectory(OmKeyArgs args) throws IOException { String bucketName = args.getBucketName(); String keyName = args.getKeyName(); + metadataManager.getLock().acquireBucketLock(volumeName, bucketName); try { - metadataManager.getLock().acquireBucketLock(volumeName, bucketName); - - // verify bucket exists - OmBucketInfo bucketInfo = getBucketInfo(volumeName, bucketName); // Check if this is the root of the filesystem. 
if (keyName.length() == 0) { return; } - verifyNoFilesInPath(volumeName, bucketName, Paths.get(keyName), false); - String dir = addTrailingSlashIfNeeded(keyName); - String dirDbKey = - metadataManager.getOzoneKey(volumeName, bucketName, dir); - FileEncryptionInfo encInfo = getFileEncryptionInfo(bucketInfo); + Path keyPath = Paths.get(keyName); + OzoneFileStatus status = + verifyNoFilesInPath(volumeName, bucketName, keyPath, false); + if (status != null && OzoneFSUtils.pathToKey(status.getPath()) + .equals(keyName)) { + // if directory already exists + return; + } OmKeyInfo dirDbKeyInfo = - createDirectoryKeyInfo(volumeName, bucketName, dir, new ArrayList<>(), - ReplicationFactor.ONE, ReplicationType.RATIS, encInfo); + createDirectoryKey(volumeName, bucketName, keyName); + String dirDbKey = metadataManager + .getOzoneKey(volumeName, bucketName, dirDbKeyInfo.getKeyName()); metadataManager.getKeyTable().put(dirDbKey, dirDbKeyInfo); - } finally { metadataManager.getLock().releaseBucketLock(volumeName, bucketName); } } - private OmKeyInfo createDirectoryKeyInfo(String volumeName, String bucketName, - String keyName, List locations, - ReplicationFactor factor, ReplicationType type, - FileEncryptionInfo encInfo) { + private OmKeyInfo createDirectoryKey(String volumeName, String bucketName, + String keyName) throws IOException { + // verify bucket exists + OmBucketInfo bucketInfo = getBucketInfo(volumeName, bucketName); + + String dir = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); + FileEncryptionInfo encInfo = getFileEncryptionInfo(bucketInfo); return new OmKeyInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) - .setKeyName(keyName) + .setKeyName(dir) .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, locations))) + new OmKeyLocationInfoGroup(0, new ArrayList<>()))) .setCreationTime(Time.now()) .setModificationTime(Time.now()) .setDataSize(0) - .setReplicationType(type) - .setReplicationFactor(factor) + .setReplicationType(ReplicationType.RATIS) + .setReplicationFactor(ReplicationFactor.ONE) .setFileEncryptionInfo(encInfo) .build(); } @@ -1482,9 +1490,8 @@ public OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite, String keyName = args.getKeyName(); OpenKeySession keySession; + metadataManager.getLock().acquireBucketLock(volumeName, bucketName); try { - metadataManager.getLock().acquireBucketLock(volumeName, bucketName); - OzoneFileStatus fileStatus; try { fileStatus = getFileStatus(args); @@ -1531,8 +1538,8 @@ public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { String bucketName = args.getBucketName(); String keyName = args.getKeyName(); + metadataManager.getLock().acquireBucketLock(volumeName, bucketName); try { - metadataManager.getLock().acquireBucketLock(volumeName, bucketName); OzoneFileStatus fileStatus = getFileStatus(args); if (fileStatus.isFile()) { return fileStatus.getKeyInfo(); @@ -1546,6 +1553,105 @@ public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { ResultCodes.NOT_A_FILE); } + /** + * List the status for a file or a directory and its contents. + * + * @param args Key args + * @param recursive For a directory if true all the descendants of a + * particular directory are listed + * @param startKey Key from which listing needs to start. If startKey exists + * its status is included in the final list. 
+ * @param numEntries Number of entries to list from the start key + * @return list of file status + */ + public List listStatus(OmKeyArgs args, boolean recursive, + String startKey, long numEntries) throws IOException { + Preconditions.checkNotNull(args, "Key args can not be null"); + String volumeName = args.getVolumeName(); + String bucketName = args.getBucketName(); + String keyName = args.getKeyName(); + + List fileStatusList = new ArrayList<>(); + metadataManager.getLock().acquireBucketLock(volumeName, bucketName); + try { + if (Strings.isNullOrEmpty(startKey)) { + OzoneFileStatus fileStatus = getFileStatus(args); + if (fileStatus.isFile()) { + return Collections.singletonList(fileStatus); + } + startKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); + } + + String seekKeyInDb = + metadataManager.getOzoneKey(volumeName, bucketName, startKey); + String keyInDb = OzoneFSUtils.addTrailingSlashIfNeeded( + metadataManager.getOzoneKey(volumeName, bucketName, keyName)); + TableIterator> + iterator = metadataManager.getKeyTable().iterator(); + iterator.seek(seekKeyInDb); + + if (!iterator.hasNext()) { + return Collections.emptyList(); + } + + if (iterator.key().equals(keyInDb)) { + // skip the key which needs to be listed + iterator.next(); + } + + while (iterator.hasNext() && numEntries - fileStatusList.size() > 0) { + String entryInDb = iterator.key(); + OmKeyInfo value = iterator.value().getValue(); + if (entryInDb.startsWith(keyInDb)) { + String entryKeyName = value.getKeyName(); + if (recursive) { + // for recursive list all the entries + fileStatusList.add(new OzoneFileStatus(value, scmBlockSize, + !OzoneFSUtils.isFile(entryKeyName))); + iterator.next(); + } else { + // get the child of the directory to list from the entry. For + // example if directory to list is /a and entry is /a/b/c where + // c is a file. The immediate child is b which is a directory. c + // should not be listed as child of a. + String immediateChild = OzoneFSUtils + .getImmediateChild(entryKeyName, keyName); + boolean isFile = OzoneFSUtils.isFile(immediateChild); + if (isFile) { + fileStatusList + .add(new OzoneFileStatus(value, scmBlockSize, !isFile)); + iterator.next(); + } else { + // if entry is a directory + fileStatusList.add(new OzoneFileStatus(immediateChild)); + // skip the other descendants of this child directory. + iterator.seek( + getNextGreaterString(volumeName, bucketName, immediateChild)); + } + } + } else { + break; + } + } + } finally { + metadataManager.getLock().releaseBucketLock(volumeName, bucketName); + } + return fileStatusList; + } + + private String getNextGreaterString(String volumeName, String bucketName, + String keyPrefix) throws IOException { + // Increment the last character of the string and return the new ozone key. + Preconditions.checkArgument(!Strings.isNullOrEmpty(keyPrefix), + "Key prefix is null or empty"); + CodecRegistry codecRegistry = + ((RDBStore) metadataManager.getStore()).getCodecRegistry(); + byte[] keyPrefixInBytes = codecRegistry.asRawData(keyPrefix); + keyPrefixInBytes[keyPrefixInBytes.length - 1]++; + String nextPrefix = codecRegistry.asObject(keyPrefixInBytes, String.class); + return metadataManager.getOzoneKey(volumeName, bucketName, nextPrefix); + } + /** * Verify that none of the parent path exists as file in the filesystem. * @@ -1555,6 +1661,8 @@ public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { * directory for the ozone filesystem. 
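getNextGreaterString above is what lets the non-recursive listing skip an entire subtree with a single seek: incrementing the last byte of a child directory's prefix yields the smallest key that sorts after every key under that prefix. A self-contained sketch of the idea, using plain UTF-8 bytes rather than the store's CodecRegistry:

import java.nio.charset.StandardCharsets;

public final class NextGreaterStringSketch {
  /** Smallest string that sorts after every string carrying the given prefix. */
  static String nextGreater(String prefix) {
    byte[] bytes = prefix.getBytes(StandardCharsets.UTF_8);
    bytes[bytes.length - 1]++; // bump the last byte, as the patch does
    return new String(bytes, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    // All keys under a/b/c/ (a/b/c/d, a/b/c/x/y, ...) sort before a/b/c0,
    // so seeking to the bumped prefix jumps past everything inside c/.
    System.out.println(nextGreater("a/b/c/")); // a/b/c0
  }
}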
* @param directoryMustExist throws exception if true and given path does not * exist as directory + * @return OzoneFileStatus of the first directory found in path in reverse + * order * @throws OMException if ancestor exists as file in the filesystem * if directoryMustExist flag is true and parent does * not exist @@ -1562,8 +1670,9 @@ public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { * @throws IOException if there is error in the db * invalid arguments */ - private void verifyNoFilesInPath(String volumeName, String bucketName, - Path path, boolean directoryMustExist) throws IOException { + private OzoneFileStatus verifyNoFilesInPath(String volumeName, + String bucketName, Path path, boolean directoryMustExist) + throws IOException { OmKeyArgs.Builder argsBuilder = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName); @@ -1580,7 +1689,7 @@ private void verifyNoFilesInPath(String volumeName, String bucketName, + "bucket: " + bucketName + "key: " + keyName, ResultCodes.FILE_ALREADY_EXISTS); } else if (fileStatus.isDirectory()) { - break; + return fileStatus; } } catch (OMException ex) { if (ex.getResult() != ResultCodes.FILE_NOT_FOUND) { @@ -1594,6 +1703,7 @@ private void verifyNoFilesInPath(String volumeName, String bucketName, } path = path.getParent(); } + return null; } private FileEncryptionInfo getFileEncryptionInfo(OmBucketInfo bucketInfo) @@ -1617,11 +1727,4 @@ private FileEncryptionInfo getFileEncryptionInfo(OmBucketInfo bucketInfo) return encInfo; } - private String addTrailingSlashIfNeeded(String key) { - if (StringUtils.isNotEmpty(key) && !key.endsWith(OZONE_URI_DELIMITER)) { - return key + OZONE_URI_DELIMITER; - } else { - return key; - } - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index 6e6d3aab08dfc..851be03be88f8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -71,6 +71,7 @@ public class OMMetrics { private @Metric MutableCounterLong numCreateDirectory; private @Metric MutableCounterLong numCreateFile; private @Metric MutableCounterLong numLookupFile; + private @Metric MutableCounterLong numListStatus; // Failure Metrics private @Metric MutableCounterLong numVolumeCreateFails; @@ -107,6 +108,7 @@ public class OMMetrics { private @Metric MutableCounterLong numCreateDirectoryFails; private @Metric MutableCounterLong numCreateFileFails; private @Metric MutableCounterLong numLookupFileFails; + private @Metric MutableCounterLong numListStatusFails; // Metrics for total number of volumes, buckets and keys @@ -333,6 +335,16 @@ public void incNumLookupFileFails() { numLookupFileFails.incr(); } + public void incNumListStatus() { + numKeyOps.incr(); + numFSOps.incr(); + numListStatus.incr(); + } + + public void incNumListStatusFails() { + numListStatusFails.incr(); + } + public void incNumListMultipartUploadPartFails() { numListMultipartUploadPartFails.incr(); } @@ -638,6 +650,11 @@ public long getNumGetFileStatus() { return numGetFileStatus.value(); } + @VisibleForTesting + public long getNumListStatus() { + return numListStatus.value(); + } + @VisibleForTesting public long getNumVolumeListFails() { return numVolumeListFails.value(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 6987927b173d6..ece04ddf61695 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -350,7 +350,10 @@ public String getOzoneKey(String volume, String bucket, String key) { // TODO : Throw if the Bucket is null? builder.append(OM_KEY_PREFIX).append(bucket); if (StringUtil.isNotBlank(key)) { - builder.append(OM_KEY_PREFIX).append(key); + builder.append(OM_KEY_PREFIX); + if (!key.equals(OM_KEY_PREFIX)) { + builder.append(key); + } } return builder.toString(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index c147346030c33..ec51fe77c04d0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -2946,6 +2946,31 @@ public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { } } + @Override + public List listStatus(OmKeyArgs args, boolean recursive, + String startKey, long numEntries) throws IOException { + if(isAclEnabled) { + checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ, + args.getVolumeName(), args.getBucketName(), args.getKeyName()); + } + boolean auditSuccess = true; + try { + metrics.incNumListStatus(); + return keyManager.listStatus(args, recursive, startKey, numEntries); + } catch (Exception ex) { + metrics.incNumListStatusFails(); + auditSuccess = false; + AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.LIST_STATUS, + (args == null) ? null : args.toAuditMap(), ex)); + throw ex; + } finally { + if(auditSuccess){ + AUDIT.logWriteSuccess(buildAuditMessageForSuccess( + OMAction.LIST_STATUS, (args == null) ? null : args.toAuditMap())); + } + } + } + /** * Startup options. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java index cdde5060c4ee3..46ba58dfe763d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java @@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import java.io.IOException; +import java.util.List; /** * Ozone Manager FileSystem interface. 
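The OmMetadataManagerImpl change above keeps the DB key for the bucket root free of a doubled delimiter, which listStatus relies on when it seeks from the bucket root. A rough mirror of the resulting key shapes; the helper below is illustrative, not the class's actual method:

public final class OzoneDbKeySketch {
  private static final String OM_KEY_PREFIX = "/";

  /** Approximates the layout built by OmMetadataManagerImpl#getOzoneKey. */
  static String ozoneKey(String volume, String bucket, String key) {
    StringBuilder builder = new StringBuilder(OM_KEY_PREFIX).append(volume)
        .append(OM_KEY_PREFIX).append(bucket);
    if (key != null && !key.isEmpty()) {
      builder.append(OM_KEY_PREFIX);
      if (!key.equals(OM_KEY_PREFIX)) {
        builder.append(key);
      }
    }
    return builder.toString();
  }

  public static void main(String[] args) {
    System.out.println(ozoneKey("vol1", "bucket1", "dir1/file1")); // /vol1/bucket1/dir1/file1
    System.out.println(ozoneKey("vol1", "bucket1", "/"));          // /vol1/bucket1/
  }
}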
@@ -37,4 +38,7 @@ OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite, boolean isRecursive) throws IOException; OmKeyInfo lookupFile(OmKeyArgs args) throws IOException; + + List listStatus(OmKeyArgs keyArgs, boolean recursive, + String startKey, long numEntries) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 1200d17d46369..1b95a2eea90ef 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.om.helpers.OmPartInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -347,6 +348,11 @@ public OMResponse handle(OMRequest request) { lookupFile(request.getLookupFileRequest()); responseBuilder.setLookupFileResponse(lookupFileResponse); break; + case ListStatus: + OzoneManagerProtocolProtos.ListStatusResponse listStatusResponse = + listStatus(request.getListStatusRequest()); + responseBuilder.setListStatusResponse(listStatusResponse); + break; default: responseBuilder.setSuccess(false); responseBuilder.setMessage("Unrecognized Command Type: " + cmdType); @@ -1031,4 +1037,24 @@ private OzoneManagerProtocolProtos.LookupFileResponse lookupFile( protected OzoneManagerServerProtocol getOzoneManagerServerProtocol() { return impl; } + + private OzoneManagerProtocolProtos.ListStatusResponse listStatus( + OzoneManagerProtocolProtos.ListStatusRequest request) throws IOException { + KeyArgs keyArgs = request.getKeyArgs(); + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() + .setVolumeName(keyArgs.getVolumeName()) + .setBucketName(keyArgs.getBucketName()) + .setKeyName(keyArgs.getKeyName()) + .build(); + List statuses = + impl.listStatus(omKeyArgs, request.getRecursive(), + request.getStartKey(), request.getNumEntries()); + OzoneManagerProtocolProtos.ListStatusResponse.Builder + listStatusResponseBuilder = + OzoneManagerProtocolProtos.ListStatusResponse.newBuilder(); + for (OzoneFileStatus status : statuses) { + listStatusResponseBuilder.addStatuses(status.getProtobuf()); + } + return listStatusResponseBuilder.build(); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 2d76c3facf16b..ad2b2b196e3f9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -22,6 +22,13 @@ import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; import java.util.UUID; import org.apache.commons.io.FileUtils; @@ -45,6 +52,7 @@ import 
org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.After; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.AfterClass; @@ -114,6 +122,22 @@ public static void cleanup() throws Exception { FileUtils.deleteDirectory(dir); } + @After + public void cleanupTest() throws IOException { + List fileStatuses = keyManager + .listStatus(createBuilder().setKeyName("").build(), true, "", 100000); + for (OzoneFileStatus fileStatus : fileStatuses) { + if (fileStatus.isFile()) { + keyManager.deleteKey( + createKeyArgs(fileStatus.getPath().toString().substring(1))); + } else { + keyManager.deleteKey(createKeyArgs(OzoneFSUtils + .addTrailingSlashIfNeeded( + fileStatus.getPath().toString().substring(1)))); + } + } + } + private static void createBucket(String volumeName, String bucketName) throws IOException { OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() @@ -331,6 +355,193 @@ public void testLookupFile() throws IOException { } } + private OmKeyArgs createKeyArgs(String toKeyName) { + return createBuilder().setKeyName(toKeyName).build(); + } + + @Test + public void testListStatus() throws IOException { + String superDir = RandomStringUtils.randomAlphabetic(5); + + int numDirectories = 5; + int numFiles = 5; + // set of directory descendants of root + Set directorySet = new TreeSet<>(); + // set of file descendants of root + Set fileSet = new TreeSet<>(); + createDepthTwoDirectory(superDir, numDirectories, numFiles, directorySet, + fileSet); + // set of all descendants of root + Set children = new TreeSet<>(directorySet); + children.addAll(fileSet); + // number of entries in the filesystem + int numEntries = directorySet.size() + fileSet.size(); + + OmKeyArgs rootDirArgs = createKeyArgs(""); + List fileStatuses = + keyManager.listStatus(rootDirArgs, true, "", 100); + // verify the number of status returned is same as number of entries + Assert.assertEquals(numEntries, fileStatuses.size()); + + fileStatuses = keyManager.listStatus(rootDirArgs, false, "", 100); + // the number of immediate children of root is 1 + Assert.assertEquals(1, fileStatuses.size()); + + // if startKey is the first descendant of the root then listStatus should + // return all the entries. + String startKey = children.iterator().next(); + fileStatuses = keyManager.listStatus(rootDirArgs, true, + startKey.substring(0, startKey.length() - 1), 100); + Assert.assertEquals(numEntries, fileStatuses.size()); + + for (String directory : directorySet) { + // verify status list received for each directory with recursive flag set + // to false + OmKeyArgs dirArgs = createKeyArgs(directory); + fileStatuses = keyManager.listStatus(dirArgs, false, "", 100); + verifyFileStatus(directory, fileStatuses, directorySet, fileSet, false); + + // verify status list received for each directory with recursive flag set + // to true + fileStatuses = keyManager.listStatus(dirArgs, true, "", 100); + verifyFileStatus(directory, fileStatuses, directorySet, fileSet, true); + + // verify list status call with using the startKey parameter and + // recursive flag set to false. After every call to listStatus use the + // latest received file status as the startKey until no more entries are + // left to list. + List tempFileStatus = null; + Set tmpStatusSet = new HashSet<>(); + do { + tempFileStatus = keyManager.listStatus(dirArgs, false, + tempFileStatus != null ? 
OzoneFSUtils.pathToKey( + tempFileStatus.get(tempFileStatus.size() - 1).getPath()) : null, + 2); + tmpStatusSet.addAll(tempFileStatus); + } while (tempFileStatus.size() == 2); + verifyFileStatus(directory, new ArrayList<>(tmpStatusSet), directorySet, + fileSet, false); + + // verify list status call with using the startKey parameter and + // recursive flag set to true. After every call to listStatus use the + // latest received file status as the startKey until no more entries are + // left to list. + tempFileStatus = null; + tmpStatusSet = new HashSet<>(); + do { + tempFileStatus = keyManager.listStatus(dirArgs, true, + tempFileStatus != null ? OzoneFSUtils.pathToKey( + tempFileStatus.get(tempFileStatus.size() - 1).getPath()) : null, + 2); + tmpStatusSet.addAll(tempFileStatus); + } while (tempFileStatus.size() == 2); + verifyFileStatus(directory, new ArrayList<>(tmpStatusSet), directorySet, + fileSet, true); + } + } + + /** + * Creates a depth two directory. + * + * @param superDir Super directory to create + * @param numDirectories number of directory children + * @param numFiles number of file children + * @param directorySet set of descendant directories for the super directory + * @param fileSet set of descendant files for the super directory + */ + private void createDepthTwoDirectory(String superDir, int numDirectories, + int numFiles, Set directorySet, Set fileSet) + throws IOException { + // create super directory + OmKeyArgs superDirArgs = createKeyArgs(superDir); + keyManager.createDirectory(superDirArgs); + directorySet.add(superDir); + + // add directory children to super directory + Set childDirectories = + createDirectories(superDir, new HashMap<>(), numDirectories); + directorySet.addAll(childDirectories); + // add file to super directory + fileSet.addAll(createFiles(superDir, new HashMap<>(), numFiles)); + + // for each child directory create files and directories + for (String child : childDirectories) { + fileSet.addAll(createFiles(child, new HashMap<>(), numFiles)); + directorySet + .addAll(createDirectories(child, new HashMap<>(), numDirectories)); + } + } + + private void verifyFileStatus(String directory, + List fileStatuses, Set directorySet, + Set fileSet, boolean recursive) { + + for (OzoneFileStatus fileStatus : fileStatuses) { + String keyName = OzoneFSUtils.pathToKey(fileStatus.getPath()); + String parent = Paths.get(keyName).getParent().toString(); + if (!recursive) { + // if recursive is false, verify all the statuses have the input + // directory as parent + Assert.assertEquals(parent, directory); + } + // verify filestatus is present in directory or file set accordingly + if (fileStatus.isDirectory()) { + Assert.assertTrue(directorySet.contains(keyName)); + } else { + Assert.assertTrue(fileSet.contains(keyName)); + } + } + + // count the number of entries which should be present in the directory + int numEntries = 0; + Set entrySet = new TreeSet<>(directorySet); + entrySet.addAll(fileSet); + for (String entry : entrySet) { + if (OzoneFSUtils.getParent(entry) + .startsWith(OzoneFSUtils.addTrailingSlashIfNeeded(directory))) { + if (recursive) { + numEntries++; + } else if (OzoneFSUtils.getParent(entry) + .equals(OzoneFSUtils.addTrailingSlashIfNeeded(directory))) { + numEntries++; + } + } + } + // verify the number of entries match the status list size + Assert.assertEquals(fileStatuses.size(), numEntries); + } + + private Set createDirectories(String parent, + Map> directoryMap, int numDirectories) + throws IOException { + Set keyNames = new TreeSet<>(); + 
for (int i = 0; i < numDirectories; i++) { + String keyName = parent + "/" + RandomStringUtils.randomAlphabetic(5); + OmKeyArgs keyArgs = createBuilder().setKeyName(keyName).build(); + keyManager.createDirectory(keyArgs); + keyNames.add(keyName); + } + directoryMap.put(parent, new ArrayList<>(keyNames)); + return keyNames; + } + + private List createFiles(String parent, + Map> fileMap, int numFiles) throws IOException { + List keyNames = new ArrayList<>(); + for (int i = 0; i < numFiles; i++) { + String keyName = parent + "/" + RandomStringUtils.randomAlphabetic(5); + OmKeyArgs keyArgs = createBuilder().setKeyName(keyName).build(); + OpenKeySession keySession = keyManager.createFile(keyArgs, false, false); + keyArgs.setLocationInfoList( + keySession.getKeyInfo().getLatestVersionLocations() + .getLocationList()); + keyManager.commitKey(keyArgs, keySession.getId()); + keyNames.add(keyName); + } + fileMap.put(parent, keyNames); + return keyNames; + } + private OmKeyArgs.Builder createBuilder() { return new OmKeyArgs.Builder() .setBucketName(BUCKET_NAME) diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index 7f65724dbf07d..fa862d9677b10 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -22,6 +22,7 @@ import java.io.InputStream; import java.net.URI; import java.util.Iterator; +import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -253,6 +254,19 @@ public Iterator listKeys(String pathKey) { return new IteratorAdapter(bucket.listKeys(pathKey)); } + public List listStatus(String keyName, boolean recursive, + String startKey, long numEntries) throws IOException { + try { + incrementCounter(Statistic.OBJECTS_LIST); + return bucket.listStatus(keyName, recursive, startKey, numEntries); + } catch (OMException e) { + if (e.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) { + throw new FileNotFoundException(e.getMessage()); + } + throw e; + } + } + @Override public Token getDelegationToken(String renewer) throws IOException { diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index 77e5a608f44bb..da3f1ac78f5ae 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -22,17 +22,13 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; -import java.util.ArrayList; import java.util.EnumSet; -import java.util.HashMap; import java.util.Iterator; import java.util.List; -import java.util.Map; +import java.util.LinkedList; import java.util.Objects; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.Stream; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -46,6 +42,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.fs.permission.FsPermission; +import 
org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; @@ -494,130 +491,37 @@ private boolean o3Exists(final Path f) throws IOException { } } - private class ListStatusIterator extends OzoneListingIterator { - // _fileStatuses_ maintains a list of file(s) which is either the input - // path itself or a child of the input directory path. - private List fileStatuses = new ArrayList<>(LISTING_PAGE_SIZE); - // _subDirStatuses_ maintains a list of sub-dirs of the input directory - // path. - private Map subDirStatuses = - new HashMap<>(LISTING_PAGE_SIZE); - private Path f; // the input path - - ListStatusIterator(Path f) throws IOException { - super(f); - this.f = f; - } + @Override + public FileStatus[] listStatus(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_LIST_STATUS); + statistics.incrementReadOps(1); + LOG.trace("listStatus() path:{}", f); + int numEntries = LISTING_PAGE_SIZE; + LinkedList statuses = new LinkedList<>(); + List tmpStatusList; + String startKey = ""; - /** - * Add the key to the listStatus result if the key corresponds to the - * input path or is an immediate child of the input path. - * - * @param key key to be processed - * @return always returns true - * @throws IOException - */ - @Override - boolean processKey(String key) throws IOException { - Path keyPath = new Path(OZONE_URI_DELIMITER + key); - if (key.equals(getPathKey())) { - if (pathIsDirectory()) { - // if input path is a directory, we add the sub-directories and - // files under this directory. - return true; - } else { - addFileStatus(keyPath); - return true; - } - } - // Left with only subkeys now - // We add only the immediate child files and sub-dirs i.e. we go only - // upto one level down the directory tree structure. - if (pathToKey(keyPath.getParent()).equals(pathToKey(f))) { - // This key is an immediate child. Can be file or directory - if (key.endsWith(OZONE_URI_DELIMITER)) { - // Key is a directory - addSubDirStatus(keyPath); + do { + tmpStatusList = + adapter.listStatus(pathToKey(f), false, startKey, numEntries); + if (!tmpStatusList.isEmpty()) { + if (startKey.isEmpty()) { + statuses.addAll(tmpStatusList); } else { - addFileStatus(keyPath); - } - } else { - // This key is not the immediate child of the input directory. So we - // traverse the parent tree structure of this key until we get the - // immediate child of the input directory. - Path immediateChildPath = getImmediateChildPath(keyPath.getParent()); - if (immediateChildPath != null) { - addSubDirStatus(immediateChildPath); + statuses.addAll(tmpStatusList.subList(1, tmpStatusList.size())); } + startKey = pathToKey(statuses.getLast().getPath()); } - return true; - } + // listStatus returns entries numEntries in size if available. + // Any lesser number of entries indicate that the required entries have + // exhausted. + } while (tmpStatusList.size() == numEntries); - /** - * Adds the FileStatus of keyPath to final result of listStatus. - * - * @param filePath path to the file - * @throws FileNotFoundException - */ - void addFileStatus(Path filePath) throws IOException { - fileStatuses.add(getFileStatus(filePath)); + for (OzoneFileStatus status : statuses) { + status.makeQualified(uri, status.getPath().makeQualified(uri, workingDir), + getUsername(), getUsername()); } - - /** - * Adds the FileStatus of the subdir to final result of listStatus, if not - * already included. 
- * - * @param dirPath path to the dir - * @throws FileNotFoundException - */ - void addSubDirStatus(Path dirPath) throws IOException { - // Check if subdir path is already included in statuses. - if (!subDirStatuses.containsKey(dirPath)) { - subDirStatuses.put(dirPath, getFileStatus(dirPath)); - } - } - - /** - * Traverse the parent directory structure of keyPath to determine the - * which parent/ grand-parent/.. is the immediate child of the input path f. - * - * @param keyPath path whose parent directory structure should be traversed. - * @return immediate child path of the input path f. - */ - Path getImmediateChildPath(Path keyPath) { - Path path = keyPath; - Path parent = path.getParent(); - while (parent != null) { - if (pathToKey(parent).equals(pathToKey(f))) { - return path; - } - path = parent; - parent = path.getParent(); - } - return null; - } - - /** - * Return the result of listStatus operation. If the input path is a - * file, return the status for only that file. If the input path is a - * directory, return the statuses for all the child files and sub-dirs. - */ - FileStatus[] getStatuses() { - List result = Stream.concat( - fileStatuses.stream(), subDirStatuses.values().stream()) - .collect(Collectors.toList()); - return result.toArray(new FileStatus[result.size()]); - } - } - - @Override - public FileStatus[] listStatus(Path f) throws IOException { - incrementCounter(Statistic.INVOCATION_LIST_STATUS); - statistics.incrementReadOps(1); - LOG.trace("listStatus() path:{}", f); - ListStatusIterator iterator = new ListStatusIterator(f); - iterator.iterate(); - return iterator.getStatuses(); + return statuses.toArray(new FileStatus[0]); } @Override diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java index 44bb6a4071851..390925c0780d6 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java @@ -27,6 +27,7 @@ import java.io.InputStream; import java.net.URI; import java.util.Iterator; +import java.util.List; /** * Lightweight adapter to separate hadoop/ozone classes. 
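The new BasicOzoneFileSystem#listStatus above replaces the client-side ListStatusIterator with server-side paging: each round trip asks the adapter for at most LISTING_PAGE_SIZE entries resuming from the last path already collected, skips the first entry of every page after the first (it repeats the startKey just passed in), and stops as soon as a page comes back short. A minimal standalone sketch of that paging pattern, using a hypothetical fetchPage callback in place of adapter.listStatus and plain strings in place of OzoneFileStatus, assuming the listing is inclusive of the startKey:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.BiFunction;

public final class PagedListingSketch {

  // Collects all entries by repeatedly asking for pages of pageSize,
  // resuming from the last entry returned by the previous call.
  static List<String> listAll(
      BiFunction<String, Integer, List<String>> fetchPage, int pageSize) {
    List<String> all = new ArrayList<>();
    String startKey = "";
    List<String> page;
    do {
      page = fetchPage.apply(startKey, pageSize);
      if (!page.isEmpty()) {
        if (startKey.isEmpty()) {
          all.addAll(page);                         // first page: keep everything
        } else {
          all.addAll(page.subList(1, page.size())); // later pages repeat startKey
        }
        startKey = all.get(all.size() - 1);
      }
      // a short page means the listing is exhausted
    } while (page.size() == pageSize);
    return all;
  }

  public static void main(String[] args) {
    List<String> keys = Arrays.asList("a", "b", "c", "d", "e");
    // fake server: returns up to n keys at or after start, in sorted order
    BiFunction<String, Integer, List<String>> fake = (start, n) -> {
      List<String> out = new ArrayList<>();
      for (String k : keys) {
        if (k.compareTo(start) >= 0 && out.size() < n) {
          out.add(k);
        }
      }
      return out;
    };
    System.out.println(listAll(fake, 2)); // prints [a, b, c, d, e]
  }
}

With a page size of 2 the fake server is called five times, and every key still appears exactly once in the result, which is the property the real loop relies on.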
@@ -52,6 +53,9 @@ OzoneFSOutputStream createFile(String key, boolean overWrite, Iterator listKeys(String pathKey); + List listStatus(String keyName, boolean recursive, + String startKey, long numEntries) throws IOException; + Token getDelegationToken(String renewer) throws IOException; diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index 87e1487fbfea7..ca3e64f18b92c 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -20,12 +20,15 @@ import java.io.IOException; import java.net.URI; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.List; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.junit.Assert; @@ -104,6 +107,8 @@ public static Collection data() { private OzoneFSStorageStatistics statistics; + private OMMetrics omMetrics; + public TestOzoneFileInterfaces(boolean setDefaultFs, boolean useAbsolutePath) { this.setDefaultFs = setDefaultFs; @@ -147,6 +152,7 @@ public void init() throws Exception { } o3fs = (OzoneFileSystem) fs; statistics = (OzoneFSStorageStatistics) o3fs.getOzoneFSOpsCountStatistics(); + omMetrics = cluster.getOzoneManager().getMetrics(); } @After @@ -246,11 +252,45 @@ public void testDirectory() throws IOException { assertEquals(1, statusList.length); assertEquals(status, statusList[0]); - FileStatus statusRoot = fs.getFileStatus(createPath("/")); + fs.getFileStatus(createPath("/")); assertTrue("Root dir (/) is not a directory.", status.isDirectory()); assertEquals(0, status.getLen()); } + @Test + public void testListStatus() throws IOException { + List paths = new ArrayList<>(); + String dirPath = RandomStringUtils.randomAlphanumeric(5); + Path path = createPath("/" + dirPath); + paths.add(path); + assertTrue("Makedirs returned with false for the path " + path, + fs.mkdirs(path)); + + long listObjects = statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()); + long omListStatus = omMetrics.getNumListStatus(); + FileStatus[] statusList = fs.listStatus(createPath("/")); + assertEquals(1, statusList.length); + assertEquals(++listObjects, + statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()).longValue()); + assertEquals(++omListStatus, omMetrics.getNumListStatus()); + assertEquals(fs.getFileStatus(path), statusList[0]); + + dirPath = RandomStringUtils.randomAlphanumeric(5); + path = createPath("/" + dirPath); + paths.add(path); + assertTrue("Makedirs returned with false for the path " + path, + fs.mkdirs(path)); + + statusList = fs.listStatus(createPath("/")); + assertEquals(2, statusList.length); + assertEquals(++listObjects, + statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()).longValue()); + assertEquals(++omListStatus, omMetrics.getNumListStatus()); + for (Path p : paths) { + assertTrue(Arrays.asList(statusList).contains(fs.getFileStatus(p))); + } + } + @Test public void testOzoneManagerFileSystemInterface() throws IOException { String dirPath = RandomStringUtils.randomAlphanumeric(5); diff --git 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index e1ba2e7863216..ac8f11fc9057b 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -44,6 +44,9 @@ import org.junit.Test; import java.io.IOException; +import java.util.Set; +import java.util.TreeSet; + import org.junit.rules.Timeout; import static org.junit.Assert.assertEquals; @@ -172,7 +175,7 @@ public void testDeleteCreatesFakeParentDir() throws Exception { public void testListStatus() throws Exception { Path parent = new Path("/testListStatus"); Path file1 = new Path(parent, "key1"); - Path file2 = new Path(parent, "key1/key2"); + Path file2 = new Path(parent, "key2"); ContractTestUtils.touch(fs, file1); ContractTestUtils.touch(fs, file2); @@ -219,6 +222,30 @@ public void testListStatusOnRoot() throws Exception { assertFalse(fileStatus2.equals(dir12.toString())); } + /** + * Tests listStatus operation on root directory. + */ + @Test + public void testListStatusOnLargeDirectory() throws Exception { + Path root = new Path("/"); + Set paths = new TreeSet<>(); + int numDirs = 5111; + for(int i = 0; i < numDirs; i++) { + Path p = new Path(root, String.valueOf(i)); + fs.mkdirs(p); + paths.add(p.getName()); + } + + FileStatus[] fileStatuses = o3fs.listStatus(root); + assertEquals( + "Total directories listed do not match the existing directories", + numDirs, fileStatuses.length); + + for (int i=0; i < numDirs; i++) { + assertTrue(paths.contains(fileStatuses[i].getPath().getName())); + } + } + /** * Tests listStatus on a path with subdirs. */ From ff5691ca062c1f5e539ed74b503a86073734d34d Mon Sep 17 00:00:00 2001 From: Ajay Kumar Date: Tue, 21 May 2019 17:09:21 +0200 Subject: [PATCH 0008/1308] HDDS-1442. 
add spark container to ozonesecure-mr compose files Closes #746 --- .../main/compose/ozonesecure-mr/docker-compose.yaml | 10 ++++++++++ .../dist/src/main/compose/ozonesecure-mr/docker-config | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml index 5f8d1bff32adb..38ed7d67e0336 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml @@ -111,3 +111,13 @@ services: HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar WAIT_FOR: rm:8088 command: ["yarn","timelineserver"] + spark: + image: ahadoop/spark-2.4:hadoop-3.2 + hostname: spark + volumes: + - ../..:/opt/hadoop + ports: + - 4040:4040 + env_file: + - docker-config + command: ["watch","-n","100000","ls"] \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config index d17ae035f82c8..431edca7f36c7 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config @@ -168,7 +168,7 @@ LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT OZONE_DATANODE_SECURE_USER=root KEYTAB_DIR=/etc/security/keytabs -KERBEROS_KEYTABS=dn om scm HTTP testuser s3g rm nm yarn jhs hadoop +KERBEROS_KEYTABS=dn om scm HTTP testuser s3g rm nm yarn jhs hadoop spark KERBEROS_KEYSTORES=hadoop KERBEROS_SERVER=kdc JAVA_HOME=/usr/lib/jvm/jre From 5906268f0dd63a93eb591ddccf70d23b15e5c2ed Mon Sep 17 00:00:00 2001 From: Sahil Takiar Date: Mon, 20 May 2019 19:36:44 -0500 Subject: [PATCH 0009/1308] HADOOP-16321: ITestS3ASSL+TestOpenSSLSocketFactory failing with java.lang.UnsatisfiedLinkErrors --- .../hadoop/security/ssl/TestOpenSSLSocketFactory.java | 8 ++++++-- .../test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java | 5 ++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java index ea881e990b934..41ec3e4516f0e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java @@ -35,7 +35,10 @@ public class TestOpenSSLSocketFactory { @Test public void testOpenSSL() throws IOException { - assumeTrue(NativeCodeLoader.buildSupportsOpenssl()); + assumeTrue("Unable to load native libraries", + NativeCodeLoader.isNativeCodeLoaded()); + assumeTrue("Build was not compiled with support for OpenSSL", + NativeCodeLoader.buildSupportsOpenssl()); OpenSSLSocketFactory.initializeDefaultFactory( OpenSSLSocketFactory.SSLChannelMode.OpenSSL); assertThat(OpenSSLSocketFactory.getDefaultFactory() @@ -44,7 +47,8 @@ public void testOpenSSL() throws IOException { @Test public void testJSEEJava8() throws IOException { - assumeTrue(System.getProperty("java.version").startsWith("1.8")); + assumeTrue("Not running on Java 8", + System.getProperty("java.version").startsWith("1.8")); OpenSSLSocketFactory.initializeDefaultFactory( OpenSSLSocketFactory.SSLChannelMode.Default_JSSE); 
assertThat(Arrays.stream(OpenSSLSocketFactory.getDefaultFactory() diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java index 794bf80826650..4232b0f270eb3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java @@ -40,7 +40,10 @@ public class ITestS3ASSL extends AbstractS3ATestBase { @Test public void testOpenSSL() throws IOException { - assumeTrue(NativeCodeLoader.buildSupportsOpenssl()); + assumeTrue("Unable to load native libraries", + NativeCodeLoader.isNativeCodeLoaded()); + assumeTrue("Build was not compiled with support for OpenSSL", + NativeCodeLoader.buildSupportsOpenssl()); Configuration conf = new Configuration(getConfiguration()); conf.setEnum(Constants.SSL_CHANNEL_MODE, OpenSSLSocketFactory.SSLChannelMode.OpenSSL); From a85451c8d22b81c36329d7f6e19fa117dcedccad Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Tue, 21 May 2019 11:25:30 -0700 Subject: [PATCH 0010/1308] YARN-9575. Fix TestYarnConfigurationFields testcase failing. Contributed by Prabhu Joseph. --- .../src/main/resources/yarn-default.xml | 10 ---------- .../src/site/markdown/yarn-service/Configurations.md | 1 + 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 87c2f132ea722..9741f6c36b1da 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -4211,14 +4211,4 @@ yarn.resourcemanager.activities-manager.app-activities.max-queue-length 1000 - - - - Comma separated extra class path parameters for yarn services AM. - These path elements will be appended to the end of the YARN service AM - classpath. - - yarn.service.classpath - - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md index 4785f39b265b4..53ffa07ab43f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md @@ -131,6 +131,7 @@ The above config allows the service AM to be retried a maximum of 10 times. |yarn.service.log.exclude-pattern | Regex expression for excluding log files by name when aggregating the logs after the application completes. If the log file name matches both include and exclude pattern, this file will be excluded (default does not exclude any files).| |yarn.service.rolling-log.include-pattern | Regex expression for including log files by name when aggregating the logs while app is running.| |yarn.service.rolling-log.exclude-pattern | Regex expression for excluding log files by name when aggregating the logs while app is running. If the log file name matches both include and exclude pattern, this file will be excluded.| +|yarn.service.classpath | Comma separated extra class path parameters for yarn services AM. These path elements will be appended to the end of the YARN service AM classpath. 
| ### Component-level configuration properties Component-level service AM configuration properties can be specified either in the cluster `yarn-site.xml` at the global level (effectively overriding the default values system-wide), specified per service in the `properties` field of the `Configuration` object, or specified per component in the `properties` field of the component's `Configuration` object. From ef1cc725b8c1301f33bcb80b63b12d45dc23bed8 Mon Sep 17 00:00:00 2001 From: Ajay Yadav <7813154+ajayydv@users.noreply.github.com> Date: Tue, 21 May 2019 15:53:40 -0700 Subject: [PATCH 0011/1308] HDDS-1538. Update ozone protobuf message for ACLs. Contributed by Ajay Kumar. (#828) --- .../org/apache/hadoop/ozone/OzoneConsts.java | 3 +- .../src/main/resources/ozone-default.xml | 18 +-- .../hadoop/ozone/client/rest/RestClient.java | 6 +- .../hadoop/ozone/client/rpc/RpcClient.java | 25 ++-- .../org/apache/hadoop/ozone/OzoneAcl.java | 112 +++++++----------- .../apache/hadoop/ozone/om/OMConfigKeys.java | 12 -- .../ozone/om/helpers/OmOzoneAclMap.java | 60 +++++++--- .../hadoop/ozone/protocolPB/OMPBHelper.java | 65 +++++----- .../ozone/security/acl/IAccessAuthorizer.java | 20 +++- .../ozone/security/acl/OzoneAclConfig.java | 65 ++++++++++ .../src/main/proto/OzoneManagerProtocol.proto | 17 ++- .../apache/hadoop/ozone/TestOzoneAcls.java | 79 ++++++++---- .../rpc/TestOzoneRpcClientAbstract.java | 20 ++-- .../hadoop/ozone/om/TestOzoneManager.java | 21 ++-- .../hadoop/ozone/ozShell/TestOzoneShell.java | 13 +- .../storage/DistributedStorageHandler.java | 21 ++-- .../hadoop/ozone/om/VolumeManagerImpl.java | 4 +- .../ozone/om/TestBucketManagerImpl.java | 18 +-- .../apache/hadoop/ozone/scm/cli/SQLCLI.java | 2 +- 19 files changed, 338 insertions(+), 243 deletions(-) rename {hadoop-hdds => hadoop-ozone}/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java (63%) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java rename {hadoop-hdds => hadoop-ozone}/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java (57%) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index ce1e97e34d918..4f249f854bb16 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -52,12 +52,11 @@ public final class OzoneConsts { public static final String OZONE_ACL_USER_TYPE = "user"; public static final String OZONE_ACL_GROUP_TYPE = "group"; public static final String OZONE_ACL_WORLD_TYPE = "world"; + public static final String OZONE_ACL_ANONYMOUS_TYPE = "anonymous"; public static final String OZONE_ACL_IP_TYPE = "ip"; public static final String OZONE_ACL_READ = "r"; public static final String OZONE_ACL_WRITE = "w"; - public static final String OZONE_ACL_READ_WRITE = "rw"; - public static final String OZONE_ACL_WRITE_READ = "wr"; public static final String OZONE_ACL_DELETE = "d"; public static final String OZONE_ACL_LIST = "l"; public static final String OZONE_ACL_ALL = "a"; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index a46ddb16a8950..305cac5a2cd6f 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -540,14 +540,6 @@ the address of the OM. 
- - ozone.om.group.rights - READ_WRITE - OM, SECURITY - - Default group permissions in Ozone OM. - - ozone.om.handler.count.key 20 @@ -640,14 +632,6 @@ of buckets or keys inside each bucket a user can create. - - ozone.om.user.rights - READ_WRITE - OM, SECURITY - - Default user permissions used in OM. - - ozone.om.db.dirs @@ -1809,7 +1793,7 @@ assumed. - + hdds.block.token.enabled false diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java index 2f37713159bc6..71fb8ca8c6c66 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java @@ -112,7 +112,7 @@ public class RestClient implements ClientProtocol { private final URI ozoneRestUri; private final CloseableHttpClient httpClient; private final UserGroupInformation ugi; - private final OzoneAcl.OzoneACLRights userRights; + // private final OzoneAcl.OzoneACLRights userRights; /** * Creates RestClient instance with the given configuration. @@ -161,8 +161,8 @@ public RestClient(Configuration conf) .build()) .build(); - this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS, - OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT); +// this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS, +// OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT); // TODO: Add new configuration parameter to configure RestServerSelector. RestServerSelector defaultSelector = new DefaultRestServerSelector(); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index ab40c5231a64d..3aa4fb8e6358b 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -73,7 +73,6 @@ .OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.protocol.proto .OzoneManagerProtocolProtos.ServicePort; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -85,6 +84,9 @@ import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolPB; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -121,8 +123,8 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer { private final int bytesPerChecksum; private boolean verifyChecksum; private final UserGroupInformation ugi; - private final OzoneAcl.OzoneACLRights userRights; - private final OzoneAcl.OzoneACLRights groupRights; + private final ACLType userRights; + private final ACLType groupRights; private final long streamBufferFlushSize; private final long streamBufferMaxSize; private final long blockSize; @@ -141,10 +143,11 @@ public RpcClient(Configuration conf) throws IOException { Preconditions.checkNotNull(conf); this.conf = new OzoneConfiguration(conf); this.ugi = 
UserGroupInformation.getCurrentUser(); - this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS, - OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT); - this.groupRights = conf.getEnum(OMConfigKeys.OZONE_OM_GROUP_RIGHTS, - OMConfigKeys.OZONE_OM_GROUP_RIGHTS_DEFAULT); + // Get default acl rights for user and group. + OzoneAclConfig aclConfig = this.conf.getObject(OzoneAclConfig.class); + this.userRights = aclConfig.getUserDefaultRights(); + this.groupRights = aclConfig.getGroupDefaultRights(); + this.ozoneManagerClient = new OzoneManagerProtocolClientSideTranslatorPB( this.conf, clientId.toString(), ugi); long scmVersion = @@ -256,13 +259,13 @@ public void createVolume(String volumeName, VolumeArgs volArgs) OzoneQuota.parseQuota(volArgs.getQuota()).sizeInBytes(); List listOfAcls = new ArrayList<>(); //User ACL - listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER, + listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, owner, userRights)); //Group ACLs of the User List userGroups = Arrays.asList(UserGroupInformation .createRemoteUser(owner).getGroupNames()); userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights))); + new OzoneAcl(ACLIdentityType.GROUP, group, groupRights))); //ACLs from VolumeArgs if(volArgs.getAcls() != null) { listOfAcls.addAll(volArgs.getAcls()); @@ -403,13 +406,13 @@ public void createBucket( List listOfAcls = new ArrayList<>(); //User ACL - listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER, + listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, ugi.getUserName(), userRights)); //Group ACLs of the User List userGroups = Arrays.asList(UserGroupInformation .createRemoteUser(ugi.getUserName()).getGroupNames()); userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights))); + new OzoneAcl(ACLIdentityType.GROUP, group, groupRights))); //ACLs from BucketArgs if(bucketArgs.getAcls() != null) { listOfAcls.addAll(bucketArgs.getAcls()); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java similarity index 63% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 1827b23bf15f1..eaec507314dbe 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -19,6 +19,11 @@ package org.apache.hadoop.ozone; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; + +import java.util.ArrayList; +import java.util.List; import java.util.Objects; /** @@ -32,9 +37,9 @@ * */ public class OzoneAcl { - private OzoneACLType type; + private ACLIdentityType type; private String name; - private OzoneACLRights rights; + private List rights; /** * Constructor for OzoneAcl. 
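The hunk above changes OzoneAcl from holding a single OzoneACLRights value to holding a List of ACLType, and the parseAcl change below expands the rights part of an ACL string character by character, so "user:bilbo:rw" now yields the list [READ, WRITE]. A rough sketch of that expansion, using a local stand-in enum rather than the real IAccessAuthorizer.ACLType and its OzoneConsts constants, with the character mapping taken from the updated TestOzoneAcls expectations:

import java.util.ArrayList;
import java.util.List;

public final class AclRightsSketch {

  // Stand-in for IAccessAuthorizer.ACLType.
  enum Right { READ, WRITE, CREATE, LIST, DELETE, READ_ACL, WRITE_ACL, ALL, NONE }

  // Maps one single-letter ACL right to its enum value.
  static Right fromChar(char c) {
    switch (c) {
      case 'r': return Right.READ;
      case 'w': return Right.WRITE;
      case 'c': return Right.CREATE;
      case 'l': return Right.LIST;
      case 'd': return Right.DELETE;
      case 'x': return Right.READ_ACL;
      case 'y': return Right.WRITE_ACL;
      case 'a': return Right.ALL;
      case 'n': return Right.NONE;
      default:
        throw new IllegalArgumentException(c + " is not a recognized ACL right");
    }
  }

  // Expands a rights string into a list, one right per character.
  static List<Right> expand(String rights) {
    List<Right> out = new ArrayList<>();
    for (char c : rights.toCharArray()) {
      out.add(fromChar(c));
    }
    return out;
  }

  public static void main(String[] args) {
    System.out.println(expand("rw"));       // [READ, WRITE]
    System.out.println(expand("rwdlncxy")); // the eight individual rights
    System.out.println(expand("a"));        // [ALL]
  }
}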
@@ -47,16 +52,37 @@ public OzoneAcl() { * * @param type - Type * @param name - Name of user - * @param rights - Rights + * @param acl - Rights */ - public OzoneAcl(OzoneACLType type, String name, OzoneACLRights rights) { + public OzoneAcl(ACLIdentityType type, String name, ACLType acl) { this.name = name; - this.rights = rights; + this.rights = new ArrayList<>(); + this.rights.add(acl); this.type = type; - if (type == OzoneACLType.WORLD && name.length() != 0) { + if (type == ACLIdentityType.WORLD && name.length() != 0) { throw new IllegalArgumentException("Unexpected name part in world type"); } - if (((type == OzoneACLType.USER) || (type == OzoneACLType.GROUP)) + if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP)) + && (name.length() == 0)) { + throw new IllegalArgumentException("User or group name is required"); + } + } + + /** + * Constructor for OzoneAcl. + * + * @param type - Type + * @param name - Name of user + * @param acls - Rights + */ + public OzoneAcl(ACLIdentityType type, String name, List acls) { + this.name = name; + this.rights = acls; + this.type = type; + if (type == ACLIdentityType.WORLD && name.length() != 0) { + throw new IllegalArgumentException("Unexpected name part in world type"); + } + if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP)) && (name.length() == 0)) { throw new IllegalArgumentException("User or group name is required"); } @@ -78,17 +104,20 @@ public static OzoneAcl parseAcl(String acl) throws IllegalArgumentException { throw new IllegalArgumentException("ACLs are not in expected format"); } - OzoneACLType aclType = OzoneACLType.valueOf(parts[0].toUpperCase()); - OzoneACLRights rights = OzoneACLRights.getACLRight(parts[2].toLowerCase()); + ACLIdentityType aclType = ACLIdentityType.valueOf(parts[0].toUpperCase()); + List acls = new ArrayList<>(); + for (char ch : parts[2].toCharArray()) { + acls.add(ACLType.getACLRight(String.valueOf(ch))); + } // TODO : Support sanitation of these user names by calling into // userAuth Interface. - return new OzoneAcl(aclType, parts[1], rights); + return new OzoneAcl(aclType, parts[1], acls); } @Override public String toString() { - return type + ":" + name + ":" + OzoneACLRights.getACLRightsString(rights); + return type + ":" + name + ":" + ACLType.getACLString(rights); } /** @@ -120,7 +149,7 @@ public String getName() { * * @return - Rights */ - public OzoneACLRights getRights() { + public List getRights() { return rights; } @@ -129,7 +158,7 @@ public OzoneACLRights getRights() { * * @return type */ - public OzoneACLType getType() { + public ACLIdentityType getType() { return type; } @@ -150,9 +179,7 @@ public boolean equals(Object obj) { return false; } OzoneAcl otherAcl = (OzoneAcl) obj; - return otherAcl.getName().equals(this.getName()) && - otherAcl.getRights() == this.getRights() && - otherAcl.getType() == this.getType(); + return otherAcl.toString().equals(this.toString()); } /** @@ -177,57 +204,4 @@ public enum OzoneACLType { value = val; } } - - /** - * ACL rights. - */ - public enum OzoneACLRights { - READ, WRITE, READ_WRITE; - - /** - * Returns the ACL rights based on passed in String. 
- * - * @param type ACL right string - * - * @return OzoneACLRights - */ - public static OzoneACLRights getACLRight(String type) { - if (type == null || type.isEmpty()) { - throw new IllegalArgumentException("ACL right cannot be empty"); - } - - switch (type) { - case OzoneConsts.OZONE_ACL_READ: - return OzoneACLRights.READ; - case OzoneConsts.OZONE_ACL_WRITE: - return OzoneACLRights.WRITE; - case OzoneConsts.OZONE_ACL_READ_WRITE: - case OzoneConsts.OZONE_ACL_WRITE_READ: - return OzoneACLRights.READ_WRITE; - default: - throw new IllegalArgumentException("ACL right is not recognized"); - } - - } - - /** - * Returns String representation of ACL rights. - * @param acl OzoneACLRights - * @return String representation of acl - */ - public static String getACLRightsString(OzoneACLRights acl) { - switch(acl) { - case READ: - return OzoneConsts.OZONE_ACL_READ; - case WRITE: - return OzoneConsts.OZONE_ACL_WRITE; - case READ_WRITE: - return OzoneConsts.OZONE_ACL_READ_WRITE; - default: - throw new IllegalArgumentException("ACL right is not recognized"); - } - } - - } - } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 60dde441196b6..5f1f579f6d335 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -19,7 +19,6 @@ import java.util.concurrent.TimeUnit; -import org.apache.hadoop.ozone.OzoneAcl; import org.apache.ratis.util.TimeDuration; /** @@ -78,17 +77,6 @@ private OMConfigKeys() { "ozone.om.user.max.volume"; public static final int OZONE_OM_USER_MAX_VOLUME_DEFAULT = 1024; - // OM Default user/group permissions - public static final String OZONE_OM_USER_RIGHTS = - "ozone.om.user.rights"; - public static final OzoneAcl.OzoneACLRights OZONE_OM_USER_RIGHTS_DEFAULT = - OzoneAcl.OzoneACLRights.READ_WRITE; - - public static final String OZONE_OM_GROUP_RIGHTS = - "ozone.om.group.rights"; - public static final OzoneAcl.OzoneACLRights OZONE_OM_GROUP_RIGHTS_DEFAULT = - OzoneAcl.OzoneACLRights.READ_WRITE; - public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK = "ozone.key.deleting.limit.per.task"; public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java index 2584eb58c2fda..8831c6b879c28 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java @@ -37,7 +37,7 @@ @SuppressWarnings("ProtocolBufferOrdinal") public class OmOzoneAclMap { // per Acl Type user:rights map - private ArrayList> aclMaps; + private ArrayList>> aclMaps; OmOzoneAclMap() { aclMaps = new ArrayList<>(); @@ -46,51 +46,75 @@ public class OmOzoneAclMap { } } - private Map getMap(OzoneAclType type) { + private Map> getMap(OzoneAclType type) { return aclMaps.get(type.ordinal()); } // For a given acl type and user, get the stored acl - private OzoneAclRights getAcl(OzoneAclType type, String user) { + private List getAcl(OzoneAclType type, String user) { return getMap(type).get(user); } // Add a new acl to the map public void addAcl(OzoneAclInfo acl) { - getMap(acl.getType()).put(acl.getName(), acl.getRights()); + 
getMap(acl.getType()).put(acl.getName(), acl.getRightsList()); } // for a given acl, check if the user has access rights public boolean hasAccess(OzoneAclInfo acl) { - OzoneAclRights storedRights = getAcl(acl.getType(), acl.getName()); - if (storedRights != null) { - switch (acl.getRights()) { - case READ: - return (storedRights == OzoneAclRights.READ) - || (storedRights == OzoneAclRights.READ_WRITE); + if (acl == null) { + return false; + } + + List storedRights = getAcl(acl.getType(), acl.getName()); + if(storedRights == null) { + return false; + } + + for (OzoneAclRights right : storedRights) { + switch (right) { + case CREATE: + return (right == OzoneAclRights.CREATE) + || (right == OzoneAclRights.ALL); + case LIST: + return (right == OzoneAclRights.LIST) + || (right == OzoneAclRights.ALL); case WRITE: - return (storedRights == OzoneAclRights.WRITE) - || (storedRights == OzoneAclRights.READ_WRITE); - case READ_WRITE: - return (storedRights == OzoneAclRights.READ_WRITE); + return (right == OzoneAclRights.WRITE) + || (right == OzoneAclRights.ALL); + case READ: + return (right == OzoneAclRights.READ) + || (right == OzoneAclRights.ALL); + case DELETE: + return (right == OzoneAclRights.DELETE) + || (right == OzoneAclRights.ALL); + case READ_ACL: + return (right == OzoneAclRights.READ_ACL) + || (right == OzoneAclRights.ALL); + case WRITE_ACL: + return (right == OzoneAclRights.WRITE_ACL) + || (right == OzoneAclRights.ALL); + case ALL: + return (right == OzoneAclRights.ALL); + case NONE: + return !(right == OzoneAclRights.NONE); default: return false; } - } else { - return false; } + return false; } // Convert this map to OzoneAclInfo Protobuf List public List ozoneAclGetProtobuf() { List aclList = new LinkedList<>(); for (OzoneAclType type: OzoneAclType.values()) { - for (Map.Entry entry : + for (Map.Entry> entry : aclMaps.get(type.ordinal()).entrySet()) { OzoneAclInfo aclInfo = OzoneAclInfo.newBuilder() .setName(entry.getKey()) .setType(type) - .setRights(entry.getValue()) + .addAllRights(entry.getValue()) .build(); aclList.add(aclInfo); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java index fa6fc85b03612..45ae0b301af94 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java @@ -41,9 +41,15 @@ import org.apache.hadoop.ozone.protocol.proto .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.security.token.Token; +import java.util.List; +import java.util.ArrayList; + /** * Utilities for converting protobuf classes. 
*/ @@ -59,7 +65,7 @@ private OMPBHelper() { */ public static OzoneAclInfo convertOzoneAcl(OzoneAcl acl) { OzoneAclInfo.OzoneAclType aclType; - switch(acl.getType()) { + switch (acl.getType()) { case USER: aclType = OzoneAclType.USER; break; @@ -69,27 +75,24 @@ public static OzoneAclInfo convertOzoneAcl(OzoneAcl acl) { case WORLD: aclType = OzoneAclType.WORLD; break; - default: - throw new IllegalArgumentException("ACL type is not recognized"); - } - OzoneAclInfo.OzoneAclRights aclRights; - switch(acl.getRights()) { - case READ: - aclRights = OzoneAclRights.READ; + case ANONYMOUS: + aclType = OzoneAclType.ANONYMOUS; break; - case WRITE: - aclRights = OzoneAclRights.WRITE; - break; - case READ_WRITE: - aclRights = OzoneAclRights.READ_WRITE; + case CLIENT_IP: + aclType = OzoneAclType.CLIENT_IP; break; default: - throw new IllegalArgumentException("ACL right is not recognized"); + throw new IllegalArgumentException("ACL type is not recognized"); + } + List aclRights = new ArrayList<>(); + + for (ACLType right : acl.getRights()) { + aclRights.add(OzoneAclRights.valueOf(right.name())); } return OzoneAclInfo.newBuilder().setType(aclType) .setName(acl.getName()) - .setRights(aclRights) + .addAllRights(aclRights) .build(); } @@ -98,35 +101,31 @@ public static OzoneAclInfo convertOzoneAcl(OzoneAcl acl) { * @return OzoneAcl */ public static OzoneAcl convertOzoneAcl(OzoneAclInfo aclInfo) { - OzoneAcl.OzoneACLType aclType; - switch(aclInfo.getType()) { + ACLIdentityType aclType; + switch (aclInfo.getType()) { case USER: - aclType = OzoneAcl.OzoneACLType.USER; + aclType = ACLIdentityType.USER; break; case GROUP: - aclType = OzoneAcl.OzoneACLType.GROUP; + aclType = ACLIdentityType.GROUP; break; case WORLD: - aclType = OzoneAcl.OzoneACLType.WORLD; + aclType = ACLIdentityType.WORLD; break; - default: - throw new IllegalArgumentException("ACL type is not recognized"); - } - OzoneAcl.OzoneACLRights aclRights; - switch(aclInfo.getRights()) { - case READ: - aclRights = OzoneAcl.OzoneACLRights.READ; + case ANONYMOUS: + aclType = ACLIdentityType.ANONYMOUS; break; - case WRITE: - aclRights = OzoneAcl.OzoneACLRights.WRITE; - break; - case READ_WRITE: - aclRights = OzoneAcl.OzoneACLRights.READ_WRITE; + case CLIENT_IP: + aclType = ACLIdentityType.CLIENT_IP; break; default: - throw new IllegalArgumentException("ACL right is not recognized"); + throw new IllegalArgumentException("ACL type is not recognized"); } + List aclRights = new ArrayList<>(); + for (OzoneAclRights acl : aclInfo.getRightsList()) { + aclRights.add(ACLType.valueOf(acl.name())); + } return new OzoneAcl(aclType, aclInfo.getName(), aclRights); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java index 074c07d69b374..2c47000a6cf2a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java @@ -20,6 +20,8 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.ozone.OzoneConsts; +import java.util.List; + /** * Public API for Ozone ACLs. Security providers providing support for Ozone * ACLs should implement this. 
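The convertOzoneAcl changes above copy the whole rights list in both directions by matching enum constant names, which only works while ACLType and the protobuf OzoneAclRights declare the same set of constants. A small sketch of that name-based mapping, with two local stand-in enums instead of the real ACLType and the generated protobuf class:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public final class AclEnumMappingSketch {

  // Stand-in for the client-side IAccessAuthorizer.ACLType.
  enum ClientRight { READ, WRITE, DELETE, LIST, CREATE, READ_ACL, WRITE_ACL, ALL, NONE }

  // Stand-in for the generated OzoneAclInfo.OzoneAclRights.
  enum WireRight { READ, WRITE, DELETE, LIST, CREATE, READ_ACL, WRITE_ACL, ALL, NONE }

  // Client -> wire: valueOf(name()) holds only while both enums stay in sync.
  static List<WireRight> toWire(List<ClientRight> rights) {
    List<WireRight> out = new ArrayList<>();
    for (ClientRight r : rights) {
      out.add(WireRight.valueOf(r.name()));
    }
    return out;
  }

  // Wire -> client: the reverse direction used when decoding a response.
  static List<ClientRight> fromWire(List<WireRight> rights) {
    List<ClientRight> out = new ArrayList<>();
    for (WireRight r : rights) {
      out.add(ClientRight.valueOf(r.name()));
    }
    return out;
  }

  public static void main(String[] args) {
    List<ClientRight> acl = Arrays.asList(ClientRight.READ, ClientRight.WRITE);
    System.out.println(toWire(acl));           // [READ, WRITE]
    System.out.println(fromWire(toWire(acl))); // round-trips unchanged
  }
}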
@@ -84,7 +86,8 @@ public static ACLType getACLRight(String type) { case OzoneConsts.OZONE_ACL_NONE: return ACLType.NONE; default: - throw new IllegalArgumentException("ACL right is not recognized"); + throw new IllegalArgumentException(type + " ACL right is not " + + "recognized"); } } @@ -92,10 +95,18 @@ public static ACLType getACLRight(String type) { /** * Returns String representation of ACL rights. * - * @param acl ACLType + * @param acls ACLType * @return String representation of acl */ - public static String getACLRightsString(ACLType acl) { + public static String getACLString(List acls) { + StringBuffer sb = new StringBuffer(); + acls.forEach(acl -> { + sb.append(getAclString(acl)); + }); + return sb.toString(); + } + + public static String getAclString(ACLType acl) { switch (acl) { case READ: return OzoneConsts.OZONE_ACL_READ; @@ -129,7 +140,8 @@ enum ACLIdentityType { USER(OzoneConsts.OZONE_ACL_USER_TYPE), GROUP(OzoneConsts.OZONE_ACL_GROUP_TYPE), CLIENT_IP(OzoneConsts.OZONE_ACL_IP_TYPE), - WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE); + WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE), + ANONYMOUS(OzoneConsts.OZONE_ACL_ANONYMOUS_TYPE); @Override public String toString() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java new file mode 100644 index 0000000000000..9641eda18dac9 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.security.acl; + +import org.apache.hadoop.hdds.conf.Config; +import org.apache.hadoop.hdds.conf.ConfigGroup; +import org.apache.hadoop.hdds.conf.ConfigTag; +import org.apache.hadoop.hdds.conf.ConfigType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; + +/** + * Ozone ACL config pojo. + * */ +@ConfigGroup(prefix = "ozone.om") +public class OzoneAclConfig { + // OM Default user/group permissions + private ACLType userDefaultRights = ACLType.ALL; + private ACLType groupDefaultRights = ACLType.ALL; + + @Config(key = "user.rights", + defaultValue = "ALL", + type = ConfigType.STRING, + tags = {ConfigTag.OM, ConfigTag.SECURITY}, + description = "Default user permissions set for an object in " + + "OzoneManager." + ) + public void setUserDefaultRights(String userRights) { + this.userDefaultRights = ACLType.valueOf(userRights); + } + + @Config(key = "group.rights", + defaultValue = "ALL", + type = ConfigType.STRING, + tags = {ConfigTag.OM, ConfigTag.SECURITY}, + description = "Default group permissions set for an object in " + + "OzoneManager." 
+ ) + public void setGroupDefaultRights(String groupRights) { + this.groupDefaultRights = ACLType.valueOf(groupRights); + } + + public ACLType getUserDefaultRights() { + return userDefaultRights; + } + + public ACLType getGroupDefaultRights() { + return groupDefaultRights; + } + +} diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index 50751ed2c4cc3..e82741bed5ec4 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -451,15 +451,24 @@ message OzoneAclInfo { USER = 1; GROUP = 2; WORLD = 3; + ANONYMOUS = 4; + CLIENT_IP = 5; } + enum OzoneAclRights { - READ = 1; - WRITE = 2; - READ_WRITE = 3; + CREATE = 1; + LIST = 2; + DELETE = 3; + READ = 4; + WRITE = 5; + READ_ACL = 6; + WRITE_ACL = 7; + ALL = 8; + NONE = 9; } required OzoneAclType type = 1; required string name = 2; - required OzoneAclRights rights = 3; + repeated OzoneAclRights rights = 3; } message CreateBucketRequest { diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java similarity index 57% rename from hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java rename to hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java index 03c45c501985d..2cd3d9ec98275 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java @@ -18,16 +18,20 @@ package org.apache.hadoop.ozone; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; + import org.junit.Test; +import java.util.Arrays; import java.util.HashMap; import java.util.Set; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.*; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; /** - * This class is to test acl stoarge and retreival in ozone store. + * This class is to test acl storage and retrieval in ozone store. 
*/ public class TestOzoneAcls { @@ -39,8 +43,8 @@ public void testAclParse() { testMatrix.put("user:bilbo:r", Boolean.TRUE); testMatrix.put("user:bilbo:w", Boolean.TRUE); testMatrix.put("user:bilbo:rw", Boolean.TRUE); - testMatrix.put("user:bilbo:wr", Boolean.TRUE); - testMatrix.put(" user:bilbo:wr ", Boolean.TRUE); + testMatrix.put("user:bilbo:a", Boolean.TRUE); + testMatrix.put(" user:bilbo:a ", Boolean.TRUE); // ACLs makes no judgement on the quality of @@ -53,7 +57,16 @@ public void testAclParse() { testMatrix.put("", Boolean.FALSE); testMatrix.put(null, Boolean.FALSE); testMatrix.put(" user:bilbo:", Boolean.FALSE); - testMatrix.put(" user:bilbo:rx", Boolean.FALSE); + testMatrix.put(" user:bilbo:rx", Boolean.TRUE); + testMatrix.put(" user:bilbo:rwdlncxy", Boolean.TRUE); + testMatrix.put(" group:bilbo:rwdlncxy", Boolean.TRUE); + testMatrix.put(" world::rwdlncxy", Boolean.TRUE); + testMatrix.put(" user:bilbo:rncxy", Boolean.TRUE); + testMatrix.put(" group:bilbo:ncxy", Boolean.TRUE); + testMatrix.put(" world::ncxy", Boolean.TRUE); + testMatrix.put(" user:bilbo:rwcxy", Boolean.TRUE); + testMatrix.put(" group:bilbo:rwcxy", Boolean.TRUE); + testMatrix.put(" world::rwcxy", Boolean.TRUE); testMatrix.put(" user:bilbo:mk", Boolean.FALSE); testMatrix.put(" user::rw", Boolean.FALSE); testMatrix.put("user11:bilbo:rw", Boolean.FALSE); @@ -62,12 +75,12 @@ public void testAclParse() { testMatrix.put(" group:hobbit:r", Boolean.TRUE); testMatrix.put(" group:hobbit:w", Boolean.TRUE); testMatrix.put(" group:hobbit:rw", Boolean.TRUE); - testMatrix.put(" group:hobbit:wr", Boolean.TRUE); + testMatrix.put(" group:hobbit:a", Boolean.TRUE); testMatrix.put(" group:*:rw", Boolean.TRUE); testMatrix.put(" group:~!:rw", Boolean.TRUE); testMatrix.put(" group:hobbit:", Boolean.FALSE); - testMatrix.put(" group:hobbit:rx", Boolean.FALSE); + testMatrix.put(" group:hobbit:rx", Boolean.TRUE); testMatrix.put(" group:hobbit:mk", Boolean.FALSE); testMatrix.put(" group::", Boolean.FALSE); testMatrix.put(" group::rw", Boolean.FALSE); @@ -77,14 +90,14 @@ public void testAclParse() { testMatrix.put("JUNK group:hobbit:r", Boolean.FALSE); testMatrix.put("JUNK group:hobbit:w", Boolean.FALSE); testMatrix.put("JUNK group:hobbit:rw", Boolean.FALSE); - testMatrix.put("JUNK group:hobbit:wr", Boolean.FALSE); + testMatrix.put("JUNK group:hobbit:a", Boolean.FALSE); testMatrix.put("JUNK group:*:rw", Boolean.FALSE); testMatrix.put("JUNK group:~!:rw", Boolean.FALSE); testMatrix.put(" world::r", Boolean.TRUE); testMatrix.put(" world::w", Boolean.TRUE); testMatrix.put(" world::rw", Boolean.TRUE); - testMatrix.put(" world::wr", Boolean.TRUE); + testMatrix.put(" world::a", Boolean.TRUE); testMatrix.put(" world:bilbo:w", Boolean.FALSE); testMatrix.put(" world:bilbo:rw", Boolean.FALSE); @@ -97,7 +110,7 @@ public void testAclParse() { try { OzoneAcl.parseAcl(key); // should never get here since parseAcl will throw - fail("An exception was expected but did not happen."); + fail("An exception was expected but did not happen. 
Key: " + key); } catch (IllegalArgumentException e) { // nothing to do } @@ -109,33 +122,51 @@ public void testAclParse() { public void testAclValues() { OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); assertEquals(acl.getName(), "bilbo"); - assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights()); - assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType()); + assertEquals(Arrays.asList(READ, WRITE), acl.getRights()); + assertEquals(ACLIdentityType.USER, acl.getType()); - acl = OzoneAcl.parseAcl("user:bilbo:wr"); + acl = OzoneAcl.parseAcl("user:bilbo:a"); assertEquals("bilbo", acl.getName()); - assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights()); - assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType()); + assertEquals(Arrays.asList(ALL), acl.getRights()); + assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:r"); assertEquals("bilbo", acl.getName()); - assertEquals(OzoneAcl.OzoneACLRights.READ, acl.getRights()); - assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType()); + assertEquals(Arrays.asList(READ), acl.getRights()); + assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:w"); assertEquals("bilbo", acl.getName()); - assertEquals(OzoneAcl.OzoneACLRights.WRITE, acl.getRights()); - assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType()); + assertEquals(Arrays.asList(WRITE), acl.getRights()); + assertEquals(ACLIdentityType.USER, acl.getType()); - acl = OzoneAcl.parseAcl("group:hobbit:wr"); + acl = OzoneAcl.parseAcl("group:hobbit:a"); assertEquals(acl.getName(), "hobbit"); - assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights()); - assertEquals(OzoneAcl.OzoneACLType.GROUP, acl.getType()); + assertEquals(Arrays.asList(ALL), acl.getRights()); + assertEquals(ACLIdentityType.GROUP, acl.getType()); + + acl = OzoneAcl.parseAcl("world::a"); + assertEquals(acl.getName(), ""); + assertEquals(Arrays.asList(ALL), acl.getRights()); + assertEquals(ACLIdentityType.WORLD, acl.getType()); + + acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy"); + assertEquals(acl.getName(), "bilbo"); + assertEquals(Arrays.asList(READ, WRITE, DELETE, LIST, NONE, CREATE, + READ_ACL, WRITE_ACL), acl.getRights()); + assertEquals(ACLIdentityType.USER, acl.getType()); + + acl = OzoneAcl.parseAcl("group:hadoop:rwdlncxy"); + assertEquals(acl.getName(), "hadoop"); + assertEquals(Arrays.asList(READ, WRITE, DELETE, LIST, NONE, CREATE, + READ_ACL, WRITE_ACL), acl.getRights()); + assertEquals(ACLIdentityType.GROUP, acl.getType()); - acl = OzoneAcl.parseAcl("world::wr"); + acl = OzoneAcl.parseAcl("world::rwdlncxy"); assertEquals(acl.getName(), ""); - assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights()); - assertEquals(OzoneAcl.OzoneACLType.WORLD, acl.getType()); + assertEquals(Arrays.asList(READ, WRITE, DELETE, LIST, NONE, CREATE, + READ_ACL, WRITE_ACL), acl.getRights()); + assertEquals(ACLIdentityType.WORLD, acl.getType()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index bd496d0803ffd..0d32f83250659 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -82,6 +82,8 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import 
org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.ozone.s3.util.OzoneS3Util; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Time; @@ -260,7 +262,7 @@ public void testCreateBucketWithMetadata() } - + @Test public void testCreateBucket() throws IOException, OzoneException { @@ -420,8 +422,8 @@ public void testCreateBucketWithAcls() throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "test", - OzoneAcl.OzoneACLRights.READ_WRITE); + OzoneAcl userAcl = new OzoneAcl(ACLIdentityType.USER, "test", + ACLType.READ); List acls = new ArrayList<>(); acls.add(userAcl); store.createVolume(volumeName); @@ -439,8 +441,8 @@ public void testCreateBucketWithAllArgument() throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "test", - OzoneAcl.OzoneACLRights.READ_WRITE); + OzoneAcl userAcl = new OzoneAcl(ACLIdentityType.USER, "test", + ACLType.ALL); List acls = new ArrayList<>(); acls.add(userAcl); store.createVolume(volumeName); @@ -480,9 +482,7 @@ public void testAddBucketAcl() OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); List acls = new ArrayList<>(); - acls.add(new OzoneAcl( - OzoneAcl.OzoneACLType.USER, "test", - OzoneAcl.OzoneACLRights.READ_WRITE)); + acls.add(new OzoneAcl(ACLIdentityType.USER, "test", ACLType.ALL)); OzoneBucket bucket = volume.getBucket(bucketName); bucket.addAcls(acls); OzoneBucket newBucket = volume.getBucket(bucketName); @@ -495,8 +495,8 @@ public void testRemoveBucketAcl() throws IOException, OzoneException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "test", - OzoneAcl.OzoneACLRights.READ_WRITE); + OzoneAcl userAcl = new OzoneAcl(ACLIdentityType.USER, "test", + ACLType.ALL); List acls = new ArrayList<>(); acls.add(userAcl); store.createVolume(volumeName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java index 03843bbc25c62..6d4702fa9340b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java @@ -55,6 +55,8 @@ import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.util.OzoneVersionInfo; import org.apache.hadoop.ozone.web.handlers.BucketArgs; import org.apache.hadoop.ozone.web.handlers.KeyArgs; @@ -356,30 +358,29 @@ public void testAccessVolume() throws IOException, OzoneException { createVolumeArgs.setGroups(groupName); 
storageHandler.createVolume(createVolumeArgs); - OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, userName, - OzoneAcl.OzoneACLRights.READ_WRITE); + OzoneAcl userAcl = new OzoneAcl(ACLIdentityType.USER, userName, + ACLType.READ); Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, userAcl)); - OzoneAcl group = new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, groupName[0], - OzoneAcl.OzoneACLRights.READ); + OzoneAcl group = new OzoneAcl(ACLIdentityType.GROUP, groupName[0], + ACLType.READ); Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, group)); // Create a different user and access should fail String falseUserName = "user" + RandomStringUtils.randomNumeric(5); OzoneAcl falseUserAcl = - new OzoneAcl(OzoneAcl.OzoneACLType.USER, falseUserName, - OzoneAcl.OzoneACLRights.READ_WRITE); + new OzoneAcl(ACLIdentityType.USER, falseUserName, + ACLType.ALL); Assert.assertFalse(storageHandler .checkVolumeAccess(volumeName, falseUserAcl)); // Checking access with user name and Group Type should fail - OzoneAcl falseGroupAcl = new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, userName, - OzoneAcl.OzoneACLRights.READ_WRITE); + OzoneAcl falseGroupAcl = new OzoneAcl(ACLIdentityType.GROUP, userName, + ACLType.ALL); Assert.assertFalse(storageHandler .checkVolumeAccess(volumeName, falseGroupAcl)); // Access for acl type world should also fail OzoneAcl worldAcl = - new OzoneAcl(OzoneAcl.OzoneACLType.WORLD, "", - OzoneAcl.OzoneACLRights.READ); + new OzoneAcl(ACLIdentityType.WORLD, "", ACLType.READ); Assert.assertFalse(storageHandler.checkVolumeAccess(volumeName, worldAcl)); Assert.assertEquals(0, omMetrics.getNumVolumeCheckAccessFails()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java index d3a19c39a4673..e8fa1245a09c8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java @@ -44,8 +44,6 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneAcl.OzoneACLRights; -import org.apache.hadoop.ozone.OzoneAcl.OzoneACLType; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneKey; @@ -59,6 +57,8 @@ import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.web.ozShell.OzoneShell; import org.apache.hadoop.ozone.web.ozShell.Shell; import org.apache.hadoop.ozone.web.request.OzoneQuota; @@ -744,8 +744,9 @@ public void testUpdateBucket() throws Exception { OzoneAcl acl = bucket.getAcls().get(aclSize); assertTrue(acl.getName().equals("frodo") - && acl.getType() == OzoneACLType.USER - && acl.getRights()== OzoneACLRights.READ_WRITE); + && acl.getType() == ACLIdentityType.USER + && acl.getRights().contains(ACLType.READ) + && acl.getRights().contains(ACLType.WRITE)); args = new String[] {"bucket", "update", url + "/" + vol.getName() + "/" + bucketName, "--removeAcl", @@ -756,8 
+757,8 @@ public void testUpdateBucket() throws Exception { acl = bucket.getAcls().get(aclSize); assertEquals(1 + aclSize, bucket.getAcls().size()); assertTrue(acl.getName().equals("samwise") - && acl.getType() == OzoneACLType.GROUP - && acl.getRights()== OzoneACLRights.READ); + && acl.getType() == ACLIdentityType.GROUP + && acl.getRights().contains(ACLType.READ)); // test update bucket for a non-exist bucket args = new String[] {"bucket", "update", diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java index d0ab5e210f1cf..42c7238427db3 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java @@ -44,8 +44,10 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.web.request.OzoneQuota; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientManager; @@ -80,8 +82,8 @@ public final class DistributedStorageHandler implements StorageHandler { private final OzoneManagerProtocol ozoneManagerClient; private final XceiverClientManager xceiverClientManager; - private final OzoneAcl.OzoneACLRights userRights; - private final OzoneAcl.OzoneACLRights groupRights; + private final ACLType userRights; + private final ACLType groupRights; private int chunkSize; private final long streamBufferFlushSize; private final long streamBufferMaxSize; @@ -109,10 +111,10 @@ public DistributedStorageHandler(OzoneConfiguration conf, chunkSize = (int)conf.getStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES); - userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS, - OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT); - groupRights = conf.getEnum(OMConfigKeys.OZONE_OM_GROUP_RIGHTS, - OMConfigKeys.OZONE_OM_GROUP_RIGHTS_DEFAULT); + // Get default acl rights for user and group. + OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); + this.userRights = aclConfig.getUserDefaultRights(); + this.groupRights = aclConfig.getGroupDefaultRights(); if(chunkSize > OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) { LOG.warn("The chunk size ({}) is not allowed to be more than" + " the maximum size ({})," @@ -176,8 +178,7 @@ public void createVolume(VolumeArgs args) throws IOException, OzoneException { long quota = args.getQuota() == null ? 
OzoneConsts.MAX_QUOTA_IN_BYTES : args.getQuota().sizeInBytes(); OzoneAcl userAcl = - new OzoneAcl(OzoneAcl.OzoneACLType.USER, - args.getUserName(), userRights); + new OzoneAcl(ACLIdentityType.USER, args.getUserName(), userRights); OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); builder.setAdminName(args.getAdminName()) .setOwnerName(args.getUserName()) @@ -187,7 +188,7 @@ public void createVolume(VolumeArgs args) throws IOException, OzoneException { if (args.getGroups() != null) { for (String group : args.getGroups()) { OzoneAcl groupAcl = - new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights); + new OzoneAcl(ACLIdentityType.GROUP, group, groupRights); builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(groupAcl)); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java index 872d7b6674022..7b17550a12775 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmDeleteVolumeResponse; @@ -484,7 +485,8 @@ public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Check volume access failed for volume:{} user:{} rights:{}", - volume, userAcl.getName(), userAcl.getRights(), ex); + volume, userAcl.getName(), + StringUtils.join(userAcl.getRightsList(), ","), ex); } throw ex; } finally { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java index 7fa72fd7d0adc..a063d11d24955 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java @@ -42,6 +42,8 @@ import org.mockito.Mockito; import org.mockito.runners.MockitoJUnitRunner; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.*; + /** * Tests BucketManagerImpl, mocks OMMetadataManager for testing. 
*/ @@ -217,8 +219,8 @@ public void testSetBucketPropertyAddACL() throws Exception { OmMetadataManagerImpl metaMgr = createSampleVol(); List acls = new LinkedList<>(); - OzoneAcl ozoneAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, - "root", OzoneAcl.OzoneACLRights.READ); + OzoneAcl ozoneAcl = new OzoneAcl(ACLIdentityType.USER, + "root", ACLType.READ); acls.add(ozoneAcl); BucketManager bucketManager = new BucketManagerImpl(metaMgr); OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() @@ -235,8 +237,8 @@ public void testSetBucketPropertyAddACL() throws Exception { Assert.assertEquals("bucketOne", result.getBucketName()); Assert.assertEquals(1, result.getAcls().size()); List addAcls = new LinkedList<>(); - OzoneAcl newAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, - "ozone", OzoneAcl.OzoneACLRights.READ); + OzoneAcl newAcl = new OzoneAcl(ACLIdentityType.USER, + "ozone", ACLType.READ); addAcls.add(newAcl); OmBucketArgs bucketArgs = OmBucketArgs.newBuilder() .setVolumeName("sampleVol") @@ -256,10 +258,10 @@ public void testSetBucketPropertyRemoveACL() throws Exception { OmMetadataManagerImpl metaMgr = createSampleVol(); List acls = new LinkedList<>(); - OzoneAcl aclOne = new OzoneAcl(OzoneAcl.OzoneACLType.USER, - "root", OzoneAcl.OzoneACLRights.READ); - OzoneAcl aclTwo = new OzoneAcl(OzoneAcl.OzoneACLType.USER, - "ozone", OzoneAcl.OzoneACLRights.READ); + OzoneAcl aclOne = new OzoneAcl(ACLIdentityType.USER, + "root", ACLType.READ); + OzoneAcl aclTwo = new OzoneAcl(ACLIdentityType.USER, + "ozone", ACLType.READ); acls.add(aclOne); acls.add(aclTwo); BucketManager bucketManager = new BucketManagerImpl(metaMgr); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java index 0c7b7ed2632d7..d4c982dcf95e8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java @@ -378,7 +378,7 @@ private void insertOMDB(Connection conn, KeyType type, String keyName, for (OzoneAclInfo aclInfo : volumeInfo.getVolumeAclsList()) { String insertAclInfo = String.format(INSERT_ACL_INFO, adminName, ownerName, volumeName, - aclInfo.getType(), aclInfo.getName(), aclInfo.getRights()); + aclInfo.getType(), aclInfo.getName(), aclInfo.getRightsList()); executeSQL(conn, insertAclInfo); } break; From 77c49f29632ce4e642ca3a4929d1b2518e280140 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Tue, 21 May 2019 18:41:30 -0700 Subject: [PATCH 0012/1308] HDDS-1406. Avoid usage of commonPool in RatisPipelineUtils. 
(#714) --- .../hdds/scm/pipeline/PipelineFactory.java | 4 + .../hdds/scm/pipeline/PipelineProvider.java | 1 + .../scm/pipeline/RatisPipelineProvider.java | 117 +++++++++++++++++- .../hdds/scm/pipeline/RatisPipelineUtils.java | 73 +---------- .../hdds/scm/pipeline/SCMPipelineManager.java | 5 +- .../scm/pipeline/SimplePipelineProvider.java | 5 + .../pipeline/MockRatisPipelineProvider.java | 5 + ...=> TestRatisPipelineCreateAndDestory.java} | 11 +- 8 files changed, 144 insertions(+), 77 deletions(-) rename hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/{TestRatisPipelineUtils.java => TestRatisPipelineCreateAndDestory.java} (92%) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java index 89349761bfcad..cec688c1a8e36 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java @@ -61,4 +61,8 @@ public Pipeline create(ReplicationType type, ReplicationFactor factor, List nodes) { return providers.get(type).create(factor, nodes); } + + public void shutdown() { + providers.values().forEach(provider -> provider.shutdown()); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java index bb16533751177..a0ce216267237 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java @@ -33,4 +33,5 @@ public interface PipelineProvider { Pipeline create(ReplicationFactor factor, List nodes); + void shutdown(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index df21420be1d67..d3b02e6253aaf 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -24,17 +24,39 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; +import org.apache.hadoop.io.MultipleIOException; +import org.apache.ratis.RatisHelper; +import org.apache.ratis.client.RaftClient; +import org.apache.ratis.grpc.GrpcTlsConfig; +import org.apache.ratis.protocol.RaftClientReply; +import org.apache.ratis.protocol.RaftGroup; +import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.retry.RetryPolicy; +import org.apache.ratis.rpc.SupportedRpcType; +import org.apache.ratis.util.TimeDuration; +import 
org.apache.ratis.util.function.CheckedBiConsumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.ForkJoinWorkerThread; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; /** @@ -42,10 +64,28 @@ */ public class RatisPipelineProvider implements PipelineProvider { + private static final Logger LOG = + LoggerFactory.getLogger(RatisPipelineProvider.class); + private final NodeManager nodeManager; private final PipelineStateManager stateManager; private final Configuration conf; + // Set parallelism at 3, as now in Ratis we create 1 and 3 node pipelines. + private final int parallelismForPool = 3; + + private final ForkJoinPool.ForkJoinWorkerThreadFactory factory = + (pool -> { + final ForkJoinWorkerThread worker = ForkJoinPool. + defaultForkJoinWorkerThreadFactory.newThread(pool); + worker.setName("RATISCREATEPIPELINE" + worker.getPoolIndex()); + return worker; + }); + + private final ForkJoinPool forkJoinPool = new ForkJoinPool( + parallelismForPool, factory, null, false); + + RatisPipelineProvider(NodeManager nodeManager, PipelineStateManager stateManager, Configuration conf) { this.nodeManager = nodeManager; @@ -53,6 +93,7 @@ public class RatisPipelineProvider implements PipelineProvider { this.conf = conf; } + /** * Create pluggable container placement policy implementation instance. * @@ -133,7 +174,81 @@ public Pipeline create(ReplicationFactor factor, .build(); } + + @Override + public void shutdown() { + forkJoinPool.shutdownNow(); + try { + forkJoinPool.awaitTermination(60, TimeUnit.SECONDS); + } catch (Exception e) { + LOG.error("Unexpected exception occurred during shutdown of " + + "RatisPipelineProvider", e); + } + } + protected void initializePipeline(Pipeline pipeline) throws IOException { - RatisPipelineUtils.createPipeline(pipeline, conf); + final RaftGroup group = RatisHelper.newRaftGroup(pipeline); + LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group); + callRatisRpc(pipeline.getNodes(), + (raftClient, peer) -> { + RaftClientReply reply = raftClient.groupAdd(group, peer.getId()); + if (reply == null || !reply.isSuccess()) { + String msg = "Pipeline initialization failed for pipeline:" + + pipeline.getId() + " node:" + peer.getId(); + LOG.error(msg); + throw new IOException(msg); + } + }); + } + + private void callRatisRpc(List datanodes, + CheckedBiConsumer< RaftClient, RaftPeer, IOException> rpc) + throws IOException { + if (datanodes.isEmpty()) { + return; + } + + final String rpcType = conf + .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); + final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(conf); + final List< IOException > exceptions = + Collections.synchronizedList(new ArrayList<>()); + final int maxOutstandingRequests = + HddsClientUtils.getMaxOutstandingRequests(conf); + final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new + SecurityConfig(conf)); + final TimeDuration requestTimeout = + RatisHelper.getClientRequestTimeout(conf); + try { + forkJoinPool.submit(() -> { + datanodes.parallelStream().forEach(d -> { + 
final RaftPeer p = RatisHelper.toRaftPeer(d); + try (RaftClient client = RatisHelper + .newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p, + retryPolicy, maxOutstandingRequests, tlsConfig, + requestTimeout)) { + rpc.accept(client, p); + } catch (IOException ioe) { + String errMsg = + "Failed invoke Ratis rpc " + rpc + " for " + d.getUuid(); + LOG.error(errMsg, ioe); + exceptions.add(new IOException(errMsg, ioe)); + } + }); + }).get(); + } catch (ExecutionException | RejectedExecutionException ex) { + LOG.error(ex.getClass().getName() + " exception occurred during " + + "createPipeline", ex); + throw new IOException(ex.getClass().getName() + " exception occurred " + + "during createPipeline", ex); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new IOException("Interrupt exception occurred during " + + "createPipeline", ex); + } + if (!exceptions.isEmpty()) { + throw MultipleIOException.createIOException(exceptions); + } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java index 0af34fb8563ca..6d2f08b9ca50a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java @@ -17,66 +17,37 @@ */ package org.apache.hadoop.hdds.scm.pipeline; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.io.MultipleIOException; import org.apache.ratis.RatisHelper; import org.apache.ratis.client.RaftClient; import org.apache.ratis.grpc.GrpcTlsConfig; -import org.apache.ratis.protocol.RaftClientReply; import org.apache.ratis.protocol.RaftGroup; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.retry.RetryPolicy; import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.util.TimeDuration; -import org.apache.ratis.util.function.CheckedBiConsumer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; /** * Utility class for Ratis pipelines. Contains methods to create and destroy * ratis pipelines. */ -final class RatisPipelineUtils { +public final class RatisPipelineUtils { private static final Logger LOG = LoggerFactory.getLogger(RatisPipelineUtils.class); private RatisPipelineUtils() { } - - /** - * Sends ratis command to create pipeline on all the datanodes. 
- * - * @param pipeline - Pipeline to be created - * @param ozoneConf - Ozone Confinuration - * @throws IOException if creation fails - */ - public static void createPipeline(Pipeline pipeline, Configuration ozoneConf) - throws IOException { - final RaftGroup group = RatisHelper.newRaftGroup(pipeline); - LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group); - callRatisRpc(pipeline.getNodes(), ozoneConf, - (raftClient, peer) -> { - RaftClientReply reply = raftClient.groupAdd(group, peer.getId()); - if (reply == null || !reply.isSuccess()) { - String msg = "Pipeline initialization failed for pipeline:" - + pipeline.getId() + " node:" + peer.getId(); - LOG.error(msg); - throw new IOException(msg); - } - }); - } - /** * Removes pipeline from SCM. Sends ratis command to destroy pipeline on all * the datanodes. @@ -125,42 +96,4 @@ static void destroyPipeline(DatanodeDetails dn, PipelineID pipelineID, client .groupRemove(RaftGroupId.valueOf(pipelineID.getId()), true, p.getId()); } - - private static void callRatisRpc(List datanodes, - Configuration ozoneConf, - CheckedBiConsumer rpc) - throws IOException { - if (datanodes.isEmpty()) { - return; - } - - final String rpcType = ozoneConf - .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); - final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf); - final List exceptions = - Collections.synchronizedList(new ArrayList<>()); - final int maxOutstandingRequests = - HddsClientUtils.getMaxOutstandingRequests(ozoneConf); - final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new - SecurityConfig(ozoneConf)); - final TimeDuration requestTimeout = - RatisHelper.getClientRequestTimeout(ozoneConf); - datanodes.parallelStream().forEach(d -> { - final RaftPeer p = RatisHelper.toRaftPeer(d); - try (RaftClient client = RatisHelper - .newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p, - retryPolicy, maxOutstandingRequests, tlsConfig, requestTimeout)) { - rpc.accept(client, p); - } catch (IOException ioe) { - String errMsg = - "Failed invoke Ratis rpc " + rpc + " for " + d.getUuid(); - LOG.error(errMsg, ioe); - exceptions.add(new IOException(errMsg, ioe)); - } - }); - if (!exceptions.isEmpty()) { - throw MultipleIOException.createIOException(exceptions); - } - } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java index c72a52886c825..bce396b6a56b9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java @@ -87,7 +87,8 @@ public SCMPipelineManager(Configuration conf, NodeManager nodeManager, this.lock = new ReentrantReadWriteLock(); this.conf = conf; this.stateManager = new PipelineStateManager(conf); - this.pipelineFactory = new PipelineFactory(nodeManager, stateManager, conf); + this.pipelineFactory = new PipelineFactory(nodeManager, stateManager, + conf); // TODO: See if thread priority needs to be set for these threads scheduler = new Scheduler("RatisPipelineUtilsThread", false, 1); this.backgroundPipelineCreator = @@ -419,5 +420,7 @@ public void close() throws IOException { if(metrics != null) { metrics.unRegister(); } + // shutdown pipeline provider. 
+ pipelineFactory.shutdown(); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java index 3e42df332682c..ab98dfa3ed7b5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java @@ -72,4 +72,9 @@ public Pipeline create(ReplicationFactor factor, .setNodes(nodes) .build(); } + + @Override + public void shutdown() { + // Do nothing. + } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java index 22828046910e5..32784a31deac3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java @@ -37,4 +37,9 @@ public MockRatisPipelineProvider(NodeManager nodeManager, protected void initializePipeline(Pipeline pipeline) throws IOException { // do nothing as the datanodes do not exists } + + @Override + public void shutdown() { + // Do nothing. + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestory.java similarity index 92% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineUtils.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestory.java index b653e7a2b9284..9fd8aae0f0f13 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestory.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.test.GenericTestUtils; @@ -40,7 +39,7 @@ /** * Tests for RatisPipelineUtils. */ -public class TestRatisPipelineUtils { +public class TestRatisPipelineCreateAndDestory { private static MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); @@ -98,11 +97,13 @@ public void testPipelineCreationOnNodeRestart() throws Exception { // try creating another pipeline now try { - RatisPipelineUtils.createPipeline(pipelines.get(0), conf); + pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); Assert.fail("pipeline creation should fail after shutting down pipeline"); } catch (IOException ioe) { - // in case the pipeline creation fails, MultipleIOException is thrown - Assert.assertTrue(ioe instanceof MultipleIOException); + // As now all datanodes are shutdown, they move to stale state, there + // will be no sufficient datanodes to create the pipeline. 
+ Assert.assertTrue(ioe instanceof InsufficientDatanodesException); } // make sure pipelines is destroyed From 456bb8a3e01bb41b02cc64a5cff149446668afab Mon Sep 17 00:00:00 2001 From: sdeka Date: Wed, 22 May 2019 09:34:21 +0530 Subject: [PATCH 0013/1308] verifyContainerData also does fixup, renamed. Added a Javadoc comment, both as per review discussion --- .../ozone/container/ozoneimpl/ContainerReader.java | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 08a8f5d47f683..448a02f5e0dce 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -165,14 +165,21 @@ private void verifyContainerFile(long containerID, File containerFile) { "Skipping loading of this container.", containerFile); return; } - verifyContainerData(containerData); + verifyAndFixupContainerData(containerData); } catch (IOException ex) { LOG.error("Failed to parse ContainerFile for ContainerID: {}", containerID, ex); } } - public void verifyContainerData(ContainerData containerData) + /** + * verify ContainerData loaded from disk and fix-up stale members. + * Specifically blockCommitSequenceId, delete related metadata + * and bytesUsed + * @param containerData + * @throws IOException + */ + public void verifyAndFixupContainerData(ContainerData containerData) throws IOException { switch (containerData.getContainerType()) { case KeyValueContainer: From 9dff6eff819a832a5918d786675f42aa243a13e8 Mon Sep 17 00:00:00 2001 From: Siyao Meng Date: Wed, 22 May 2019 13:20:46 +0900 Subject: [PATCH 0014/1308] HDFS-14507. Document -blockingDecommission option for hdfs dfsadmin -listOpenFiles Signed-off-by: Takanobu Asanuma --- .../hadoop-hdfs/src/site/markdown/HDFSCommands.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md index 0ba9b9423776b..eba81afe3bf7c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md @@ -412,7 +412,7 @@ Usage: | `-getDatanodeInfo` \ | Get the information about the given datanode. See [Rolling Upgrade document](./HdfsRollingUpgrade.html#dfsadmin_-getDatanodeInfo) for the detail. | | `-metasave` filename | Save Namenode's primary data structures to *filename* in the directory specified by hadoop.log.dir property. *filename* is overwritten if it exists. *filename* will contain one line for each of the following
    1. Datanodes heart beating with Namenode
    2. Blocks waiting to be replicated
    3. Blocks currently being replicated
    4. Blocks waiting to be deleted | | `-triggerBlockReport` `[-incremental]` \ | Trigger a block report for the given datanode. If 'incremental' is specified, it will be otherwise, it will be a full block report. | -| `-listOpenFiles` `[-blockingDecommission]` `[-path ]` | List all open files currently managed by the NameNode along with client name and client machine accessing them. Open files list will be filtered by given type and path. | +| `-listOpenFiles` `[-blockingDecommission]` `[-path ]` | List all open files currently managed by the NameNode along with client name and client machine accessing them. Open files list will be filtered by given type and path. Add -blockingDecommission option if you only want to list open files that are blocking the DataNode decommissioning. | | `-help` [cmd] | Displays help for the given command or all commands if none is specified. | Runs a HDFS dfsadmin client. From 67f9a7b165edecbec7c8063758202be4d8cff0f5 Mon Sep 17 00:00:00 2001 From: Wanqiang Ji Date: Wed, 15 May 2019 19:54:41 +0800 Subject: [PATCH 0015/1308] MAPREDUCE-7205. Treat container scheduler kill exit code as a task attempt killing event. This closes #821 Signed-off-by: Akira Ajisaka --- .../v2/app/rm/RMContainerAllocator.java | 20 ++++++++++------- .../v2/app/rm/TestRMContainerAllocator.java | 22 +++++++++++++++++++ 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java index e459cb52f05ca..a0a4def86343e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java @@ -968,16 +968,20 @@ private void updateAMRMToken(Token token) throws IOException { @VisibleForTesting public TaskAttemptEvent createContainerFinishedEvent(ContainerStatus cont, - TaskAttemptId attemptID) { - if (cont.getExitStatus() == ContainerExitStatus.ABORTED - || cont.getExitStatus() == ContainerExitStatus.PREEMPTED) { - // killed by framework - return new TaskAttemptEvent(attemptID, - TaskAttemptEventType.TA_KILL); - } else { - return new TaskAttemptEvent(attemptID, + TaskAttemptId attemptId) { + TaskAttemptEvent event; + switch (cont.getExitStatus()) { + case ContainerExitStatus.ABORTED: + case ContainerExitStatus.PREEMPTED: + case ContainerExitStatus.KILLED_BY_CONTAINER_SCHEDULER: + // killed by YARN + event = new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL); + break; + default: + event = new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_COMPLETED); } + return event; } @SuppressWarnings("unchecked") diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java index f3ac950e9efcb..439be485a9fa3 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java @@ -2430,6 +2430,8 @@ public void testCompletedContainerEvent() { ApplicationId applicationId = ApplicationId.newInstance(1, 1); ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, 1); + + // ABORTED ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, 1); ContainerStatus status = ContainerStatus.newInstance( @@ -2448,6 +2450,7 @@ public void testCompletedContainerEvent() { abortedStatus, attemptId); Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType()); + // PREEMPTED ContainerId containerId2 = ContainerId.newContainerId(applicationAttemptId, 2); ContainerStatus status2 = ContainerStatus.newInstance(containerId2, @@ -2464,6 +2467,25 @@ public void testCompletedContainerEvent() { TaskAttemptEvent abortedEvent2 = allocator.createContainerFinishedEvent( preemptedStatus, attemptId); Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent2.getType()); + + // KILLED_BY_CONTAINER_SCHEDULER + ContainerId containerId3 = + ContainerId.newContainerId(applicationAttemptId, 3); + ContainerStatus status3 = ContainerStatus.newInstance(containerId3, + ContainerState.RUNNING, "", 0); + + ContainerStatus killedByContainerSchedulerStatus = + ContainerStatus.newInstance(containerId3, ContainerState.RUNNING, "", + ContainerExitStatus.KILLED_BY_CONTAINER_SCHEDULER); + + TaskAttemptEvent event3 = allocator.createContainerFinishedEvent(status3, + attemptId); + Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED, + event3.getType()); + + TaskAttemptEvent abortedEvent3 = allocator.createContainerFinishedEvent( + killedByContainerSchedulerStatus, attemptId); + Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent3.getType()); } @Test From 2fc6f8599a64bceb19e789c55012ddc42ba590bf Mon Sep 17 00:00:00 2001 From: Mukul Kumar Singh Date: Wed, 22 May 2019 17:18:40 +0530 Subject: [PATCH 0016/1308] HDDS-1449. JVM Exit in datanode while committing a key. Contributed by Mukul Kumar Singh. 
(#825) --- .../DeleteBlocksCommandHandler.java | 90 ++++---- .../common/utils/ContainerCache.java | 87 +++++-- .../keyvalue/KeyValueBlockIterator.java | 15 +- .../container/keyvalue/KeyValueContainer.java | 13 +- .../keyvalue/KeyValueContainerCheck.java | 61 ++--- .../keyvalue/helpers/BlockUtils.java | 4 +- .../helpers/KeyValueContainerUtil.java | 36 +-- .../keyvalue/impl/BlockManagerImpl.java | 205 ++++++++--------- .../background/BlockDeletingService.java | 116 +++++----- .../container/ozoneimpl/ContainerReader.java | 47 ++-- .../keyvalue/TestKeyValueBlockIterator.java | 212 +++++++++--------- .../keyvalue/TestKeyValueContainer.java | 44 ++-- .../keyvalue/TestKeyValueContainerCheck.java | 88 ++++---- .../TestStorageContainerManagerHelper.java | 14 +- .../rpc/TestOzoneRpcClientAbstract.java | 70 +++--- .../common/TestBlockDeletingService.java | 165 +++++++------- .../common/impl/TestContainerPersistence.java | 4 +- .../commandhandler/TestBlockDeletion.java | 28 ++- .../TestCloseContainerByPipeline.java | 10 +- 19 files changed, 711 insertions(+), 598 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index aa63fb48f4de2..966452e105be3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -48,7 +48,7 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.BatchOperation; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -198,52 +198,54 @@ private void deleteKeyValueContainerBlocks( } int newDeletionBlocks = 0; - MetadataStore containerDB = BlockUtils.getDB(containerData, conf); - for (Long blk : delTX.getLocalIDList()) { - BatchOperation batch = new BatchOperation(); - byte[] blkBytes = Longs.toByteArray(blk); - byte[] blkInfo = containerDB.get(blkBytes); - if (blkInfo != null) { - byte[] deletingKeyBytes = - DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk); - byte[] deletedKeyBytes = - DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk); - if (containerDB.get(deletingKeyBytes) != null - || containerDB.get(deletedKeyBytes) != null) { - LOG.debug(String.format( - "Ignoring delete for block %d in container %d." - + " Entry already added.", blk, containerId)); - continue; - } - // Found the block in container db, - // use an atomic update to change its state to deleting. - batch.put(deletingKeyBytes, blkInfo); - batch.delete(blkBytes); - try { - containerDB.writeBatch(batch); - newDeletionBlocks++; - LOG.debug("Transited Block {} to DELETING state in container {}", - blk, containerId); - } catch (IOException e) { - // if some blocks failed to delete, we fail this TX, - // without sending this ACK to SCM, SCM will resend the TX - // with a certain number of retries. 
- throw new IOException( - "Failed to delete blocks for TXID = " + delTX.getTxID(), e); + try(ReferenceCountedDB containerDB = + BlockUtils.getDB(containerData, conf)) { + for (Long blk : delTX.getLocalIDList()) { + BatchOperation batch = new BatchOperation(); + byte[] blkBytes = Longs.toByteArray(blk); + byte[] blkInfo = containerDB.getStore().get(blkBytes); + if (blkInfo != null) { + byte[] deletingKeyBytes = + DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk); + byte[] deletedKeyBytes = + DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk); + if (containerDB.getStore().get(deletingKeyBytes) != null + || containerDB.getStore().get(deletedKeyBytes) != null) { + LOG.debug(String.format( + "Ignoring delete for block %d in container %d." + + " Entry already added.", blk, containerId)); + continue; + } + // Found the block in container db, + // use an atomic update to change its state to deleting. + batch.put(deletingKeyBytes, blkInfo); + batch.delete(blkBytes); + try { + containerDB.getStore().writeBatch(batch); + newDeletionBlocks++; + LOG.debug("Transited Block {} to DELETING state in container {}", + blk, containerId); + } catch (IOException e) { + // if some blocks failed to delete, we fail this TX, + // without sending this ACK to SCM, SCM will resend the TX + // with a certain number of retries. + throw new IOException( + "Failed to delete blocks for TXID = " + delTX.getTxID(), e); + } + } else { + LOG.debug("Block {} not found or already under deletion in" + + " container {}, skip deleting it.", blk, containerId); } - } else { - LOG.debug("Block {} not found or already under deletion in" - + " container {}, skip deleting it.", blk, containerId); } - } - containerDB - .put(DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX), - Longs.toByteArray(delTX.getTxID())); - containerData - .updateDeleteTransactionId(delTX.getTxID()); - // update pending deletion blocks count in in-memory container status - containerData.incrPendingDeletionBlocks(newDeletionBlocks); + containerDB.getStore() + .put(DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX), + Longs.toByteArray(delTX.getTxID())); + containerData + .updateDeleteTransactionId(delTX.getTxID()); + // update pending deletion blocks count in in-memory container status + containerData.incrPendingDeletionBlocks(newDeletionBlocks); + } } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java index 25d1bdf291817..c15bef0c0cfd3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java @@ -28,8 +28,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.Closeable; import java.io.File; import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -92,8 +95,8 @@ public void shutdownCache() { MapIterator iterator = cache.mapIterator(); while (iterator.hasNext()) { iterator.next(); - MetadataStore db = (MetadataStore) iterator.getValue(); - closeDB((String)iterator.getKey(), db); + ReferenceCountedDB db = (ReferenceCountedDB) iterator.getValue(); + 
db.setEvicted(true); } // reset the cache cache.clear(); @@ -107,11 +110,11 @@ public void shutdownCache() { */ @Override protected boolean removeLRU(LinkEntry entry) { - MetadataStore db = (MetadataStore) entry.getValue(); + ReferenceCountedDB db = (ReferenceCountedDB) entry.getValue(); String dbFile = (String)entry.getKey(); lock.lock(); try { - closeDB(dbFile, db); + db.setEvicted(false); return true; } catch (Exception e) { LOG.error("Eviction for db:{} failed", dbFile, e); @@ -128,26 +131,30 @@ protected boolean removeLRU(LinkEntry entry) { * @param containerDBType - DB type of the container. * @param containerDBPath - DB path of the container. * @param conf - Hadoop Configuration. - * @return MetadataStore. + * @return ReferenceCountedDB. */ - public MetadataStore getDB(long containerID, String containerDBType, + public ReferenceCountedDB getDB(long containerID, String containerDBType, String containerDBPath, Configuration conf) throws IOException { Preconditions.checkState(containerID >= 0, "Container ID cannot be negative."); lock.lock(); try { - MetadataStore db = (MetadataStore) this.get(containerDBPath); + ReferenceCountedDB db = (ReferenceCountedDB) this.get(containerDBPath); if (db == null) { - db = MetadataStoreBuilder.newBuilder() + MetadataStore metadataStore = + MetadataStoreBuilder.newBuilder() .setDbFile(new File(containerDBPath)) .setCreateIfMissing(false) .setConf(conf) .setDBType(containerDBType) .build(); + db = new ReferenceCountedDB(metadataStore, containerDBPath); this.put(containerDBPath, db); } + // increment the reference before returning the object + db.incrementReference(); return db; } catch (Exception e) { LOG.error("Error opening DB. Container:{} ContainerPath:{}", @@ -161,16 +168,70 @@ public MetadataStore getDB(long containerID, String containerDBType, /** * Remove a DB handler from cache. * - * @param containerPath - path of the container db file. + * @param containerDBPath - path of the container db file. */ - public void removeDB(String containerPath) { + public void removeDB(String containerDBPath) { lock.lock(); try { - MetadataStore db = (MetadataStore)this.get(containerPath); - closeDB(containerPath, db); - this.remove(containerPath); + ReferenceCountedDB db = (ReferenceCountedDB)this.get(containerDBPath); + if (db != null) { + // marking it as evicted will close the db as well. + db.setEvicted(true); + } + this.remove(containerDBPath); } finally { lock.unlock(); } } + + + /** + * Class to implement reference counting over instances handed by Container + * Cache. 
+ */ + public class ReferenceCountedDB implements Closeable { + private final AtomicInteger referenceCount; + private final AtomicBoolean isEvicted; + private final MetadataStore store; + private final String containerDBPath; + + public ReferenceCountedDB(MetadataStore store, String containerDBPath) { + this.referenceCount = new AtomicInteger(0); + this.isEvicted = new AtomicBoolean(false); + this.store = store; + this.containerDBPath = containerDBPath; + } + + private void incrementReference() { + this.referenceCount.incrementAndGet(); + } + + private void decrementReference() { + this.referenceCount.decrementAndGet(); + cleanup(); + } + + private void setEvicted(boolean checkNoReferences) { + Preconditions.checkState(!checkNoReferences || + (referenceCount.get() == 0), + "checkNoReferences:%b, referencount:%d", + checkNoReferences, referenceCount.get()); + isEvicted.set(true); + cleanup(); + } + + private void cleanup() { + if (referenceCount.get() == 0 && isEvicted.get() && store != null) { + closeDB(containerDBPath, store); + } + } + + public MetadataStore getStore() { + return store; + } + + public void close() { + decrementReference(); + } + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java index 535af29c190c9..f1b71b89a9373 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java @@ -31,11 +31,12 @@ import org.apache.hadoop.utils.MetaStoreIterator; import org.apache.hadoop.utils.MetadataKeyFilters; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.apache.hadoop.utils.MetadataStore.KeyValue; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.Closeable; import java.io.File; import java.io.IOException; import java.util.NoSuchElementException; @@ -48,12 +49,14 @@ * {@link MetadataKeyFilters#getNormalKeyFilter()} */ @InterfaceAudience.Public -public class KeyValueBlockIterator implements BlockIterator { +public class KeyValueBlockIterator implements BlockIterator, + Closeable { private static final Logger LOG = LoggerFactory.getLogger( KeyValueBlockIterator.class); private MetaStoreIterator blockIterator; + private final ReferenceCountedDB db; private static KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters .getNormalKeyFilter(); private KeyPrefixFilter blockFilter; @@ -91,9 +94,9 @@ public KeyValueBlockIterator(long id, File path, KeyPrefixFilter filter) containerData; keyValueContainerData.setDbFile(KeyValueContainerLocationUtil .getContainerDBFile(metdataPath, containerId)); - MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, new + db = BlockUtils.getDB(keyValueContainerData, new OzoneConfiguration()); - blockIterator = metadataStore.iterator(); + blockIterator = db.getStore().iterator(); blockFilter = filter; } @@ -145,4 +148,8 @@ public void seekToLast() { nextBlock = null; blockIterator.seekToLast(); } + + public void close() { + db.close(); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 26b0ce1d788b2..8d5ec72b980e5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -54,7 +54,6 @@ .KeyValueContainerLocationUtil; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.apache.hadoop.utils.MetadataStore; import com.google.common.base.Preconditions; import org.apache.commons.io.FileUtils; @@ -74,6 +73,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .Result.UNSUPPORTED_REQUEST; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -349,11 +349,12 @@ private void updateContainerData(Runnable update) void compactDB() throws StorageContainerException { try { - MetadataStore db = BlockUtils.getDB(containerData, config); - db.compactDB(); - LOG.info("Container {} is closed with bcsId {}.", - containerData.getContainerID(), - containerData.getBlockCommitSequenceId()); + try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { + db.getStore().compactDB(); + LOG.info("Container {} is closed with bcsId {}.", + containerData.getContainerID(), + containerData.getBlockCommitSequenceId()); + } } catch (StorageContainerException ex) { throw ex; } catch (IOException ex) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index bdfdf21b4c250..4043914c89de7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -30,12 +30,12 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; -import org.apache.hadoop.utils.MetadataStore; import java.io.File; import java.io.IOException; import java.util.List; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -236,41 +236,42 @@ private void checkBlockDB() throws IOException { onDiskContainerData.setDbFile(dbFile); - MetadataStore db = BlockUtils - .getDB(onDiskContainerData, checkConfig); - - iterateBlockDB(db); + try(ReferenceCountedDB db = + BlockUtils.getDB(onDiskContainerData, checkConfig)) { + iterateBlockDB(db); + } } - private void iterateBlockDB(MetadataStore db) + private void iterateBlockDB(ReferenceCountedDB db) throws IOException { Preconditions.checkState(db != null); // get "normal" keys from the Block DB - KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID, - new File(onDiskContainerData.getContainerPath())); - - // ensure there is a chunk file for each key in the DB - while (kvIter.hasNext()) { - BlockData block = kvIter.nextBlock(); - - List chunkInfoList = block.getChunks(); - for (ContainerProtos.ChunkInfo chunk : 
chunkInfoList) { - File chunkFile; - chunkFile = ChunkUtils.getChunkFile(onDiskContainerData, - ChunkInfo.getFromProtoBuf(chunk)); - - if (!chunkFile.exists()) { - // concurrent mutation in Block DB? lookup the block again. - byte[] bdata = db.get( - Longs.toByteArray(block.getBlockID().getLocalID())); - if (bdata == null) { - LOG.trace("concurrency with delete, ignoring deleted block"); - break; // skip to next block from kvIter - } else { - String errorStr = "Missing chunk file " - + chunkFile.getAbsolutePath(); - throw new IOException(errorStr); + try(KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID, + new File(onDiskContainerData.getContainerPath()))) { + + // ensure there is a chunk file for each key in the DB + while (kvIter.hasNext()) { + BlockData block = kvIter.nextBlock(); + + List chunkInfoList = block.getChunks(); + for (ContainerProtos.ChunkInfo chunk : chunkInfoList) { + File chunkFile; + chunkFile = ChunkUtils.getChunkFile(onDiskContainerData, + ChunkInfo.getFromProtoBuf(chunk)); + + if (!chunkFile.exists()) { + // concurrent mutation in Block DB? lookup the block again. + byte[] bdata = db.getStore().get( + Longs.toByteArray(block.getBlockID().getLocalID())); + if (bdata == null) { + LOG.trace("concurrency with delete, ignoring deleted block"); + break; // skip to next block from kvIter + } else { + String errorStr = "Missing chunk file " + + chunkFile.getAbsolutePath(); + throw new IOException(errorStr); + } } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index 996b5922fe57c..fd3c7688f6cda 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -37,7 +37,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import java.io.IOException; @@ -66,7 +66,7 @@ private BlockUtils() { * @return MetadataStore handle. 
* @throws StorageContainerException */ - public static MetadataStore getDB(KeyValueContainerData containerData, + public static ReferenceCountedDB getDB(KeyValueContainerData containerData, Configuration conf) throws StorageContainerException { Preconditions.checkNotNull(containerData); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index 7a309555a37c9..377536a1c91df 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -39,6 +39,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -174,22 +175,25 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData, } kvContainerData.setDbFile(dbFile); - MetadataStore metadata = BlockUtils.getDB(kvContainerData, config); - long bytesUsed = 0; - List> liveKeys = metadata - .getRangeKVs(null, Integer.MAX_VALUE, - MetadataKeyFilters.getNormalKeyFilter()); - bytesUsed = liveKeys.parallelStream().mapToLong(e-> { - BlockData blockData; - try { - blockData = BlockUtils.getBlockData(e.getValue()); - return blockData.getSize(); - } catch (IOException ex) { - return 0L; - } - }).sum(); - kvContainerData.setBytesUsed(bytesUsed); - kvContainerData.setKeyCount(liveKeys.size()); + try(ReferenceCountedDB metadata = + BlockUtils.getDB(kvContainerData, config)) { + long bytesUsed = 0; + List> liveKeys = metadata.getStore() + .getRangeKVs(null, Integer.MAX_VALUE, + MetadataKeyFilters.getNormalKeyFilter()); + + bytesUsed = liveKeys.parallelStream().mapToLong(e-> { + BlockData blockData; + try { + blockData = BlockUtils.getBlockData(e.getValue()); + return blockData.getSize(); + } catch (IOException ex) { + return 0L; + } + }).sum(); + kvContainerData.setBytesUsed(bytesUsed); + kvContainerData.setKeyCount(liveKeys.size()); + } } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 3033dd9017d2d..f62a013f4cce7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -35,7 +35,7 @@ import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.utils.BatchOperation; import org.apache.hadoop.utils.MetadataKeyFilters; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,47 +84,47 @@ public long putBlock(Container container, BlockData data) throws IOException { "cannot be negative"); // We are not locking the key manager since LevelDb serializes all actions // against a single DB. We rely on DB level locking to avoid conflicts. 
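The putBlock() hunk that follows applies the same conversion used throughout this patch: BlockUtils.getDB() now hands out a ReferenceCountedDB handle from the ContainerCache, and callers release it with try-with-resources instead of holding a raw MetadataStore. A minimal sketch of that acquire/use/release cycle, not part of the patch itself; the helper name, its parameters, and the key argument are placeholders:

// Sketch only: a hypothetical fragment, assuming the surrounding class has the
// usual imports for BlockUtils, ReferenceCountedDB, KeyValueContainerData,
// Configuration and IOException.
static byte[] readOneKey(KeyValueContainerData containerData,
    Configuration conf, byte[] key) throws IOException {
  // getDB() finds (or opens) the DB in ContainerCache and increments the
  // handle's reference count before returning it.
  try (ReferenceCountedDB db = BlockUtils.getDB(containerData, conf)) {
    // Reads and writes go through the wrapped MetadataStore.
    return db.getStore().get(key);
  }
  // close() decrements the count; the underlying MetadataStore is closed only
  // once the handle has been evicted from the cache (removeLRU/removeDB) and
  // the reference count has dropped to zero, so in-flight users stay safe.
}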
- MetadataStore db = BlockUtils.getDB((KeyValueContainerData) container - .getContainerData(), config); + try(ReferenceCountedDB db = BlockUtils. + getDB((KeyValueContainerData) container.getContainerData(), config)) { + // This is a post condition that acts as a hint to the user. + // Should never fail. + Preconditions.checkNotNull(db, "DB cannot be null here"); - // This is a post condition that acts as a hint to the user. - // Should never fail. - Preconditions.checkNotNull(db, "DB cannot be null here"); + long bcsId = data.getBlockCommitSequenceId(); + long containerBCSId = ((KeyValueContainerData) container. + getContainerData()).getBlockCommitSequenceId(); - long bcsId = data.getBlockCommitSequenceId(); - long containerBCSId = ((KeyValueContainerData) container.getContainerData()) - .getBlockCommitSequenceId(); - - // default blockCommitSequenceId for any block is 0. It the putBlock - // request is not coming via Ratis(for test scenarios), it will be 0. - // In such cases, we should overwrite the block as well - if (bcsId != 0) { - if (bcsId <= containerBCSId) { - // Since the blockCommitSequenceId stored in the db is greater than - // equal to blockCommitSequenceId to be updated, it means the putBlock - // transaction is reapplied in the ContainerStateMachine on restart. - // It also implies that the given block must already exist in the db. - // just log and return - LOG.warn("blockCommitSequenceId " + containerBCSId - + " in the Container Db is greater than" + " the supplied value " - + bcsId + " .Ignoring it"); - return data.getSize(); + // default blockCommitSequenceId for any block is 0. It the putBlock + // request is not coming via Ratis(for test scenarios), it will be 0. + // In such cases, we should overwrite the block as well + if (bcsId != 0) { + if (bcsId <= containerBCSId) { + // Since the blockCommitSequenceId stored in the db is greater than + // equal to blockCommitSequenceId to be updated, it means the putBlock + // transaction is reapplied in the ContainerStateMachine on restart. + // It also implies that the given block must already exist in the db. 
+ // just log and return + LOG.warn("blockCommitSequenceId " + containerBCSId + + " in the Container Db is greater than" + " the supplied value " + + bcsId + " .Ignoring it"); + return data.getSize(); + } } + // update the blockData as well as BlockCommitSequenceId here + BatchOperation batch = new BatchOperation(); + batch.put(Longs.toByteArray(data.getLocalID()), + data.getProtoBufMessage().toByteArray()); + batch.put(blockCommitSequenceIdKey, + Longs.toByteArray(bcsId)); + db.getStore().writeBatch(batch); + container.updateBlockCommitSequenceId(bcsId); + // Increment keycount here + container.getContainerData().incrKeyCount(); + LOG.debug( + "Block " + data.getBlockID() + " successfully committed with bcsId " + + bcsId + " chunk size " + data.getChunks().size()); + return data.getSize(); } - // update the blockData as well as BlockCommitSequenceId here - BatchOperation batch = new BatchOperation(); - batch.put(Longs.toByteArray(data.getLocalID()), - data.getProtoBufMessage().toByteArray()); - batch.put(blockCommitSequenceIdKey, - Longs.toByteArray(bcsId)); - db.writeBatch(batch); - container.updateBlockCommitSequenceId(bcsId); - // Increment keycount here - container.getContainerData().incrKeyCount(); - LOG.debug( - "Block " + data.getBlockID() + " successfully committed with bcsId " - + bcsId + " chunk size " + data.getChunks().size()); - return data.getSize(); } /** @@ -146,32 +146,33 @@ public BlockData getBlock(Container container, BlockID blockID) KeyValueContainerData containerData = (KeyValueContainerData) container .getContainerData(); - MetadataStore db = BlockUtils.getDB(containerData, config); - // This is a post condition that acts as a hint to the user. - // Should never fail. - Preconditions.checkNotNull(db, "DB cannot be null here"); + try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { + // This is a post condition that acts as a hint to the user. + // Should never fail. + Preconditions.checkNotNull(db, "DB cannot be null here"); - long containerBCSId = containerData.getBlockCommitSequenceId(); - if (containerBCSId < bcsId) { - throw new StorageContainerException( - "Unable to find the block with bcsID " + bcsId + " .Container " - + container.getContainerData().getContainerID() + " bcsId is " - + containerBCSId + ".", UNKNOWN_BCSID); - } - byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID())); - if (kData == null) { - throw new StorageContainerException("Unable to find the block." + blockID, - NO_SUCH_BLOCK); - } - ContainerProtos.BlockData blockData = - ContainerProtos.BlockData.parseFrom(kData); - long id = blockData.getBlockID().getBlockCommitSequenceId(); - if (id < bcsId) { - throw new StorageContainerException( - "bcsId " + bcsId + " mismatches with existing block Id " - + id + " for block " + blockID + ".", BCSID_MISMATCH); + long containerBCSId = containerData.getBlockCommitSequenceId(); + if (containerBCSId < bcsId) { + throw new StorageContainerException( + "Unable to find the block with bcsID " + bcsId + " .Container " + + container.getContainerData().getContainerID() + " bcsId is " + + containerBCSId + ".", UNKNOWN_BCSID); + } + byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID())); + if (kData == null) { + throw new StorageContainerException("Unable to find the block." 
+ + blockID, NO_SUCH_BLOCK); + } + ContainerProtos.BlockData blockData = + ContainerProtos.BlockData.parseFrom(kData); + long id = blockData.getBlockID().getBlockCommitSequenceId(); + if (id < bcsId) { + throw new StorageContainerException( + "bcsId " + bcsId + " mismatches with existing block Id " + + id + " for block " + blockID + ".", BCSID_MISMATCH); + } + return BlockData.getFromProtoBuf(blockData); } - return BlockData.getFromProtoBuf(blockData); } /** @@ -187,18 +188,19 @@ public long getCommittedBlockLength(Container container, BlockID blockID) throws IOException { KeyValueContainerData containerData = (KeyValueContainerData) container .getContainerData(); - MetadataStore db = BlockUtils.getDB(containerData, config); - // This is a post condition that acts as a hint to the user. - // Should never fail. - Preconditions.checkNotNull(db, "DB cannot be null here"); - byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID())); - if (kData == null) { - throw new StorageContainerException("Unable to find the block.", - NO_SUCH_BLOCK); + try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { + // This is a post condition that acts as a hint to the user. + // Should never fail. + Preconditions.checkNotNull(db, "DB cannot be null here"); + byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID())); + if (kData == null) { + throw new StorageContainerException("Unable to find the block.", + NO_SUCH_BLOCK); + } + ContainerProtos.BlockData blockData = + ContainerProtos.BlockData.parseFrom(kData); + return blockData.getSize(); } - ContainerProtos.BlockData blockData = - ContainerProtos.BlockData.parseFrom(kData); - return blockData.getSize(); } /** @@ -218,24 +220,24 @@ public void deleteBlock(Container container, BlockID blockID) throws KeyValueContainerData cData = (KeyValueContainerData) container .getContainerData(); - MetadataStore db = BlockUtils.getDB(cData, config); - // This is a post condition that acts as a hint to the user. - // Should never fail. - Preconditions.checkNotNull(db, "DB cannot be null here"); - // Note : There is a race condition here, since get and delete - // are not atomic. Leaving it here since the impact is refusing - // to delete a Block which might have just gotten inserted after - // the get check. - byte[] kKey = Longs.toByteArray(blockID.getLocalID()); - byte[] kData = db.get(kKey); - if (kData == null) { - throw new StorageContainerException("Unable to find the block.", - NO_SUCH_BLOCK); + try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) { + // This is a post condition that acts as a hint to the user. + // Should never fail. + Preconditions.checkNotNull(db, "DB cannot be null here"); + // Note : There is a race condition here, since get and delete + // are not atomic. Leaving it here since the impact is refusing + // to delete a Block which might have just gotten inserted after + // the get check. 
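For context on the race the comment above mentions: it is the window between the existence check and the delete in the pre-patch sequence. A condensed, purely illustrative sketch of that window, using the names already in this method; the interleaving note marks a hypothetical concurrent writer:

byte[] key = Longs.toByteArray(blockID.getLocalID());
byte[] value = db.getStore().get(key);      // step 1: existence check
// <-- a concurrent putBlock() for the same local ID could commit here
if (value == null) {
  // the block may exist by now, yet this path still reports NO_SUCH_BLOCK
  throw new StorageContainerException("Unable to find the block.",
      NO_SUCH_BLOCK);
}
db.getStore().delete(key);                  // step 2: not atomic with step 1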
+ byte[] kKey = Longs.toByteArray(blockID.getLocalID()); + try { + db.getStore().delete(kKey); + } catch (IOException e) { + throw new StorageContainerException("Unable to find the block.", + NO_SUCH_BLOCK); + } + // Decrement blockcount here + container.getContainerData().decrKeyCount(); } - db.delete(kKey); - - // Decrement blockcount here - container.getContainerData().decrKeyCount(); } /** @@ -258,18 +260,19 @@ public List listBlock(Container container, long startLocalID, int List result = null; KeyValueContainerData cData = (KeyValueContainerData) container .getContainerData(); - MetadataStore db = BlockUtils.getDB(cData, config); - result = new ArrayList<>(); - byte[] startKeyInBytes = Longs.toByteArray(startLocalID); - List> range = - db.getSequentialRangeKVs(startKeyInBytes, count, - MetadataKeyFilters.getNormalKeyFilter()); - for (Map.Entry entry : range) { - BlockData value = BlockUtils.getBlockData(entry.getValue()); - BlockData data = new BlockData(value.getBlockID()); - result.add(data); + try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) { + result = new ArrayList<>(); + byte[] startKeyInBytes = Longs.toByteArray(startLocalID); + List> range = + db.getStore().getSequentialRangeKVs(startKeyInBytes, count, + MetadataKeyFilters.getNormalKeyFilter()); + for (Map.Entry entry : range) { + BlockData value = BlockUtils.getBlockData(entry.getValue()); + BlockData data = new BlockData(value.getBlockID()); + result.add(data); + } + return result; } - return result; } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java index 61a303fcdd20d..c03bea791fcbd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java @@ -43,7 +43,7 @@ import org.apache.hadoop.utils.BackgroundTaskResult; import org.apache.hadoop.utils.BatchOperation; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -185,69 +185,71 @@ public BackgroundTaskResult call() throws Exception { ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult(); long startTime = Time.monotonicNow(); // Scan container's db and get list of under deletion blocks - MetadataStore meta = BlockUtils.getDB( - (KeyValueContainerData) containerData, conf); - // # of blocks to delete is throttled - KeyPrefixFilter filter = - new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX); - List> toDeleteBlocks = - meta.getSequentialRangeKVs(null, blockLimitPerTask, filter); - if (toDeleteBlocks.isEmpty()) { - LOG.debug("No under deletion block found in container : {}", - containerData.getContainerID()); - } + try (ReferenceCountedDB meta = BlockUtils.getDB(containerData, conf)) { + // # of blocks to delete is throttled + KeyPrefixFilter filter = + new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX); + List> toDeleteBlocks = + meta.getStore().getSequentialRangeKVs(null, blockLimitPerTask, + filter); + if (toDeleteBlocks.isEmpty()) { + 
LOG.debug("No under deletion block found in container : {}", + containerData.getContainerID()); + } - List succeedBlocks = new LinkedList<>(); - LOG.debug("Container : {}, To-Delete blocks : {}", - containerData.getContainerID(), toDeleteBlocks.size()); - File dataDir = new File(containerData.getChunksPath()); - if (!dataDir.exists() || !dataDir.isDirectory()) { - LOG.error("Invalid container data dir {} : " - + "does not exist or not a directory", dataDir.getAbsolutePath()); - return crr; - } + List succeedBlocks = new LinkedList<>(); + LOG.debug("Container : {}, To-Delete blocks : {}", + containerData.getContainerID(), toDeleteBlocks.size()); + File dataDir = new File(containerData.getChunksPath()); + if (!dataDir.exists() || !dataDir.isDirectory()) { + LOG.error("Invalid container data dir {} : " + + "does not exist or not a directory", dataDir.getAbsolutePath()); + return crr; + } - toDeleteBlocks.forEach(entry -> { - String blockName = DFSUtil.bytes2String(entry.getKey()); - LOG.debug("Deleting block {}", blockName); - try { - ContainerProtos.BlockData data = - ContainerProtos.BlockData.parseFrom(entry.getValue()); - for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) { - File chunkFile = dataDir.toPath() - .resolve(chunkInfo.getChunkName()).toFile(); - if (FileUtils.deleteQuietly(chunkFile)) { - LOG.debug("block {} chunk {} deleted", blockName, - chunkFile.getAbsolutePath()); + toDeleteBlocks.forEach(entry -> { + String blockName = DFSUtil.bytes2String(entry.getKey()); + LOG.debug("Deleting block {}", blockName); + try { + ContainerProtos.BlockData data = + ContainerProtos.BlockData.parseFrom(entry.getValue()); + for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) { + File chunkFile = dataDir.toPath() + .resolve(chunkInfo.getChunkName()).toFile(); + if (FileUtils.deleteQuietly(chunkFile)) { + LOG.debug("block {} chunk {} deleted", blockName, + chunkFile.getAbsolutePath()); + } } + succeedBlocks.add(blockName); + } catch (InvalidProtocolBufferException e) { + LOG.error("Failed to parse block info for block {}", blockName, e); } - succeedBlocks.add(blockName); - } catch (InvalidProtocolBufferException e) { - LOG.error("Failed to parse block info for block {}", blockName, e); - } - }); + }); - // Once files are deleted... replace deleting entries with deleted entries - BatchOperation batch = new BatchOperation(); - succeedBlocks.forEach(entry -> { - String blockId = - entry.substring(OzoneConsts.DELETING_KEY_PREFIX.length()); - String deletedEntry = OzoneConsts.DELETED_KEY_PREFIX + blockId; - batch.put(DFSUtil.string2Bytes(deletedEntry), - DFSUtil.string2Bytes(blockId)); - batch.delete(DFSUtil.string2Bytes(entry)); - }); - meta.writeBatch(batch); - // update count of pending deletion blocks in in-memory container status - containerData.decrPendingDeletionBlocks(succeedBlocks.size()); + // Once files are deleted... 
replace deleting entries with deleted + // entries + BatchOperation batch = new BatchOperation(); + succeedBlocks.forEach(entry -> { + String blockId = + entry.substring(OzoneConsts.DELETING_KEY_PREFIX.length()); + String deletedEntry = OzoneConsts.DELETED_KEY_PREFIX + blockId; + batch.put(DFSUtil.string2Bytes(deletedEntry), + DFSUtil.string2Bytes(blockId)); + batch.delete(DFSUtil.string2Bytes(entry)); + }); + meta.getStore().writeBatch(batch); + // update count of pending deletion blocks in in-memory container status + containerData.decrPendingDeletionBlocks(succeedBlocks.size()); - if (!succeedBlocks.isEmpty()) { - LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms", - containerData.getContainerID(), succeedBlocks.size(), - Time.monotonicNow() - startTime); + if (!succeedBlocks.isEmpty()) { + LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms", + containerData.getContainerID(), succeedBlocks.size(), + Time.monotonicNow() - startTime); + } + crr.addAll(succeedBlocks); + return crr; } - crr.addAll(succeedBlocks); - return crr; } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 0192fd5dd1b57..d5455aa3e7e05 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -38,7 +38,7 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; import org.apache.hadoop.utils.MetadataKeyFilters; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -180,28 +180,31 @@ public void verifyContainerData(ContainerData containerData) KeyValueContainerUtil.parseKVContainerData(kvContainerData, config); KeyValueContainer kvContainer = new KeyValueContainer( kvContainerData, config); - MetadataStore containerDB = BlockUtils.getDB(kvContainerData, config); - MetadataKeyFilters.KeyPrefixFilter filter = - new MetadataKeyFilters.KeyPrefixFilter() - .addFilter(OzoneConsts.DELETING_KEY_PREFIX); - int numPendingDeletionBlocks = - containerDB.getSequentialRangeKVs(null, Integer.MAX_VALUE, filter) - .size(); - kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks); - byte[] delTxnId = containerDB.get( - DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX)); - if (delTxnId != null) { - kvContainerData - .updateDeleteTransactionId(Longs.fromByteArray(delTxnId)); - } - // sets the BlockCommitSequenceId. 
- byte[] bcsId = containerDB.get( - DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX)); - if (bcsId != null) { - kvContainerData - .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId)); + try(ReferenceCountedDB containerDB = BlockUtils.getDB(kvContainerData, + config)) { + MetadataKeyFilters.KeyPrefixFilter filter = + new MetadataKeyFilters.KeyPrefixFilter() + .addFilter(OzoneConsts.DELETING_KEY_PREFIX); + int numPendingDeletionBlocks = + containerDB.getStore().getSequentialRangeKVs(null, + Integer.MAX_VALUE, filter) + .size(); + kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks); + byte[] delTxnId = containerDB.getStore().get( + DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX)); + if (delTxnId != null) { + kvContainerData + .updateDeleteTransactionId(Longs.fromByteArray(delTxnId)); + } + // sets the BlockCommitSequenceId. + byte[] bcsId = containerDB.getStore().get(DFSUtil.string2Bytes( + OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX)); + if (bcsId != null) { + kvContainerData + .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId)); + } + containerSet.addContainer(kvContainer); } - containerSet.addContainer(kvContainer); } else { throw new StorageContainerException("Container File is corrupted. " + "ContainerType is KeyValueContainer but cast to " + diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index 15d7b342d4a90..687e64e16b18a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -34,7 +34,7 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.utils.MetadataKeyFilters; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -109,30 +109,31 @@ public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception { createContainerWithBlocks(containerID, normalBlocks, deletedBlocks); String containerPath = new File(containerData.getMetadataPath()) .getParent(); - KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerID, new File(containerPath)); - - int counter = 0; - while(keyValueBlockIterator.hasNext()) { - BlockData blockData = keyValueBlockIterator.nextBlock(); - assertEquals(blockData.getLocalID(), counter++); - } - - assertFalse(keyValueBlockIterator.hasNext()); - - keyValueBlockIterator.seekToFirst(); - counter = 0; - while(keyValueBlockIterator.hasNext()) { - BlockData blockData = keyValueBlockIterator.nextBlock(); - assertEquals(blockData.getLocalID(), counter++); - } - assertFalse(keyValueBlockIterator.hasNext()); - - try { - keyValueBlockIterator.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + containerID, ex); + try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( + containerID, new File(containerPath))) { + + int counter = 0; + while (keyValueBlockIterator.hasNext()) { + BlockData blockData = 
keyValueBlockIterator.nextBlock(); + assertEquals(blockData.getLocalID(), counter++); + } + + assertFalse(keyValueBlockIterator.hasNext()); + + keyValueBlockIterator.seekToFirst(); + counter = 0; + while (keyValueBlockIterator.hasNext()) { + BlockData blockData = keyValueBlockIterator.nextBlock(); + assertEquals(blockData.getLocalID(), counter++); + } + assertFalse(keyValueBlockIterator.hasNext()); + + try { + keyValueBlockIterator.nextBlock(); + } catch (NoSuchElementException ex) { + GenericTestUtils.assertExceptionContains("Block Iterator reached end " + + "for ContainerID " + containerID, ex); + } } } @@ -142,17 +143,18 @@ public void testKeyValueBlockIteratorWithNextBlock() throws Exception { createContainerWithBlocks(containerID, 2, 0); String containerPath = new File(containerData.getMetadataPath()) .getParent(); - KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerID, new File(containerPath)); - long blockID = 0L; - assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - try { - keyValueBlockIterator.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + containerID, ex); + try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( + containerID, new File(containerPath))) { + long blockID = 0L; + assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); + assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); + + try { + keyValueBlockIterator.nextBlock(); + } catch (NoSuchElementException ex) { + GenericTestUtils.assertExceptionContains("Block Iterator reached end " + + "for ContainerID " + containerID, ex); + } } } @@ -162,42 +164,41 @@ public void testKeyValueBlockIteratorWithHasNext() throws Exception { createContainerWithBlocks(containerID, 2, 0); String containerPath = new File(containerData.getMetadataPath()) .getParent(); - KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerID, new File(containerPath)); - long blockID = 0L; - - // Even calling multiple times hasNext() should not move entry forward. 
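The assertions that follow exercise two contracts of the now-Closeable block iterator: hasNext() is a pure peek that never advances the cursor, and only nextBlock() moves it forward. A compressed usage sketch, assuming containerID and containerPath are prepared as in this test and the enclosing method declares throws IOException:

try (KeyValueBlockIterator iter =
    new KeyValueBlockIterator(containerID, new File(containerPath))) {
  while (iter.hasNext()) {              // repeated hasNext() calls are side-effect free
    BlockData block = iter.nextBlock(); // nextBlock() is what advances the iterator
    // ... inspect block ...
  }
}                                       // close() releases the cached DB handle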
- assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); - - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - keyValueBlockIterator.seekToLast(); - assertTrue(keyValueBlockIterator.hasNext()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - keyValueBlockIterator.seekToFirst(); - blockID = 0L; - assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - try { - keyValueBlockIterator.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + containerID, ex); + try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( + containerID, new File(containerPath))) { + long blockID = 0L; + + // Even calling multiple times hasNext() should not move entry forward. + assertTrue(keyValueBlockIterator.hasNext()); + assertTrue(keyValueBlockIterator.hasNext()); + assertTrue(keyValueBlockIterator.hasNext()); + assertTrue(keyValueBlockIterator.hasNext()); + assertTrue(keyValueBlockIterator.hasNext()); + assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); + + assertTrue(keyValueBlockIterator.hasNext()); + assertTrue(keyValueBlockIterator.hasNext()); + assertTrue(keyValueBlockIterator.hasNext()); + assertTrue(keyValueBlockIterator.hasNext()); + assertTrue(keyValueBlockIterator.hasNext()); + assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); + + keyValueBlockIterator.seekToLast(); + assertTrue(keyValueBlockIterator.hasNext()); + assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); + + keyValueBlockIterator.seekToFirst(); + blockID = 0L; + assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); + assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); + + try { + keyValueBlockIterator.nextBlock(); + } catch (NoSuchElementException ex) { + GenericTestUtils.assertExceptionContains("Block Iterator reached end " + + "for ContainerID " + containerID, ex); + } } - - } @Test @@ -208,14 +209,15 @@ public void testKeyValueBlockIteratorWithFilter() throws Exception { createContainerWithBlocks(containerId, normalBlocks, deletedBlocks); String containerPath = new File(containerData.getMetadataPath()) .getParent(); - KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( + try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( containerId, new File(containerPath), MetadataKeyFilters - .getDeletingKeyFilter()); + .getDeletingKeyFilter())) { - int counter = 5; - while(keyValueBlockIterator.hasNext()) { - BlockData blockData = keyValueBlockIterator.nextBlock(); - assertEquals(blockData.getLocalID(), counter++); + int counter = 5; + while (keyValueBlockIterator.hasNext()) { + BlockData blockData = keyValueBlockIterator.nextBlock(); + assertEquals(blockData.getLocalID(), counter++); + } } } @@ -226,11 +228,12 @@ public void 
testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws createContainerWithBlocks(containerId, 0, 5); String containerPath = new File(containerData.getMetadataPath()) .getParent(); - KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerId, new File(containerPath)); - //As all blocks are deleted blocks, blocks does not match with normal key - // filter. - assertFalse(keyValueBlockIterator.hasNext()); + try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( + containerId, new File(containerPath))) { + //As all blocks are deleted blocks, blocks does not match with normal key + // filter. + assertFalse(keyValueBlockIterator.hasNext()); + } } /** @@ -251,27 +254,30 @@ private void createContainerWithBlocks(long containerId, int container = new KeyValueContainer(containerData, conf); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID .randomUUID().toString()); - MetadataStore metadataStore = BlockUtils.getDB(containerData, conf); - - List chunkList = new ArrayList<>(); - ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024); - chunkList.add(info.getProtoBufMessage()); - - for (int i=0; i chunkList = new ArrayList<>(); + ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024); + chunkList.add(info.getProtoBufMessage()); + + for (int i = 0; i < normalBlocks; i++) { + BlockID blockID = new BlockID(containerId, i); + BlockData blockData = new BlockData(blockID); + blockData.setChunks(chunkList); + metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()), + blockData + .getProtoBufMessage().toByteArray()); + } + + for (int i = normalBlocks; i < deletedBlocks; i++) { + BlockID blockID = new BlockID(containerId, i); + BlockData blockData = new BlockData(blockID); + blockData.setChunks(chunkList); + metadataStore.getStore().put(DFSUtil.string2Bytes(OzoneConsts + .DELETING_KEY_PREFIX + blockID.getLocalID()), blockData + .getProtoBufMessage().toByteArray()); + } } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 8e2986cca6fb2..c16574155255b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DiskChecker; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.junit.Assert; import org.junit.Before; @@ -132,23 +132,24 @@ public void testBlockIterator() throws Exception{ private void addBlocks(int count) throws Exception { long containerId = keyValueContainerData.getContainerID(); - MetadataStore metadataStore = BlockUtils.getDB(keyValueContainer - .getContainerData(), conf); - for (int i=0; i < count; i++) { - // Creating BlockData - BlockID blockID = new BlockID(containerId, i); - BlockData blockData = new BlockData(blockID); - blockData.addMetadata("VOLUME", "ozone"); - blockData.addMetadata("OWNER", "hdfs"); - List chunkList = new ArrayList<>(); - ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, 1024); - 
chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData - .getProtoBufMessage().toByteArray()); + try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer + .getContainerData(), conf)) { + for (int i = 0; i < count; i++) { + // Creating BlockData + BlockID blockID = new BlockID(containerId, i); + BlockData blockData = new BlockData(blockID); + blockData.addMetadata("VOLUME", "ozone"); + blockData.addMetadata("OWNER", "hdfs"); + List chunkList = new ArrayList<>(); + ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID + .getLocalID(), 0), 0, 1024); + chunkList.add(info.getProtoBufMessage()); + blockData.setChunks(chunkList); + metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()), + blockData + .getProtoBufMessage().toByteArray()); + } } - } @SuppressWarnings("RedundantCast") @@ -191,9 +192,12 @@ public void testContainerImportExport() throws Exception { int numberOfKeysToWrite = 12; //write one few keys to check the key count after import - MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, conf); - for (int i = 0; i < numberOfKeysToWrite; i++) { - metadataStore.put(("test" + i).getBytes(UTF_8), "test".getBytes(UTF_8)); + try(ReferenceCountedDB metadataStore = + BlockUtils.getDB(keyValueContainerData, conf)) { + for (int i = 0; i < numberOfKeysToWrite; i++) { + metadataStore.getStore().put(("test" + i).getBytes(UTF_8), + "test".getBytes(UTF_8)); + } } BlockUtils.removeDB(keyValueContainerData, conf); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index 0bc1bbc387b39..cae275af52584 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -35,7 +35,7 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -149,48 +149,50 @@ private void createContainerWithBlocks(long containerId, int normalBlocks, container = new KeyValueContainer(containerData, conf); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString()); - MetadataStore metadataStore = BlockUtils.getDB(containerData, conf); - chunkManager = new ChunkManagerImpl(true); - - assertTrue(containerData.getChunksPath() != null); - File chunksPath = new File(containerData.getChunksPath()); - assertTrue(chunksPath.exists()); - // Initially chunks folder should be empty. 
- assertTrue(chunksPath.listFiles().length == 0); - - List chunkList = new ArrayList<>(); - for (int i = 0; i < (totalBlks); i++) { - BlockID blockID = new BlockID(containerId, i); - BlockData blockData = new BlockData(blockID); - - chunkList.clear(); - for (chunkCount = 0; chunkCount < chunksPerBlock; chunkCount++) { - String chunkName = strBlock + i + strChunk + chunkCount; - long offset = chunkCount * chunkLen; - ChunkInfo info = new ChunkInfo(chunkName, offset, chunkLen); - chunkList.add(info.getProtoBufMessage()); - chunkManager - .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA) - .build()); - chunkManager - .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA) - .build()); - } - blockData.setChunks(chunkList); - - if (i >= normalBlocks) { - // deleted key - metadataStore.put(DFSUtil.string2Bytes( - OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()), - blockData.getProtoBufMessage().toByteArray()); - } else { - // normal key - metadataStore.put(Longs.toByteArray(blockID.getLocalID()), - blockData.getProtoBufMessage().toByteArray()); + try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, + conf)) { + chunkManager = new ChunkManagerImpl(true); + + assertTrue(containerData.getChunksPath() != null); + File chunksPath = new File(containerData.getChunksPath()); + assertTrue(chunksPath.exists()); + // Initially chunks folder should be empty. + assertTrue(chunksPath.listFiles().length == 0); + + List chunkList = new ArrayList<>(); + for (int i = 0; i < (totalBlks); i++) { + BlockID blockID = new BlockID(containerId, i); + BlockData blockData = new BlockData(blockID); + + chunkList.clear(); + for (chunkCount = 0; chunkCount < chunksPerBlock; chunkCount++) { + String chunkName = strBlock + i + strChunk + chunkCount; + long offset = chunkCount * chunkLen; + ChunkInfo info = new ChunkInfo(chunkName, offset, chunkLen); + chunkList.add(info.getProtoBufMessage()); + chunkManager + .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), + new DispatcherContext.Builder() + .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA) + .build()); + chunkManager + .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), + new DispatcherContext.Builder() + .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA) + .build()); + } + blockData.setChunks(chunkList); + + if (i >= normalBlocks) { + // deleted key + metadataStore.getStore().put(DFSUtil.string2Bytes( + OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()), + blockData.getProtoBufMessage().toByteArray()); + } else { + // normal key + metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()), + blockData.getProtoBufMessage().toByteArray()); + } } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java index 5b551199a4226..da81e6de4545a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java @@ -40,7 +40,7 @@ import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.utils.MetadataKeyFilters; import 
org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import java.io.IOException; import java.io.OutputStream; @@ -119,16 +119,17 @@ public Map createKeys(int numOfKeys, int keySize) public List getPendingDeletionBlocks(Long containerID) throws IOException { List pendingDeletionBlocks = Lists.newArrayList(); - MetadataStore meta = getContainerMetadata(containerID); + ReferenceCountedDB meta = getContainerMetadata(containerID); KeyPrefixFilter filter = new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX); - List> kvs = meta + List> kvs = meta.getStore() .getRangeKVs(null, Integer.MAX_VALUE, filter); kvs.forEach(entry -> { String key = DFSUtil.bytes2String(entry.getKey()); pendingDeletionBlocks .add(key.replace(OzoneConsts.DELETING_KEY_PREFIX, "")); }); + meta.close(); return pendingDeletionBlocks; } @@ -143,17 +144,18 @@ public List getAllBlocks(Set containerIDs) public List getAllBlocks(Long containeID) throws IOException { List allBlocks = Lists.newArrayList(); - MetadataStore meta = getContainerMetadata(containeID); + ReferenceCountedDB meta = getContainerMetadata(containeID); List> kvs = - meta.getRangeKVs(null, Integer.MAX_VALUE, + meta.getStore().getRangeKVs(null, Integer.MAX_VALUE, MetadataKeyFilters.getNormalKeyFilter()); kvs.forEach(entry -> { allBlocks.add(Longs.fromByteArray(entry.getKey())); }); + meta.close(); return allBlocks; } - private MetadataStore getContainerMetadata(Long containerID) + private ReferenceCountedDB getContainerMetadata(Long containerID) throws IOException { ContainerWithPipeline containerWithPipeline = cluster .getStorageContainerManager().getClientProtocolServer() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 0d32f83250659..17e199511e7e2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -953,18 +953,19 @@ public void testGetKeyDetails() throws IOException, OzoneException { .getContainerData()); String containerPath = new File(containerData.getMetadataPath()) .getParent(); - KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerID, new File(containerPath)); - while (keyValueBlockIterator.hasNext()) { - BlockData blockData = keyValueBlockIterator.nextBlock(); - if (blockData.getBlockID().getLocalID() == localID) { - long length = 0; - List chunks = blockData.getChunks(); - for (ContainerProtos.ChunkInfo chunk : chunks) { - length += chunk.getLen(); + try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( + containerID, new File(containerPath))) { + while (keyValueBlockIterator.hasNext()) { + BlockData blockData = keyValueBlockIterator.nextBlock(); + if (blockData.getBlockID().getLocalID() == localID) { + long length = 0; + List chunks = blockData.getChunks(); + for (ContainerProtos.ChunkInfo chunk : chunks) { + length += chunk.getLen(); + } + Assert.assertEquals(length, keyValue.getBytes().length); + break; } - Assert.assertEquals(length, keyValue.getBytes().length); - break; } } } @@ -1115,31 +1116,32 @@ private void corruptData(Container container, OzoneKey key) 
(KeyValueContainerData) container.getContainerData(); String containerPath = new File(containerData.getMetadataPath()).getParent(); - KeyValueBlockIterator keyValueBlockIterator = - new KeyValueBlockIterator(containerID, new File(containerPath)); - - // Find the block corresponding to the key we put. We use the localID of - // the BlockData to identify out key. - BlockData blockData = null; - while (keyValueBlockIterator.hasNext()) { - blockData = keyValueBlockIterator.nextBlock(); - if (blockData.getBlockID().getLocalID() == localID) { - break; + try (KeyValueBlockIterator keyValueBlockIterator = + new KeyValueBlockIterator(containerID, new File(containerPath))) { + + // Find the block corresponding to the key we put. We use the localID of + // the BlockData to identify out key. + BlockData blockData = null; + while (keyValueBlockIterator.hasNext()) { + blockData = keyValueBlockIterator.nextBlock(); + if (blockData.getBlockID().getLocalID() == localID) { + break; + } } + Assert.assertNotNull("Block not found", blockData); + + // Get the location of the chunk file + String chunkName = blockData.getChunks().get(0).getChunkName(); + String containreBaseDir = + container.getContainerData().getVolume().getHddsRootDir().getPath(); + File chunksLocationPath = KeyValueContainerLocationUtil + .getChunksLocationPath(containreBaseDir, scmId, containerID); + File chunkFile = new File(chunksLocationPath, chunkName); + + // Corrupt the contents of the chunk file + String newData = new String("corrupted data"); + FileUtils.writeByteArrayToFile(chunkFile, newData.getBytes()); } - Assert.assertNotNull("Block not found", blockData); - - // Get the location of the chunk file - String chunkName = blockData.getChunks().get(0).getChunkName(); - String containreBaseDir = - container.getContainerData().getVolume().getHddsRootDir().getPath(); - File chunksLocationPath = KeyValueContainerLocationUtil - .getChunksLocationPath(containreBaseDir, scmId, containerID); - File chunkFile = new File(chunksLocationPath, chunkName); - - // Corrupt the contents of the chunk file - String newData = new String("corrupted data"); - FileUtils.writeByteArrayToFile(chunkFile, newData.getBytes()); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 27fe4ffedd0b8..9993f90ca279f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -45,7 +45,7 @@ import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.utils.BackgroundService; import org.apache.hadoop.utils.MetadataKeyFilters; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Test; @@ -119,35 +119,36 @@ private void createToDeleteBlocks(ContainerSet containerSet, containerSet.addContainer(container); data = (KeyValueContainerData) containerSet.getContainer( containerID).getContainerData(); - MetadataStore metadata = BlockUtils.getDB(data, conf); - for (int j = 0; j chunks = Lists.newArrayList(); - for (int k = 0; k chunks = Lists.newArrayList(); + for (int k = 0; k < numOfChunksPerBlock; k++) { + // 
offset doesn't matter here + String chunkName = blockID.getLocalID() + "_chunk_" + k; + File chunk = new File(data.getChunksPath(), chunkName); + FileUtils.writeStringToFile(chunk, "a chunk", + Charset.defaultCharset()); + LOG.info("Creating file {}", chunk.getAbsolutePath()); + // make sure file exists + Assert.assertTrue(chunk.isFile() && chunk.exists()); + ContainerProtos.ChunkInfo info = + ContainerProtos.ChunkInfo.newBuilder() + .setChunkName(chunk.getAbsolutePath()) + .setLen(0) + .setOffset(0) + .setChecksumData(Checksum.getNoChecksumDataProto()) + .build(); + chunks.add(info); + } + kd.setChunks(chunks); + metadata.getStore().put(DFSUtil.string2Bytes(deleteStateName), + kd.getProtoBufMessage().toByteArray()); } - kd.setChunks(chunks); - metadata.put(DFSUtil.string2Bytes(deleteStateName), - kd.getProtoBufMessage().toByteArray()); } } } @@ -166,17 +167,19 @@ private void deleteAndWait(BlockDeletingServiceTestImpl service, * Get under deletion blocks count from DB, * note this info is parsed from container.db. */ - private int getUnderDeletionBlocksCount(MetadataStore meta) + private int getUnderDeletionBlocksCount(ReferenceCountedDB meta) throws IOException { List> underDeletionBlocks = - meta.getRangeKVs(null, 100, new MetadataKeyFilters.KeyPrefixFilter() + meta.getStore().getRangeKVs(null, 100, + new MetadataKeyFilters.KeyPrefixFilter() .addFilter(OzoneConsts.DELETING_KEY_PREFIX)); return underDeletionBlocks.size(); } - private int getDeletedBlocksCount(MetadataStore db) throws IOException { + private int getDeletedBlocksCount(ReferenceCountedDB db) throws IOException { List> underDeletionBlocks = - db.getRangeKVs(null, 100, new MetadataKeyFilters.KeyPrefixFilter() + db.getStore().getRangeKVs(null, 100, + new MetadataKeyFilters.KeyPrefixFilter() .addFilter(OzoneConsts.DELETED_KEY_PREFIX)); return underDeletionBlocks.size(); } @@ -202,37 +205,38 @@ public void testBlockDeletion() throws Exception { containerSet.listContainer(0L, 1, containerData); Assert.assertEquals(1, containerData.size()); - MetadataStore meta = BlockUtils.getDB( - (KeyValueContainerData) containerData.get(0), conf); - Map containerMap = containerSet.getContainerMapCopy(); - // NOTE: this test assumes that all the container is KetValueContainer and - // have DeleteTransactionId in KetValueContainerData. If other - // types is going to be added, this test should be checked. 
- long transactionId = ((KeyValueContainerData)containerMap - .get(containerData.get(0).getContainerID()).getContainerData()) - .getDeleteTransactionId(); - - - // Number of deleted blocks in container should be equal to 0 before - // block delete - Assert.assertEquals(0, transactionId); - - // Ensure there are 3 blocks under deletion and 0 deleted blocks - Assert.assertEquals(3, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(0, getDeletedBlocksCount(meta)); - - // An interval will delete 1 * 2 blocks - deleteAndWait(svc, 1); - Assert.assertEquals(1, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(2, getDeletedBlocksCount(meta)); - - deleteAndWait(svc, 2); - Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(3, getDeletedBlocksCount(meta)); - - deleteAndWait(svc, 3); - Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(3, getDeletedBlocksCount(meta)); + try(ReferenceCountedDB meta = BlockUtils.getDB( + (KeyValueContainerData) containerData.get(0), conf)) { + Map containerMap = containerSet.getContainerMapCopy(); + // NOTE: this test assumes that all the container is KetValueContainer and + // have DeleteTransactionId in KetValueContainerData. If other + // types is going to be added, this test should be checked. + long transactionId = ((KeyValueContainerData) containerMap + .get(containerData.get(0).getContainerID()).getContainerData()) + .getDeleteTransactionId(); + + + // Number of deleted blocks in container should be equal to 0 before + // block delete + Assert.assertEquals(0, transactionId); + + // Ensure there are 3 blocks under deletion and 0 deleted blocks + Assert.assertEquals(3, getUnderDeletionBlocksCount(meta)); + Assert.assertEquals(0, getDeletedBlocksCount(meta)); + + // An interval will delete 1 * 2 blocks + deleteAndWait(svc, 1); + Assert.assertEquals(1, getUnderDeletionBlocksCount(meta)); + Assert.assertEquals(2, getDeletedBlocksCount(meta)); + + deleteAndWait(svc, 2); + Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); + Assert.assertEquals(3, getDeletedBlocksCount(meta)); + + deleteAndWait(svc, 3); + Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); + Assert.assertEquals(3, getDeletedBlocksCount(meta)); + } svc.shutdown(); } @@ -311,25 +315,26 @@ public void testBlockDeletionTimeout() throws Exception { // get container meta data List containerData = Lists.newArrayList(); containerSet.listContainer(0L, 1, containerData); - MetadataStore meta = BlockUtils.getDB( - (KeyValueContainerData) containerData.get(0), conf); + try(ReferenceCountedDB meta = BlockUtils.getDB( + (KeyValueContainerData) containerData.get(0), conf)) { - LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG); - GenericTestUtils.waitFor(() -> { - try { - if (getUnderDeletionBlocksCount(meta) == 0) { - return true; + LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG); + GenericTestUtils.waitFor(() -> { + try { + if (getUnderDeletionBlocksCount(meta) == 0) { + return true; + } + } catch (IOException ignored) { } - } catch (IOException ignored) { - } - return false; - }, 1000, 100000); - newLog.stopCapturing(); + return false; + }, 1000, 100000); + newLog.stopCapturing(); - // The block deleting successfully and shouldn't catch timed - // out warning log. - Assert.assertTrue(!newLog.getOutput().contains( - "Background task executes timed out, retrying in next interval")); + // The block deleting successfully and shouldn't catch timed + // out warning log. 
+ Assert.assertTrue(!newLog.getOutput().contains( + "Background task executes timed out, retrying in next interval")); + } svc.shutdown(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index 2fd169c03c3ed..f43caeeda2b49 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -47,7 +47,7 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; @@ -202,7 +202,7 @@ public void testCreateContainer() throws Exception { Path meta = kvData.getDbFile().toPath().getParent(); Assert.assertTrue(meta != null && Files.exists(meta)); - MetadataStore store = null; + ReferenceCountedDB store = null; try { store = BlockUtils.getDB(kvData, conf); Assert.assertNotNull(store); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 14db90d8cdbd3..cbb83eaadd004 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -50,7 +50,7 @@ import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; @@ -300,9 +300,12 @@ private void verifyBlocksCreated( cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() .getContainer().getContainerSet(); OzoneTestUtils.performOperationOnKeyContainers((blockID) -> { - MetadataStore db = BlockUtils.getDB((KeyValueContainerData) dnContainerSet - .getContainer(blockID.getContainerID()).getContainerData(), conf); - Assert.assertNotNull(db.get(Longs.toByteArray(blockID.getLocalID()))); + try(ReferenceCountedDB db = + BlockUtils.getDB((KeyValueContainerData) dnContainerSet + .getContainer(blockID.getContainerID()).getContainerData(), conf)) { + Assert.assertNotNull(db.getStore().get( + Longs.toByteArray(blockID.getLocalID()))); + } }, omKeyLocationInfoGroups); } @@ -312,13 +315,16 @@ private void verifyBlocksDeleted( cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() .getContainer().getContainerSet(); OzoneTestUtils.performOperationOnKeyContainers((blockID) -> { - MetadataStore db = BlockUtils.getDB((KeyValueContainerData) dnContainerSet - .getContainer(blockID.getContainerID()).getContainerData(), conf); - 
Assert.assertNull(db.get(Longs.toByteArray(blockID.getLocalID()))); - Assert.assertNull(db.get(DFSUtil.string2Bytes( - OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()))); - Assert.assertNotNull(DFSUtil - .string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blockID.getLocalID())); + try(ReferenceCountedDB db = + BlockUtils.getDB((KeyValueContainerData) dnContainerSet + .getContainer(blockID.getContainerID()).getContainerData(), conf)) { + Assert.assertNull(db.getStore().get( + Longs.toByteArray(blockID.getLocalID()))); + Assert.assertNull(db.getStore().get(DFSUtil.string2Bytes( + OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()))); + Assert.assertNotNull(DFSUtil.string2Bytes( + OzoneConsts.DELETED_KEY_PREFIX + blockID.getLocalID())); + } containerIdsWithDeletedBlocks.add(blockID.getContainerID()); }, omKeyLocationInfoGroups); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index 4a86f440170ae..e384d71f609a6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -41,7 +41,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -226,7 +226,7 @@ public void testCloseContainerViaRatis() throws IOException, List datanodes = pipeline.getNodes(); Assert.assertEquals(3, datanodes.size()); - List metadataStores = new ArrayList<>(datanodes.size()); + List metadataStores = new ArrayList<>(datanodes.size()); for (DatanodeDetails details : datanodes) { Assert.assertFalse(isContainerClosed(cluster, containerID, details)); //send the order to close the container @@ -237,8 +237,10 @@ public void testCloseContainerViaRatis() throws IOException, Container dnContainer = cluster.getHddsDatanodes().get(index) .getDatanodeStateMachine().getContainer().getContainerSet() .getContainer(containerID); - metadataStores.add(BlockUtils.getDB((KeyValueContainerData) dnContainer - .getContainerData(), conf)); + try(ReferenceCountedDB store = BlockUtils.getDB( + (KeyValueContainerData) dnContainer.getContainerData(), conf)) { + metadataStores.add(store); + } } // There should be as many rocks db as the number of datanodes in pipeline. From a315913c48f475a31065de48a441c7faae89ab15 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Wed, 22 May 2019 17:24:27 +0530 Subject: [PATCH 0017/1308] HDDS-1517. AllocateBlock call fails with ContainerNotFoundException (#826). Contributed by Shashikant Banerjee. 
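Note for reviewers: the failure fixed here comes from a publish-order race during container allocation. Below is a minimal, self-contained Java sketch of that window; the `containers`/`pipelineContainers`/`pipelineLock` names are hypothetical stand-ins, not the actual SCM ContainerStateManager/PipelineManager classes. If the new container ID is added to the pipeline's set before the container itself is registered, a concurrent caller that resolves IDs from the pipeline can pick up an ID that has no container yet, which surfaces as ContainerNotFoundException. Registering the container first, and doing the select-then-allocate sequence under the pipeline lock, removes that window.

```
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/** Hypothetical stand-ins used only to illustrate the ordering issue. */
public final class AllocationOrderSketch {

  private static final Map<Long, String> containers = new ConcurrentHashMap<>();
  private static final Set<Long> pipelineContainers = ConcurrentHashMap.newKeySet();
  private static final Object pipelineLock = new Object(); // plays the role of the Pipeline monitor

  /** Pre-patch order: the ID is visible through the pipeline before the container is registered. */
  static void allocateRacy(long id) {
    pipelineContainers.add(id);            // a reader can pick this ID up now...
    containers.put(id, "container-" + id); // ...before the lookup target exists
  }

  /** Patched order: register the container first, then publish its ID, under the pipeline lock. */
  static void allocateSafe(long id) {
    synchronized (pipelineLock) {
      containers.put(id, "container-" + id);
      pipelineContainers.add(id);
    }
  }

  /** Loosely mirrors the reader path: pick an ID from the pipeline, then resolve it. */
  static String lookupAny() {
    for (long id : pipelineContainers) {
      String info = containers.get(id);
      if (info == null) {
        // With allocateRacy this is the ContainerNotFoundException-style failure.
        throw new IllegalStateException("container " + id + " not found");
      }
      return info;
    }
    return null;
  }

  public static void main(String[] args) throws InterruptedException {
    Thread writer = new Thread(() -> {
      for (long id = 0; id < 100_000; id++) {
        allocateSafe(id); // switch to allocateRacy(id) to observe the missing-container window
      }
    });
    Thread reader = new Thread(() -> {
      for (int i = 0; i < 100_000; i++) {
        try {
          lookupAny();
        } catch (IllegalStateException e) {
          System.out.println("race observed: " + e.getMessage());
        }
      }
    });
    writer.start();
    reader.start();
    writer.join();
    reader.join();
    System.out.println("containers registered: " + containers.size());
  }
}
```

The new tests added by this patch (testAllocateBlockInParallel and testAllocateContainerInParallel) exercise the same pattern by firing many allocations concurrently and failing if any of them completes exceptionally.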
--- .../scm/container/ContainerStateManager.java | 10 ++-- .../scm/container/SCMContainerManager.java | 15 +++--- .../hdds/scm/block/TestBlockManager.java | 43 +++++++++++++++++ .../container/TestSCMContainerManager.java | 47 ++++++++++++++++++- 4 files changed, 102 insertions(+), 13 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index a37bf33bd95bf..cd3f423213898 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -263,11 +263,15 @@ ContainerInfo allocateContainer(final PipelineManager pipelineManager, } pipeline = pipelines.get((int) containerCount.get() % pipelines.size()); } - return allocateContainer(pipelineManager, owner, pipeline); + synchronized (pipeline) { + return allocateContainer(pipelineManager, owner, pipeline); + } } /** * Allocates a new container based on the type, replication etc. + * This method should be called only after the lock on the pipeline is held + * on which the container will be allocated. * * @param pipelineManager - Pipeline Manager class. * @param owner - Owner of the container. @@ -296,10 +300,10 @@ ContainerInfo allocateContainer( .setReplicationFactor(pipeline.getFactor()) .setReplicationType(pipeline.getType()) .build(); - pipelineManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(containerID)); Preconditions.checkNotNull(containerInfo); containers.addContainer(containerInfo); + pipelineManager.addContainerToPipeline(pipeline.getId(), + ContainerID.valueof(containerID)); containerStateCount.incrementAndGet(containerInfo.getState()); LOG.trace("New container allocated: {}", containerInfo); return containerInfo; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java index 80d7ec10e0cee..359731cfe4652 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java @@ -386,18 +386,17 @@ public ContainerInfo getMatchingContainer(final long sizeRequired, public ContainerInfo getMatchingContainer(final long sizeRequired, String owner, Pipeline pipeline, List excludedContainers) { + NavigableSet containerIDs; try { - //TODO: #CLUTIL See if lock is required here - NavigableSet containerIDs = - pipelineManager.getContainersInPipeline(pipeline.getId()); + synchronized (pipeline) { + //TODO: #CLUTIL See if lock is required here + containerIDs = + pipelineManager.getContainersInPipeline(pipeline.getId()); - containerIDs = getContainersForOwner(containerIDs, owner); - if (containerIDs.size() < numContainerPerOwnerInPipeline) { - synchronized (pipeline) { + containerIDs = getContainersForOwner(containerIDs, owner); + if (containerIDs.size() < numContainerPerOwnerInPipeline) { // TODO: #CLUTIL Maybe we can add selection logic inside synchronized // as well - containerIDs = getContainersForOwner( - pipelineManager.getContainersInPipeline(pipeline.getId()), owner); if (containerIDs.size() < numContainerPerOwnerInPipeline) { ContainerInfo containerInfo = 
containerStateManager.allocateContainer(pipelineManager, owner, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 6a98a346f73c4..e5c4766697d5a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -19,7 +19,13 @@ import java.io.File; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeoutException; + import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -132,6 +138,43 @@ public void testAllocateBlock() throws Exception { Assert.assertNotNull(block); } + @Test + public void testAllocateBlockInParallel() throws Exception { + eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus); + GenericTestUtils.waitFor(() -> { + return !blockManager.isScmInSafeMode(); + }, 10, 1000 * 5); + int threadCount = 20; + List executors = new ArrayList<>(threadCount); + for (int i = 0; i < threadCount; i++) { + executors.add(Executors.newSingleThreadExecutor()); + } + List> futureList = + new ArrayList<>(threadCount); + for (int i = 0; i < threadCount; i++) { + final CompletableFuture future = + new CompletableFuture<>(); + CompletableFuture.supplyAsync(() -> { + try { + future.complete(blockManager + .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner, + new ExcludeList())); + } catch (IOException e) { + future.completeExceptionally(e); + } + return future; + }, executors.get(i)); + futureList.add(future); + } + try { + CompletableFuture + .allOf(futureList.toArray(new CompletableFuture[futureList.size()])) + .get(); + } catch (Exception e) { + Assert.fail("testAllocateBlockInParallel failed"); + } + } + @Test public void testAllocateOversizedBlock() throws Exception { eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java index a562efeab4f9b..bfdeac5263e0f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java @@ -43,14 +43,20 @@ import java.io.File; import java.io.IOException; -import java.util.Iterator; -import java.util.Optional; import java.util.Random; import java.util.Set; import java.util.TreeSet; import java.util.UUID; +import java.util.Iterator; +import java.util.Optional; +import java.util.List; +import java.util.ArrayList; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; + import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -144,6 +150,43 @@ public void testallocateContainerDistributesAllocation() throws Exception { Assert.assertTrue(pipelineList.size() > 5); } + @Test + public void 
testAllocateContainerInParallel() throws Exception { + int threadCount = 20; + List executors = new ArrayList<>(threadCount); + for (int i = 0; i < threadCount; i++) { + executors.add(Executors.newSingleThreadExecutor()); + } + List> futureList = + new ArrayList<>(threadCount); + for (int i = 0; i < threadCount; i++) { + final CompletableFuture future = new CompletableFuture<>(); + CompletableFuture.supplyAsync(() -> { + try { + ContainerInfo containerInfo = containerManager + .allocateContainer(xceiverClientManager.getType(), + xceiverClientManager.getFactor(), containerOwner); + + Assert.assertNotNull(containerInfo); + Assert.assertNotNull(containerInfo.getPipelineID()); + future.complete(containerInfo); + return containerInfo; + } catch (IOException e) { + future.completeExceptionally(e); + } + return future; + }, executors.get(i)); + futureList.add(future); + } + try { + CompletableFuture + .allOf(futureList.toArray(new CompletableFuture[futureList.size()])) + .get(); + } catch (Exception e) { + Assert.fail("testAllocateBlockInParallel failed"); + } + } + @Test public void testGetContainer() throws IOException { ContainerInfo containerInfo = containerManager.allocateContainer( From 9c61494c02ee5fc27841a0d82959a8a2acc18a4e Mon Sep 17 00:00:00 2001 From: Ajay Yadav <7813154+ajayydv@users.noreply.github.com> Date: Wed, 22 May 2019 11:47:32 -0700 Subject: [PATCH 0018/1308] HDDS-1065. OM and DN should persist SCM certificate as the trust root. Contributed by Ajay Kumar. (#834) --- .../org/apache/hadoop/hdds/HddsUtils.java | 2 +- ...ecurityProtocolClientSideTranslatorPB.java | 53 ++++++++++++++----- ...ecurityProtocolServerSideTranslatorPB.java | 7 ++- .../certificate/client/CertificateClient.java | 13 +++++ .../client/DefaultCertificateClient.java | 25 ++++++++- .../src/main/proto/SCMSecurityProtocol.proto | 1 + .../hadoop/ozone/HddsDatanodeService.java | 28 ++++++---- .../hadoop/ozone/TestSecureOzoneCluster.java | 8 +++ .../client/CertificateClientTestImpl.java | 4 ++ .../apache/hadoop/ozone/om/OzoneManager.java | 31 +++++++---- 10 files changed, 135 insertions(+), 37 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index 92ed9b61630c9..a5961cb5dde52 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -178,7 +178,7 @@ public static InetSocketAddress getScmAddressForBlockClients( * @return {@link SCMSecurityProtocol} * @throws IOException */ - public static SCMSecurityProtocol getScmSecurityClient( + public static SCMSecurityProtocolClientSideTranslatorPB getScmSecurityClient( OzoneConfiguration conf, InetSocketAddress address) throws IOException { RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class, ProtobufRpcEngine.class); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java index 7cf9476ff20c9..d7d53a4b8cdb2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto; import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto; +import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto; import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto.Builder; import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto; @@ -79,18 +80,8 @@ public void close() throws IOException { @Override public String getDataNodeCertificate(DatanodeDetailsProto dataNodeDetails, String certSignReq) throws IOException { - SCMGetDataNodeCertRequestProto.Builder builder = - SCMGetDataNodeCertRequestProto - .newBuilder() - .setCSR(certSignReq) - .setDatanodeDetails(dataNodeDetails); - try { - return rpcProxy - .getDataNodeCertificate(NULL_RPC_CONTROLLER, builder.build()) - .getX509Certificate(); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } + return getDataNodeCertificateChain(dataNodeDetails, certSignReq) + .getX509Certificate(); } /** @@ -103,13 +94,25 @@ public String getDataNodeCertificate(DatanodeDetailsProto dataNodeDetails, @Override public String getOMCertificate(OzoneManagerDetailsProto omDetails, String certSignReq) throws IOException { + return getOMCertChain(omDetails, certSignReq).getX509Certificate(); + } + + /** + * Get SCM signed certificate for OM. + * + * @param omDetails - OzoneManager Details. + * @param certSignReq - Certificate signing request. + * @return byte[] - SCM signed certificate. + */ + public SCMGetCertResponseProto getOMCertChain( + OzoneManagerDetailsProto omDetails, String certSignReq) + throws IOException { SCMGetOMCertRequestProto.Builder builder = SCMGetOMCertRequestProto .newBuilder() .setCSR(certSignReq) .setOmDetails(omDetails); try { - return rpcProxy.getOMCertificate(NULL_RPC_CONTROLLER, builder.build()) - .getX509Certificate(); + return rpcProxy.getOMCertificate(NULL_RPC_CONTROLLER, builder.build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -135,6 +138,28 @@ public String getCertificate(String certSerialId) throws IOException { } } + /** + * Get SCM signed certificate for Datanode. + * + * @param dnDetails - Datanode Details. + * @param certSignReq - Certificate signing request. + * @return byte[] - SCM signed certificate. + */ + public SCMGetCertResponseProto getDataNodeCertificateChain( + DatanodeDetailsProto dnDetails, String certSignReq) + throws IOException { + SCMGetDataNodeCertRequestProto.Builder builder = + SCMGetDataNodeCertRequestProto.newBuilder() + .setCSR(certSignReq) + .setDatanodeDetails(dnDetails); + try { + return rpcProxy.getDataNodeCertificate(NULL_RPC_CONTROLLER, + builder.build()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + /** * Get CA certificate. 
* diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolServerSideTranslatorPB.java index c7c4ff6b5988a..2fd55945753b1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolServerSideTranslatorPB.java @@ -61,7 +61,9 @@ public SCMGetCertResponseProto getDataNodeCertificate( SCMGetCertResponseProto .newBuilder() .setResponseCode(ResponseCode.success) - .setX509Certificate(certificate); + .setX509Certificate(certificate) + .setX509CACertificate(impl.getCACertificate()); + return builder.build(); } catch (IOException e) { throw new ServiceException(e); @@ -87,7 +89,8 @@ public SCMGetCertResponseProto getOMCertificate( SCMGetCertResponseProto .newBuilder() .setResponseCode(ResponseCode.success) - .setX509Certificate(certificate); + .setX509Certificate(certificate) + .setX509CACertificate(impl.getCACertificate()); return builder.build(); } catch (IOException e) { throw new ServiceException(e); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java index 480758b9ee1b6..c36c9e0537c30 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java @@ -141,6 +141,19 @@ boolean verifySignature(byte[] data, byte[] signature, void storeCertificate(String pemEncodedCert, boolean force) throws CertificateException; + /** + * Stores the Certificate for this client. Don't use this api to add + * trusted certificates of others. + * + * @param pemEncodedCert - pem encoded X509 Certificate + * @param force - override any existing file + * @param caCert - Is CA certificate. + * @throws CertificateException - on Error. + * + */ + void storeCertificate(String pemEncodedCert, boolean force, boolean caCert) + throws CertificateException; + /** * Stores the trusted chain of certificates. * diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java index 26be970043414..8f135745ab7ab 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java @@ -80,6 +80,7 @@ public abstract class DefaultCertificateClient implements CertificateClient { private static final String CERT_FILE_NAME_FORMAT = "%s.crt"; + private static final String CA_CERT_PREFIX = "CA-"; private final Logger logger; private final SecurityConfig securityConfig; private final KeyCodec keyCodec; @@ -452,14 +453,30 @@ public X509Certificate queryCertificate(String query) { * Stores the Certificate for this client. Don't use this api to add trusted * certificates of others. 
* - * @param pemEncodedCert - pem encoded X509 Certificate - * @param force - override any existing file + * @param pemEncodedCert - pem encoded X509 Certificate + * @param force - override any existing file * @throws CertificateException - on Error. * */ @Override public void storeCertificate(String pemEncodedCert, boolean force) throws CertificateException { + this.storeCertificate(pemEncodedCert, force, false); + } + + /** + * Stores the Certificate for this client. Don't use this api to add trusted + * certificates of others. + * + * @param pemEncodedCert - pem encoded X509 Certificate + * @param force - override any existing file + * @param caCert - Is CA certificate. + * @throws CertificateException - on Error. + * + */ + @Override + public void storeCertificate(String pemEncodedCert, boolean force, + boolean caCert) throws CertificateException { CertificateCodec certificateCodec = new CertificateCodec(securityConfig); try { Path basePath = securityConfig.getCertificateLocation(); @@ -469,6 +486,10 @@ public void storeCertificate(String pemEncodedCert, boolean force) String certName = String.format(CERT_FILE_NAME_FORMAT, cert.getSerialNumber().toString()); + if(caCert) { + certName = CA_CERT_PREFIX + certName; + } + certificateCodec.writeCertificate(basePath, certName, pemEncodedCert, force); certificateMap.putIfAbsent(cert.getSerialNumber().toString(), cert); diff --git a/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto b/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto index 5fcd98e6b9172..5b6dd27bf84fb 100644 --- a/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto +++ b/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto @@ -76,6 +76,7 @@ message SCMGetCertResponseProto { } required ResponseCode responseCode = 1; required string x509Certificate = 2; // Base64 encoded X509 certificate. + optional string x509CACertificate = 3; // Base64 encoded CA X509 certificate. } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 7cd4fd82c574a..93e94908375de 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -26,7 +26,8 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; +import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; +import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.HddsServerUtil; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.security.x509.SecurityConfig; @@ -271,16 +272,25 @@ private void getSCMSignedCert(OzoneConfiguration config) { try { PKCS10CertificationRequest csr = getCSR(config); // TODO: For SCM CA we should fetch certificate from multiple SCMs. 
- SCMSecurityProtocol secureScmClient = + SCMSecurityProtocolClientSideTranslatorPB secureScmClient = HddsUtils.getScmSecurityClient(config, HddsUtils.getScmAddressForSecurityProtocol(config)); - - String pemEncodedCert = secureScmClient.getDataNodeCertificate( - datanodeDetails.getProtoBufMessage(), getEncodedString(csr)); - dnCertClient.storeCertificate(pemEncodedCert, true); - datanodeDetails.setCertSerialId(getX509Certificate(pemEncodedCert). - getSerialNumber().toString()); - persistDatanodeDetails(datanodeDetails); + SCMGetCertResponseProto response = secureScmClient. + getDataNodeCertificateChain(datanodeDetails.getProtoBufMessage(), + getEncodedString(csr)); + // Persist certificates. + if(response.hasX509CACertificate()) { + String pemEncodedCert = response.getX509Certificate(); + dnCertClient.storeCertificate(pemEncodedCert, true); + dnCertClient.storeCertificate(response.getX509CACertificate(), true, + true); + datanodeDetails.setCertSerialId(getX509Certificate(pemEncodedCert). + getSerialNumber().toString()); + persistDatanodeDetails(datanodeDetails); + } else { + throw new RuntimeException("Unable to retrieve datanode certificate " + + "chain"); + } } catch (IOException | CertificateException e) { LOG.error("Error while storing SCM signed certificate.", e); throw new RuntimeException(e); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index d7cfd374981ae..7269e30aaaa5e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; import org.apache.hadoop.io.Text; @@ -98,6 +99,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; @@ -780,6 +782,12 @@ public void testSecureOmInitSuccess() throws Exception { "SCM signed certificate")); X509Certificate certificate = om.getCertificateClient().getCertificate(); validateCertificate(certificate); + String pemEncodedCACert = + scm.getSecurityProtocolServer().getCACertificate(); + X509Certificate caCert = CertificateCodec.getX509Cert(pemEncodedCACert); + X509Certificate caCertStored = om.getCertificateClient() + .getCertificate(caCert.getSerialNumber().toString()); + assertEquals(caCert, caCertStored); } finally { if (scm != null) { scm.stop(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java index 87fe70622f166..25bde38daf07b 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java @@ -139,7 +139,11 @@ public X509Certificate queryCertificate(String query) { @Override public void storeCertificate(String cert, boolean force) throws CertificateException { + } + @Override + public void storeCertificate(String cert, boolean force, boolean caCert) + throws CertificateException { } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index ec51fe77c04d0..6b341bce352e7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; +import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB; import org.apache.hadoop.hdds.scm.ScmInfo; @@ -785,8 +786,8 @@ private static ScmBlockLocationProtocol getScmBlockClient( * @return {@link SCMSecurityProtocol} * @throws IOException */ - private static SCMSecurityProtocol getScmSecurityClient( - OzoneConfiguration conf) throws IOException { + private static SCMSecurityProtocolClientSideTranslatorPB + getScmSecurityClient(OzoneConfiguration conf) throws IOException { RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class, ProtobufRpcEngine.class); long scmVersion = @@ -1455,16 +1456,28 @@ private static void getSCMSignedCert(CertificateClient client, HddsProtos.OzoneManagerDetailsProto omDetailsProto = omDetailsProtoBuilder.build(); LOG.info("OzoneManager ports added:{}", omDetailsProto.getPortsList()); - SCMSecurityProtocol secureScmClient = getScmSecurityClient(config); + SCMSecurityProtocolClientSideTranslatorPB secureScmClient = + getScmSecurityClient(config); - String pemEncodedCert = secureScmClient.getOMCertificate(omDetailsProto, - getEncodedString(csr)); + SCMGetCertResponseProto response = secureScmClient. + getOMCertChain(omDetailsProto, getEncodedString(csr)); + String pemEncodedCert = response.getX509Certificate(); try { - client.storeCertificate(pemEncodedCert, true); - // Persist om cert serial id. - omStore.setOmCertSerialId(CertificateCodec. - getX509Certificate(pemEncodedCert).getSerialNumber().toString()); + + + // Store SCM CA certificate. + if(response.hasX509CACertificate()) { + String pemEncodedRootCert = response.getX509CACertificate(); + client.storeCertificate(pemEncodedRootCert, true, true); + client.storeCertificate(pemEncodedCert, true); + // Persist om cert serial id. + omStore.setOmCertSerialId(CertificateCodec. 
+ getX509Certificate(pemEncodedCert).getSerialNumber().toString()); + } else { + throw new RuntimeException("Unable to retrieve OM certificate " + + "chain"); + } } catch (IOException | CertificateException e) { LOG.error("Error while storing SCM signed certificate.", e); throw new RuntimeException(e); From 5565f2c532f5a2bee6999155672dce8bf3179519 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 23 May 2019 10:21:11 +0900 Subject: [PATCH 0019/1308] MAPREDUCE-7198. mapreduce.task.timeout=0 configuration used to disable timeout doesn't work. --- .../v2/app/TaskHeartbeatHandler.java | 5 ++- .../v2/app/TestTaskHeartbeatHandler.java | 43 ++++++++++++++++++- .../src/main/resources/mapred-default.xml | 1 + 3 files changed, 46 insertions(+), 3 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java index 456f2a66c8f91..9439a7be8d61c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java @@ -192,7 +192,8 @@ private void checkRunning(long currentTime) { (currentTime > (entry.getValue().getLastProgress() + taskTimeOut)); // when container in NM not started in a long time, // we think the taskAttempt is stuck - boolean taskStuck = (!entry.getValue().isReported()) && + boolean taskStuck = (taskStuckTimeOut > 0) && + (!entry.getValue().isReported()) && (currentTime > (entry.getValue().getLastProgress() + taskStuckTimeOut)); @@ -225,7 +226,7 @@ private void checkRecentlyUnregistered(long currentTime) { } @VisibleForTesting - ConcurrentMap getRunningAttempts(){ + ConcurrentMap getRunningAttempts(){ return runningAttempts; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java index f0368ebe871ad..418f09e8d3597 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertFalse; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -48,7 +49,7 @@ public class TestTaskHeartbeatHandler { - @SuppressWarnings({ "rawtypes", "unchecked" }) + @SuppressWarnings("unchecked") @Test public void testTaskTimeout() throws InterruptedException { EventHandler mockHandler = mock(EventHandler.class); @@ -81,6 +82,46 @@ public void testTaskTimeout() throws InterruptedException { } } + @Test + @SuppressWarnings("unchecked") + public void testTaskTimeoutDisable() throws InterruptedException { + EventHandler mockHandler = 
mock(EventHandler.class); + Clock clock = SystemClock.getInstance(); + TaskHeartbeatHandler hb = new TaskHeartbeatHandler(mockHandler, clock, 1); + + Configuration conf = new Configuration(); + conf.setLong(MRJobConfig.TASK_STUCK_TIMEOUT_MS, 0); // no timeout + conf.setInt(MRJobConfig.TASK_TIMEOUT, 0); // no timeout + // set TASK_PROGRESS_REPORT_INTERVAL to a value smaller than TASK_TIMEOUT + // so that TASK_TIMEOUT is not overridden + conf.setLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL, 0); + conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 10); //10 ms + + hb.init(conf); + hb.start(); + try { + ApplicationId appId = ApplicationId.newInstance(0L, 5); + JobId jobId = MRBuilderUtils.newJobId(appId, 4); + TaskId tid = MRBuilderUtils.newTaskId(jobId, 3, TaskType.MAP); + TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 2); + hb.register(taid); + + ConcurrentMap + runningAttempts = hb.getRunningAttempts(); + for (Map.Entry entry + : runningAttempts.entrySet()) { + assertFalse(entry.getValue().isReported()); + } + + Thread.sleep(100); + + // Timeout is disabled, so the task should not be canceled + verify(mockHandler, never()).handle(any(Event.class)); + } finally { + hb.stop(); + } + } + @SuppressWarnings("unchecked") @Test public void testTaskStuck() throws InterruptedException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index fa26e4d738295..1ba82d2b256da 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -282,6 +282,7 @@ The max timeout before receiving remote task's first heartbeat. This parameter is in order to avoid waiting for the container to start indefinitely, which made task stuck in the NEW state. + A value of 0 disables the timeout.
    From 03aa70fe19e06a0b3d637acc7b5db4c955f0a693 Mon Sep 17 00:00:00 2001 From: Zhankun Tang Date: Thu, 23 May 2019 10:02:00 +0800 Subject: [PATCH 0020/1308] SUBMARINE-83. Refine the documents of submarine targeting 0.2.0 release. Contributed by Zhankun Tang. --- .../hadoop-submarine-core/README.md | 7 ++-- .../src/site/markdown/DeveloperGuide.md | 24 ------------ .../src/site/markdown/Examples.md | 4 +- .../src/site/markdown/Index.md | 5 +-- .../src/site/markdown/QuickStart.md | 19 +++++++++- .../site/markdown/RunningZeppelinOnYARN.md | 37 ------------------- .../src/site/markdown/TonYRuntimeGuide.md} | 10 ++--- .../src/site/resources/css/site.css | 29 --------------- .../src/site/site.xml | 28 -------------- 9 files changed, 28 insertions(+), 135 deletions(-) delete mode 100644 hadoop-submarine/hadoop-submarine-core/src/site/markdown/DeveloperGuide.md delete mode 100644 hadoop-submarine/hadoop-submarine-core/src/site/markdown/RunningZeppelinOnYARN.md rename hadoop-submarine/{hadoop-submarine-tony-runtime/src/site/markdown/QuickStart.md => hadoop-submarine-core/src/site/markdown/TonYRuntimeGuide.md} (98%) delete mode 100644 hadoop-submarine/hadoop-submarine-tony-runtime/src/site/resources/css/site.css delete mode 100644 hadoop-submarine/hadoop-submarine-tony-runtime/src/site/site.xml diff --git a/hadoop-submarine/hadoop-submarine-core/README.md b/hadoop-submarine/hadoop-submarine-core/README.md index cb2e2da107bce..cc137ea5db320 100644 --- a/hadoop-submarine/hadoop-submarine-core/README.md +++ b/hadoop-submarine/hadoop-submarine-core/README.md @@ -37,11 +37,12 @@ \__________________________________________________________/ (_) ``` -Submarine is a project which allows infra engineer / data scientist to run *unmodified* Tensorflow programs on YARN. +Submarine is a project which allows infra engineer / data scientist to run +*unmodified* Tensorflow or PyTorch programs on YARN or Kubernetes. Goals of Submarine: - It allows jobs easy access data/models in HDFS and other storages. -- Can launch services to serve Tensorflow/MXNet models. +- Can launch services to serve Tensorflow/PyTorch models. - Support run distributed Tensorflow jobs with simple configs. - Support run user-specified Docker images. - Support specify GPU and other resources. @@ -51,5 +52,3 @@ Goals of Submarine: Please jump to [QuickStart](src/site/markdown/QuickStart.md) guide to quickly understand how to use this framework. Please jump to [Examples](src/site/markdown/Examples.md) to try other examples like running Distributed Tensorflow Training for CIFAR 10. - -If you're a developer, please find [Developer](src/site/markdown/DeveloperGuide.md) guide for more details. diff --git a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/DeveloperGuide.md b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/DeveloperGuide.md deleted file mode 100644 index 9ab0641235a60..0000000000000 --- a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/DeveloperGuide.md +++ /dev/null @@ -1,24 +0,0 @@ - - -# Developer Guide - -By default, Submarine uses YARN service framework as runtime. If you want to add your own implementation, you can add a new `RuntimeFactory` implementation and configure following option to `submarine.xml` (which should be placed under same `$HADOOP_CONF_DIR`) - -``` - - submarine.runtime.class - ... full qualified class name for your runtime factory ... 
- -``` diff --git a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/Examples.md b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/Examples.md index d878adde253b0..b66b32d403f83 100644 --- a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/Examples.md +++ b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/Examples.md @@ -18,6 +18,4 @@ Here're some examples about Submarine usage. [Running Distributed CIFAR 10 Tensorflow Job](RunningDistributedCifar10TFJobs.html) -[Running Standalone CIFAR 10 PyTorch Job](RunningSingleNodeCifar10PTJobs.html) - -[Running Zeppelin Notebook on YARN](RunningZeppelinOnYARN.html) \ No newline at end of file +[Running Standalone CIFAR 10 PyTorch Job](RunningSingleNodeCifar10PTJobs.html) \ No newline at end of file diff --git a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/Index.md b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/Index.md index f8556a6c103d3..d11fa4572ab7b 100644 --- a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/Index.md +++ b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/Index.md @@ -12,7 +12,8 @@ limitations under the License. See accompanying LICENSE file. --> -Submarine is a project which allows infra engineer / data scientist to run *unmodified* Tensorflow programs on YARN. +Submarine is a project which allows infra engineer / data scientist to run +*unmodified* Tensorflow or PyTorch programs on YARN or Kubernetes. Goals of Submarine: @@ -43,6 +44,4 @@ Click below contents if you want to understand more. - [How to write Dockerfile for Submarine PyTorch jobs](WriteDockerfilePT.html) -- [Developer guide](DeveloperGuide.html) - - [Installation guides](HowToInstall.html) diff --git a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/QuickStart.md b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/QuickStart.md index f693917d90565..e2df213dc4ab6 100644 --- a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/QuickStart.md +++ b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/QuickStart.md @@ -18,7 +18,7 @@ Must: -- Apache Hadoop 3.1.x, YARN service enabled. +- Apache Hadoop version newer than 2.7.3 Optional: @@ -37,6 +37,20 @@ For more details, please refer to: - [How to write Dockerfile for Submarine PyTorch jobs](WriteDockerfilePT.html) +## Submarine runtimes +After submarine 0.2.0, it supports two runtimes which are YARN native service + runtime and Linkedin's TonY runtime. Each runtime can support both Tensorflow + and Pytorch framework. And the user don't need to worry about the usage + because the two runtime implements the same interface. + +To use the TonY runtime, please set below value in the submarine configuration. + +|Configuration Name | Description | +|:---- |:---- | +| `submarine.runtime.class` | org.apache.hadoop.yarn.submarine.runtimes.tony.TonyRuntimeFactory | + +For more details of TonY runtime, please check [TonY runtime guide](TonYRuntimeGuide.html) + ## Run jobs ### Commandline options @@ -164,7 +178,8 @@ See below screenshot: ![alt text](./images/tensorboard-service.png "Tensorboard service") -If there is no hadoop client, we can also use the java command and the uber jar, hadoop-submarine-all-*.jar, to submit the job. +After v0.2.0, if there is no hadoop client, we can also use the java command +and the uber jar, hadoop-submarine-all-*.jar, to submit the job. 
``` java -cp /path-to/hadoop-conf:/path-to/hadoop-submarine-all-*.jar \ diff --git a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/RunningZeppelinOnYARN.md b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/RunningZeppelinOnYARN.md deleted file mode 100644 index e06526c2e6aca..0000000000000 --- a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/RunningZeppelinOnYARN.md +++ /dev/null @@ -1,37 +0,0 @@ - - -# Running Zeppelin Notebook On Submarine - -This is a simple example about how to run Zeppelin notebook by using Submarine. - -## Step 1: Build Docker Image - -Go to `src/main/docker/zeppelin-notebook-example`, build the Docker image. Or you can use the prebuilt one: `hadoopsubmarine/zeppelin-on-yarn-gpu:0.0.1` - -## Step 2: Launch the notebook on YARN - -Submit command to YARN: - -`yarn app -destroy zeppelin-notebook; -yarn jar path-to/hadoop-yarn-applications-submarine-3.2.0-SNAPSHOT.jar \ - job run --name zeppelin-notebook \ - --docker_image hadoopsubmarine/zeppelin-on-yarn-gpu:0.0.1 \ - --worker_resources memory=8G,vcores=2,gpu=1 \ - --num_workers 1 \ - -worker_launch_cmd "/usr/local/bin/run_container.sh"` - -Once the container got launched, you can go to `YARN services` UI page, access the `zeppelin-notebook` job, and go to the quicklink `notebook` by clicking `...`. - -The notebook is secured by admin/admin user name and password. \ No newline at end of file diff --git a/hadoop-submarine/hadoop-submarine-tony-runtime/src/site/markdown/QuickStart.md b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/TonYRuntimeGuide.md similarity index 98% rename from hadoop-submarine/hadoop-submarine-tony-runtime/src/site/markdown/QuickStart.md rename to hadoop-submarine/hadoop-submarine-core/src/site/markdown/TonYRuntimeGuide.md index 864aebcea31fa..105a72431ddb3 100644 --- a/hadoop-submarine/hadoop-submarine-tony-runtime/src/site/markdown/QuickStart.md +++ b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/TonYRuntimeGuide.md @@ -247,16 +247,16 @@ CLASSPATH=$(hadoop classpath --glob): \ /home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.2-all.jar \ java org.apache.hadoop.yarn.submarine.client.cli.Cli job run --name tf-job-001 \ - --framework tensorflow \ --num_workers 2 \ --worker_resources memory=3G,vcores=2 \ --num_ps 2 \ --ps_resources memory=3G,vcores=2 \ --worker_launch_cmd "venv.zip/venv/bin/python mnist_distributed.py" \ --ps_launch_cmd "venv.zip/venv/bin/python mnist_distributed.py" \ - --insecure + --insecure \ --conf tony.containers.resources=PATH_TO_VENV_YOU_CREATED/venv.zip#archive,PATH_TO_MNIST_EXAMPLE/mnist_distributed.py, \ -PATH_TO_TONY_CLI_JAR/tony-cli-0.3.2-all.jar +PATH_TO_TONY_CLI_JAR/tony-cli-0.3.2-all.jar \ +--conf tony.application.framework=pytorch ``` You should then be able to see links and status of the jobs from command line: @@ -284,7 +284,6 @@ CLASSPATH=$(hadoop classpath --glob): \ /home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.2-all.jar \ java org.apache.hadoop.yarn.submarine.client.cli.Cli job run --name tf-job-001 \ - --framework tensorflow \ --docker_image hadoopsubmarine/tf-1.8.0-cpu:0.0.3 \ --input_path hdfs://pi-aw:9000/dataset/cifar-10-data \ --worker_resources memory=3G,vcores=2 \ @@ -297,5 +296,6 @@ java org.apache.hadoop.yarn.submarine.client.cli.Cli job run --name tf-job-001 \ --env HADOOP_COMMON_HOME=/hadoop-3.1.0 \ --env HADOOP_HDFS_HOME=/hadoop-3.1.0 \ --env HADOOP_CONF_DIR=/hadoop-3.1.0/etc/hadoop \ - --conf tony.containers.resources=--conf 
tony.containers.resources=/home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.2-all.jar + --conf tony.containers.resources=PATH_TO_TONY_CLI_JAR/tony-cli-0.3.2-all.jar \ + --conf tony.application.framework=pytorch ``` diff --git a/hadoop-submarine/hadoop-submarine-tony-runtime/src/site/resources/css/site.css b/hadoop-submarine/hadoop-submarine-tony-runtime/src/site/resources/css/site.css deleted file mode 100644 index 7315db31e53ca..0000000000000 --- a/hadoop-submarine/hadoop-submarine-tony-runtime/src/site/resources/css/site.css +++ /dev/null @@ -1,29 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one or more -* contributor license agreements. See the NOTICE file distributed with -* this work for additional information regarding copyright ownership. -* The ASF licenses this file to You under the Apache License, Version 2.0 -* (the "License"); you may not use this file except in compliance with -* the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ -#banner { - height: 93px; - background: none; -} - -#bannerLeft img { - margin-left: 30px; - margin-top: 10px; -} - -#bannerRight img { - margin: 17px; -} diff --git a/hadoop-submarine/hadoop-submarine-tony-runtime/src/site/site.xml b/hadoop-submarine/hadoop-submarine-tony-runtime/src/site/site.xml deleted file mode 100644 index 5feae9a879b8d..0000000000000 --- a/hadoop-submarine/hadoop-submarine-tony-runtime/src/site/site.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - - - org.apache.maven.skins - maven-stylus-skin - ${maven-stylus-skin.version} - - - - - - - - - From a771e2a638aadbb2f1daff9fbb42b47f74a96183 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 23 May 2019 13:59:42 +0900 Subject: [PATCH 0021/1308] HADOOP-12948. Remove the defunct startKdc profile from hadoop-common. Contributed by Wei-Chiu Chuang. 
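Context for this removal: the startKdc profile relied on an externally started ApacheDS KDC plus checked-in LDIF and server configuration (users.ldif, server.xml, killKdc.sh), and the subject calls the profile defunct. The kind of coverage the removed TestUGIWithSecurityOn provided is typically written against an embedded KDC started by the test itself. A minimal sketch follows, assuming hadoop-minikdc is available on the test classpath; none of this code is part of the patch.

```
import java.io.File;
import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

public class MiniKdcLoginSketch {
  public static void main(String[] args) throws Exception {
    File workDir = new File("target/minikdc-work");
    workDir.mkdirs();

    // Start an embedded KDC instead of relying on a pre-started ApacheDS instance.
    Properties kdcConf = MiniKdc.createConf();
    MiniKdc kdc = new MiniKdc(kdcConf, workDir);
    kdc.start();
    try {
      // Create the principal and keytab on the fly, replacing checked-in LDIF/keytab files.
      File keytab = new File(workDir, "user1.keytab");
      kdc.createPrincipal(keytab, "user1");

      Configuration conf = new Configuration();
      SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
      UserGroupInformation.setConfiguration(conf);

      UserGroupInformation ugi = UserGroupInformation
          .loginUserFromKeytabAndReturnUGI(
              "user1@" + kdc.getRealm(), keytab.getAbsolutePath());
      System.out.println("Logged in as " + ugi.getUserName()
          + " via " + ugi.getAuthenticationMethod());
    } finally {
      kdc.stop();
    }
  }
}
```

Because the KDC, principals and keytabs are created per run, there is no Maven profile to toggle and no external server to start or kill.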
--- hadoop-common-project/hadoop-common/pom.xml | 85 ------ .../security/TestUGIWithSecurityOn.java | 117 -------- .../src/test/resources/kdc/killKdc.sh | 19 -- .../src/test/resources/kdc/ldif/users.ldif | 78 ------ .../src/test/resources/kdc/server.xml | 258 ------------------ 5 files changed, 557 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java delete mode 100644 hadoop-common-project/hadoop-common/src/test/resources/kdc/killKdc.sh delete mode 100644 hadoop-common-project/hadoop-common/src/test/resources/kdc/ldif/users.ldif delete mode 100644 hadoop-common-project/hadoop-common/src/test/resources/kdc/server.xml diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 10417eb910bf3..73b4c18b1503e 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -30,7 +30,6 @@ jar - src/test/resources/kdc common true true @@ -462,8 +461,6 @@ maven-surefire-plugin - ${startKdc} - ${kdc.resource.dir} ${runningWithNative} @@ -544,7 +541,6 @@ src/main/native/m4/* src/test/empty-file src/test/all-tests - src/test/resources/kdc/ldif/users.ldif src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h @@ -862,87 +858,6 @@ - - - - startKdc - - - startKdc - true - - - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - enforce-os - - enforce - - - - - - mac - unix - - - true - - - - - - org.apache.maven.plugins - maven-antrun-plugin - - - kdc - compile - - run - - - - - - - - - - - - - - - - - - - - - - killKdc - test - - run - - - - - - - - - - - - - parallel-tests diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java deleted file mode 100644 index 028cc38f1b358..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java +++ /dev/null @@ -1,117 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.security; - -import java.io.IOException; -import java.security.PrivilegedAction; -import java.util.Set; - -import javax.security.auth.kerberos.KerberosPrincipal; - -import org.junit.Assert; -import static org.junit.Assert.*; - - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -import org.junit.Assume; -import org.junit.Before; -import org.junit.Test; - -public class TestUGIWithSecurityOn { - - public static boolean isKdcRunning() { - String startKdc = System.getProperty("startKdc"); - if(startKdc == null || !startKdc.equals("true")) { - return false; - } - return true; - } - - @Before - public void testKdcRunning() { - //Tests are skipped if KDC is not running - Assume.assumeTrue(isKdcRunning()); - } - @Test - public void testLogin() throws IOException { - String nn1keyTabFilepath = System.getProperty("kdc.resource.dir") - + "/keytabs/nn1.keytab"; - String user1keyTabFilepath = System.getProperty("kdc.resource.dir") - + "/keytabs/user1.keytab"; - Configuration conf = new Configuration(); - SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); - UserGroupInformation.setConfiguration(conf); - - UserGroupInformation ugiNn = UserGroupInformation - .loginUserFromKeytabAndReturnUGI("nn1/localhost@EXAMPLE.COM", - nn1keyTabFilepath); - UserGroupInformation ugiDn = UserGroupInformation - .loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM", - user1keyTabFilepath); - - Assert.assertEquals(AuthenticationMethod.KERBEROS, - ugiNn.getAuthenticationMethod()); - Assert.assertEquals(AuthenticationMethod.KERBEROS, - ugiDn.getAuthenticationMethod()); - - try { - UserGroupInformation - .loginUserFromKeytabAndReturnUGI("bogus@EXAMPLE.COM", - nn1keyTabFilepath); - Assert.fail("Login should have failed"); - } catch (Exception ex) { - ex.printStackTrace(); - } - } - - @Test - public void testGetUGIFromKerberosSubject() throws IOException { - String user1keyTabFilepath = System.getProperty("kdc.resource.dir") - + "/keytabs/user1.keytab"; - - UserGroupInformation ugi = UserGroupInformation - .loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM", - user1keyTabFilepath); - Set principals = ugi.getSubject().getPrincipals( - KerberosPrincipal.class); - if (principals.isEmpty()) { - Assert.fail("There should be a kerberos principal in the subject."); - } - else { - UserGroupInformation ugi2 = UserGroupInformation.getUGIFromSubject( - ugi.getSubject()); - if (ugi2 != null) { - ugi2.doAs(new PrivilegedAction() { - - @Override - public Object run() { - try { - UserGroupInformation ugi3 = UserGroupInformation.getCurrentUser(); - String doAsUserName = ugi3.getUserName(); - assertEquals(doAsUserName, "user1@EXAMPLE.COM"); - System.out.println("DO AS USERNAME: " + doAsUserName); - } catch (IOException e) { - e.printStackTrace(); - } - return null; - } - }); - } - } - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/resources/kdc/killKdc.sh b/hadoop-common-project/hadoop-common/src/test/resources/kdc/killKdc.sh deleted file mode 100644 index a6a3d77a3e570..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/resources/kdc/killKdc.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ps -ef | grep apacheds | grep -v grep | awk '{printf $2"\n"}' | xargs -t --no-run-if-empty kill -9 - diff --git a/hadoop-common-project/hadoop-common/src/test/resources/kdc/ldif/users.ldif b/hadoop-common-project/hadoop-common/src/test/resources/kdc/ldif/users.ldif deleted file mode 100644 index a3d2704949c44..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/resources/kdc/ldif/users.ldif +++ /dev/null @@ -1,78 +0,0 @@ -dn: dc=example,dc=com -objectClass: dcObject -objectClass: organization -objectClass: top -dc: example -o: example.com - -dn: ou=Users,dc=example,dc=com -objectClass: organizationalUnit -objectClass: top -ou: Users - -dn: uid=user1,ou=Users,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: inetOrgPerson -objectClass: krb5principal -objectClass: krb5kdcentry -cn: user1 Service -sn: Service -uid: user1 -userPassword: secret -krb5PrincipalName: user1@EXAMPLE.COM -krb5KeyVersionNumber: 0 - -dn: uid=krbtgt,ou=Users,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: inetOrgPerson -objectClass: krb5principal -objectClass: krb5kdcentry -cn: KDC Service -sn: Service -uid: krbtgt -userPassword: secret -krb5PrincipalName: krbtgt/EXAMPLE.COM@EXAMPLE.COM -krb5KeyVersionNumber: 0 - -dn: uid=ldap,ou=Users,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: inetOrgPerson -objectClass: krb5principal -objectClass: krb5kdcentry -cn: LDAP -sn: Service -uid: ldap -userPassword: randall -krb5PrincipalName: ldap/localhost@EXAMPLE.COM -krb5KeyVersionNumber: 0 - -dn: uid=nn1,ou=Users,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: inetOrgPerson -objectClass: krb5principal -objectClass: krb5kdcentry -cn: NameNode Service -sn: Service -uid: nn1 -userPassword: secret -krb5PrincipalName: nn1/localhost@EXAMPLE.COM -krb5KeyVersionNumber: 0 - -dn: uid=dn1,ou=Users,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: inetOrgPerson -objectClass: krb5principal -objectClass: krb5kdcentry -cn: DataNode Service -sn: Service -uid: dn1 -userPassword: secret -krb5PrincipalName: dn1/localhost@EXAMPLE.COM -krb5KeyVersionNumber: 0 - - diff --git a/hadoop-common-project/hadoop-common/src/test/resources/kdc/server.xml b/hadoop-common-project/hadoop-common/src/test/resources/kdc/server.xml deleted file mode 100644 index bb8c52a9976ad..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/resources/kdc/server.xml +++ /dev/null @@ -1,258 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #directoryService - - - - - - - - - - - - - - - - - - #directoryService - - - - - - - - - - - - - - example.com - apache.org - - - - - - - - - - - - - - #ldapServer - - - - - From 72bef0f6cb58bf2f237be9d65f0d1f62b08a4524 Mon Sep 17 00:00:00 2001 From: sdeka Date: Thu, 23 May 2019 11:36:40 +0530 Subject: [PATCH 
0022/1308] fixed merge error. adapted to new signature of BlockUtils getDB --- .../ozone/container/ozoneimpl/TestOzoneContainer.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 7cdb692597e3b..6c089021cb372 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -33,13 +33,13 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.utils.MetadataStore; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -155,7 +155,7 @@ private long addBlocks(KeyValueContainer container, long freeBytes = container.getContainerData().getMaxSize(); long containerId = container.getContainerData().getContainerID(); - MetadataStore metadataStore = BlockUtils.getDB(container + ContainerCache.ReferenceCountedDB db = BlockUtils.getDB(container .getContainerData(), conf); for (int bi = 0; bi < blocks; bi++) { @@ -173,7 +173,7 @@ private long addBlocks(KeyValueContainer container, chunkList.add(info.getProtoBufMessage()); } blockData.setChunks(chunkList); - metadataStore.put(Longs.toByteArray(blockID.getLocalID()), + db.getStore().put(Longs.toByteArray(blockID.getLocalID()), blockData.getProtoBufMessage().toByteArray()); } From ca93760504487f4e1a821585d5481b235ba9aaba Mon Sep 17 00:00:00 2001 From: sdeka Date: Thu, 23 May 2019 20:49:52 +0530 Subject: [PATCH 0023/1308] fixed checkstyle issue post merge --- .../hadoop/ozone/container/ozoneimpl/ContainerReader.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 2d14341653d18..37b726ddbb31b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -215,7 +215,7 @@ public void verifyAndFixupContainerData(ContainerData containerData) .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId)); } if (kvContainer.getContainerState() - == ContainerProtos.ContainerDataProto.State.OPEN) { + == ContainerProtos.ContainerDataProto.State.OPEN) { // commitSpace for Open Containers relies on usedBytes initializeUsedBytes(kvContainer); } From ea0b1d8fba57f56e2a75e9a70d4768ba75952823 Mon Sep 17 00:00:00 2001 From: Eric 
Yang Date: Thu, 23 May 2019 11:36:32 -0400 Subject: [PATCH 0024/1308] HADOOP-16287. Implement ProxyUserAuthenticationFilter for web protocol impersonation. Contributed by Prabhu Joseph --- hadoop-common-project/hadoop-common/pom.xml | 10 ++ .../server/ProxyUserAuthenticationFilter.java | 115 ++++++++++++++++ ...xyUserAuthenticationFilterInitializer.java | 60 +++++++++ .../authentication/server/package-info.java | 22 +++ .../src/site/markdown/HttpAuthentication.md | 8 ++ .../TestProxyUserAuthenticationFilter.java | 125 ++++++++++++++++++ 6 files changed, 340 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilterInitializer.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/package-info.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authentication/server/TestProxyUserAuthenticationFilter.java diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 73b4c18b1503e..54efeeb0602b6 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -160,6 +160,16 @@ junit test + + org.assertj + assertj-core + test + + + org.glassfish.grizzly + grizzly-http-servlet + test + commons-beanutils commons-beanutils diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java new file mode 100644 index 0000000000000..42902b31601fd --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java @@ -0,0 +1,115 @@ +/** + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. 
+ */ +package org.apache.hadoop.security.authentication.server; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.HttpExceptionUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.security.Principal; +import java.util.Enumeration; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletRequestWrapper; +import javax.servlet.http.HttpServletResponse; + +/** + * AuthenticationFilter which adds support to perform operations + * using end user instead of proxy user. Fetches the end user from + * doAs Query Parameter. + */ +public class ProxyUserAuthenticationFilter extends AuthenticationFilter { + + private static final Logger LOG = LoggerFactory.getLogger( + ProxyUserAuthenticationFilter.class); + + private static final String DO_AS = "doAs"; + public static final String PROXYUSER_PREFIX = "proxyuser"; + + @Override + public void init(FilterConfig filterConfig) throws ServletException { + Configuration conf = getProxyuserConfiguration(filterConfig); + ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX); + super.init(filterConfig); + } + + @Override + protected void doFilter(FilterChain filterChain, HttpServletRequest request, + HttpServletResponse response) throws IOException, ServletException { + + String doAsUser = request.getParameter(DO_AS); + if (doAsUser != null && !doAsUser.equals(request.getRemoteUser())) { + LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ", + doAsUser, request.getRemoteUser(), request.getRemoteAddr()); + UserGroupInformation requestUgi = (request.getUserPrincipal() != null) ? 
+ UserGroupInformation.createRemoteUser(request.getRemoteUser()) + : null; + if (requestUgi != null) { + requestUgi = UserGroupInformation.createProxyUser(doAsUser, + requestUgi); + try { + ProxyUsers.authorize(requestUgi, request.getRemoteAddr()); + + final UserGroupInformation ugiF = requestUgi; + request = new HttpServletRequestWrapper(request) { + @Override + public String getRemoteUser() { + return ugiF.getShortUserName(); + } + + @Override + public Principal getUserPrincipal() { + return new Principal() { + @Override + public String getName() { + return ugiF.getUserName(); + } + }; + } + }; + LOG.debug("Proxy user Authentication successful"); + } catch (AuthorizationException ex) { + HttpExceptionUtils.createServletExceptionResponse(response, + HttpServletResponse.SC_FORBIDDEN, ex); + LOG.warn("Proxy user Authentication exception", ex); + return; + } + } + } + super.doFilter(filterChain, request, response); + } + + protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) + throws ServletException { + Configuration conf = new Configuration(false); + Enumeration names = filterConfig.getInitParameterNames(); + while (names.hasMoreElements()) { + String name = (String) names.nextElement(); + if (name.startsWith(PROXYUSER_PREFIX + ".")) { + String value = filterConfig.getInitParameter(name); + conf.set(name, value); + } + } + return conf; + } + +} + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilterInitializer.java new file mode 100644 index 0000000000000..ad79742aeb528 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilterInitializer.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.security.authentication.server; + +import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.http.FilterContainer; +import org.apache.hadoop.http.FilterInitializer; +import org.apache.hadoop.security.AuthenticationFilterInitializer; +import org.apache.hadoop.security.authorize.ProxyUsers; + +/** + * Filter initializer to initialize + * {@link ProxyUserAuthenticationFilter} which adds support + * to perform operations using end user instead of proxy user. 
+ */ +public class ProxyUserAuthenticationFilterInitializer + extends FilterInitializer { + + private String configPrefix; + + public ProxyUserAuthenticationFilterInitializer() { + this.configPrefix = "hadoop.http.authentication."; + } + + protected Map createFilterConfig(Configuration conf) { + Map filterConfig = AuthenticationFilterInitializer + .getFilterConfigMap(conf, configPrefix); + //Add proxy user configs + for (Map.Entry entry : conf.getPropsWithPrefix( + ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) { + filterConfig.put("proxyuser" + entry.getKey(), entry.getValue()); + } + return filterConfig; + } + + @Override + public void initFilter(FilterContainer container, Configuration conf) { + Map filterConfig = createFilterConfig(conf); + container.addFilter("ProxyUserAuthenticationFilter", + ProxyUserAuthenticationFilter.class.getName(), filterConfig); + } + +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/package-info.java new file mode 100644 index 0000000000000..b0accf71b351a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Provides the server-side framework for authentication. + */ +package org.apache.hadoop.security.authentication.server; diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md index 721abea93b764..97d12fb30168b 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md @@ -64,3 +64,11 @@ Add org.apache.hadoop.security.HttpCrossOriginFilterInitializer to hadoop.http.f | hadoop.http.cross-origin.allowed-methods | `GET,POST,HEAD` | Comma separated list of methods that are allowed | | hadoop.http.cross-origin.allowed-headers | `X-Requested-With,Content-Type,Accept,Origin` | Comma separated list of headers that are allowed | | hadoop.http.cross-origin.max-age | `1800` | Number of seconds a pre-flighted request can be cached | + + +Trusted Proxy +------------- +Trusted Proxy adds support to perform operations using end user instead of proxy user. It fetches the end user from +doAs query parameter. 
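As an illustration only (the host, port, and user names below are invented), a superuser `knox` allowed to act on behalf of end user `alice` would be declared through the usual `hadoop.proxyuser.*` allow-list in core-site.xml, and the caller would then add the `doAs` parameter to a request it sends while authenticated as `knox`:

    hadoop.proxyuser.knox.users    alice
    hadoop.proxyuser.knox.hosts    knox-gw.example.com

    curl --negotiate -u : 'http://namenode.example.com:9870/jmx?doAs=alice'
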
To enable Trusted Proxy, please set the following configuration parameter: + +Add org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer to hadoop.http.filter.initializers at the end in core-site.xml. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authentication/server/TestProxyUserAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authentication/server/TestProxyUserAuthenticationFilter.java new file mode 100644 index 0000000000000..019ab798c64cb --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authentication/server/TestProxyUserAuthenticationFilter.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.security.authentication.server; + +import java.security.Principal; +import java.util.Collections; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Map; + +import javax.servlet.FilterConfig; +import javax.servlet.FilterChain; +import javax.servlet.ServletContext; +import javax.servlet.ServletResponse; +import javax.servlet.ServletRequest; +import javax.servlet.http.HttpServletRequest; + +import static org.assertj.core.api.Assertions.assertThat; +import org.glassfish.grizzly.servlet.HttpServletResponseImpl; +import org.junit.Test; +import org.mockito.Mockito; + + +/** + * Test ProxyUserAuthenticationFilter with doAs Request Parameter. 
+ */ +public class TestProxyUserAuthenticationFilter { + + private String actualUser; + + private static class DummyFilterConfig implements FilterConfig { + private final Map map; + + DummyFilterConfig(Map map) { + this.map = map; + } + + @Override + public String getFilterName() { + return "dummy"; + } + + @Override + public String getInitParameter(String param) { + return map.get(param); + } + + @Override + public Enumeration getInitParameterNames() { + return Collections.enumeration(map.keySet()); + } + + @Override + public ServletContext getServletContext() { + ServletContext context = Mockito.mock(ServletContext.class); + Mockito.when(context.getAttribute( + AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE)) + .thenReturn(null); + return context; + } + } + + private class HttpServletResponseForTest extends HttpServletResponseImpl { + + } + + + @Test(timeout = 10000) + public void testFilter() throws Exception { + Map params = new HashMap(); + params.put("proxyuser.knox.users", "testuser"); + params.put("proxyuser.knox.hosts", "127.0.0.1"); + params.put("type", "simple"); + + FilterConfig config = new DummyFilterConfig(params); + + FilterChain chain = new FilterChain() { + @Override + public void doFilter(ServletRequest servletRequest, + ServletResponse servletResponse) { + HttpServletRequest request = (HttpServletRequest) servletRequest; + actualUser = request.getRemoteUser(); + } + }; + + ProxyUserAuthenticationFilter testFilter = + new ProxyUserAuthenticationFilter(); + testFilter.init(config); + + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + Mockito.when(request.getRemoteUser()).thenReturn("knox"); + Mockito.when(request.getParameter("doAs")).thenReturn("testuser"); + Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1"); + Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() { + @Override + public String getName() { + return "knox@EXAMPLE.COM"; + } + }); + + HttpServletResponseForTest response = new HttpServletResponseForTest(); + + testFilter.doFilter(chain, request, response); + + assertThat(actualUser).isEqualTo("testuser"); + } + + +} From 7b03072fd466de5817fdcd65f9dd88fd59c0bb00 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Thu, 23 May 2019 12:08:44 -0400 Subject: [PATCH 0025/1308] YARN-9080. Added clean up of bucket directories. 
Contributed by Prabhu Joseph, Peter Bacsko, Szilard Nemeth --- .../timeline/EntityGroupFSTimelineStore.java | 65 ++++++++++++++----- .../TestEntityGroupFSTimelineStore.java | 53 ++++++++++++++- 2 files changed, 100 insertions(+), 18 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java index 80baf897376a4..498230ae7586a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java @@ -24,6 +24,8 @@ import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.commons.lang3.mutable.MutableBoolean; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -456,43 +458,76 @@ private AppLogs getAndSetAppLogs(ApplicationId applicationId) * dirpath should be a directory that contains a set of * application log directories. The cleaner method will not * work if the given dirpath itself is an application log dir. - * @param fs * @param retainMillis * @throws IOException */ @InterfaceAudience.Private @VisibleForTesting - void cleanLogs(Path dirpath, FileSystem fs, long retainMillis) + void cleanLogs(Path dirpath, long retainMillis) throws IOException { long now = Time.now(); + RemoteIterator iter = list(dirpath); + while (iter.hasNext()) { + FileStatus stat = iter.next(); + Path clusterTimeStampPath = stat.getPath(); + if (isValidClusterTimeStampDir(clusterTimeStampPath)) { + MutableBoolean appLogDirPresent = new MutableBoolean(false); + cleanAppLogDir(clusterTimeStampPath, retainMillis, appLogDirPresent); + if (appLogDirPresent.isFalse() && + (now - stat.getModificationTime() > retainMillis)) { + deleteDir(clusterTimeStampPath); + } + } + } + } + + + private void cleanAppLogDir(Path dirpath, long retainMillis, + MutableBoolean appLogDirPresent) throws IOException { + long now = Time.now(); // Depth first search from root directory for all application log dirs RemoteIterator iter = list(dirpath); while (iter.hasNext()) { FileStatus stat = iter.next(); + Path childPath = stat.getPath(); if (stat.isDirectory()) { // If current is an application log dir, decide if we need to remove it // and remove if necessary. // Otherwise, keep iterating into it. 
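For orientation, the done-directory layout this cleaner walks looks roughly like the sketch below (names are invented; only the shape matters). The root holds numeric cluster-timestamp directories, each of which holds two levels of bucket directories and finally per-application log directories; a timestamp directory is removed as a whole only when no application directory remains under it and it is older than the retention period:

    <done-root>/
        1558000000000/                                  numeric cluster-timestamp dir
            0000/000/application_1558000000000_0001/    app log dir, cleaned per retainMillis
            0000/001/application_1558000000000_0002/
        1235/                                           no app dirs underneath -> removed whole
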
- ApplicationId appId = parseApplicationId(dirpath.getName()); + ApplicationId appId = parseApplicationId(childPath.getName()); if (appId != null) { // Application log dir - if (shouldCleanAppLogDir(dirpath, now, fs, retainMillis)) { - try { - LOG.info("Deleting {}", dirpath); - if (!fs.delete(dirpath, true)) { - LOG.error("Unable to remove " + dirpath); - } - metrics.incrLogsDirsCleaned(); - } catch (IOException e) { - LOG.error("Unable to remove " + dirpath, e); - } + appLogDirPresent.setTrue(); + if (shouldCleanAppLogDir(childPath, now, fs, retainMillis)) { + deleteDir(childPath); } } else { // Keep cleaning inside - cleanLogs(stat.getPath(), fs, retainMillis); + cleanAppLogDir(childPath, retainMillis, appLogDirPresent); } } } } + private void deleteDir(Path path) { + try { + LOG.info("Deleting {}", path); + if (fs.delete(path, true)) { + metrics.incrLogsDirsCleaned(); + } else { + LOG.error("Unable to remove {}", path); + } + } catch (IOException e) { + LOG.error("Unable to remove {}", path, e); + } + } + + private boolean isValidClusterTimeStampDir(Path clusterTimeStampPath) + throws IOException { + FileStatus stat = fs.getFileStatus(clusterTimeStampPath); + return stat.isDirectory() && + StringUtils.isNumeric(clusterTimeStampPath.getName()); + } + + private static boolean shouldCleanAppLogDir(Path appLogPath, long now, FileSystem fs, long logRetainMillis) throws IOException { RemoteIterator iter = fs.listStatusIterator(appLogPath); @@ -908,7 +943,7 @@ public void run() { LOG.debug("Cleaner starting"); long startTime = Time.monotonicNow(); try { - cleanLogs(doneRootPath, fs, logRetainMillis); + cleanLogs(doneRootPath, logRetainMillis); } catch (Exception e) { Throwable t = extract(e); if (t instanceof InterruptedException) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java index 64354166358c6..dc10912586aac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java @@ -268,7 +268,8 @@ public void testCleanLogs() throws Exception { Path irrelevantDirPath = new Path(testDoneDirPath, "irrelevant"); fs.mkdirs(irrelevantDirPath); - Path doneAppHomeDir = new Path(new Path(testDoneDirPath, "0000"), "001"); + Path doneAppHomeDir = new Path(new Path(new Path(testDoneDirPath, + Long.toString(mainTestAppId.getClusterTimestamp())), "0000"), "001"); // First application, untouched after creation Path appDirClean = new Path(doneAppHomeDir, appDirName); Path attemptDirClean = new Path(appDirClean, attemptDirName); @@ -300,7 +301,7 @@ public void testCleanLogs() throws Exception { // Should retain all logs after this run MutableCounterLong dirsCleaned = store.metrics.getLogsDirsCleaned(); long before = dirsCleaned.value(); - store.cleanLogs(testDoneDirPath, fs, 10000); + store.cleanLogs(testDoneDirPath, 10000); assertTrue(fs.exists(irrelevantDirPath)); assertTrue(fs.exists(irrelevantFilePath)); assertTrue(fs.exists(filePath)); @@ -317,7 +318,7 @@ public void 
testCleanLogs() throws Exception { // Touch the third application by creating a new dir fs.mkdirs(new Path(dirPathHold, "holdByMe")); - store.cleanLogs(testDoneDirPath, fs, 1000); + store.cleanLogs(testDoneDirPath, 1000); // Verification after the second cleaner call assertTrue(fs.exists(irrelevantDirPath)); @@ -332,6 +333,52 @@ public void testCleanLogs() throws Exception { assertEquals(before + 2L, dirsCleaned.value()); } + @Test + public void testCleanBuckets() throws Exception { + // ClusterTimeStampDir with App Log Dirs + Path clusterTimeStampDir1 = new Path(testDoneDirPath, + Long.toString(sampleAppIds.get(0).getClusterTimestamp())); + Path appDir1 = new Path(new Path(new Path( + clusterTimeStampDir1, "0000"), "000"), sampleAppIds.get(0).toString()); + Path appDir2 = new Path(new Path(new Path( + clusterTimeStampDir1, "0000"), "001"), sampleAppIds.get(1).toString()); + Path appDir3 = new Path(new Path(new Path( + clusterTimeStampDir1, "0000"), "002"), sampleAppIds.get(2).toString()); + Path appDir4 = new Path(new Path(new Path( + clusterTimeStampDir1, "0001"), "000"), sampleAppIds.get(3).toString()); + + // ClusterTimeStampDir with no App Log Dirs + Path clusterTimeStampDir2 = new Path(testDoneDirPath, "1235"); + + // Irrevelant ClusterTimeStampDir + Path clusterTimeStampDir3 = new Path(testDoneDirPath, "irrevelant"); + Path appDir5 = new Path(new Path(new Path( + clusterTimeStampDir3, "0000"), "000"), sampleAppIds.get(4).toString()); + + fs.mkdirs(appDir1); + fs.mkdirs(appDir2); + fs.mkdirs(appDir3); + fs.mkdirs(appDir4); + fs.mkdirs(clusterTimeStampDir2); + fs.mkdirs(appDir5); + + Thread.sleep(2000); + + store.cleanLogs(testDoneDirPath, 1000); + + // ClusterTimeStampDir will be removed only if no App Log Dir Present + assertTrue(fs.exists(clusterTimeStampDir1)); + assertFalse(fs.exists(appDir1)); + assertFalse(fs.exists(appDir2)); + assertFalse(fs.exists(appDir3)); + assertFalse(fs.exists(appDir4)); + assertFalse(fs.exists(clusterTimeStampDir2)); + assertTrue(fs.exists(appDir5)); + + store.cleanLogs(testDoneDirPath, 1000); + assertFalse(fs.exists(clusterTimeStampDir1)); + } + @Test public void testPluginRead() throws Exception { // Verify precondition From e2b79912c38ba6f85f321c890feee520360fe8d9 Mon Sep 17 00:00:00 2001 From: Igor Rudenko Date: Fri, 12 Apr 2019 17:34:16 +0300 Subject: [PATCH 0026/1308] YARN-9469. Fix typo in YarnConfiguration. --- .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index b4ed2b00dae35..6bbcdcb1e117b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1470,7 +1470,7 @@ public static boolean isAclEnabled(Configuration conf) { + "pmem-check-enabled"; public static final boolean DEFAULT_NM_PMEM_CHECK_ENABLED = true; - /** Specifies whether physical memory check is enabled. */ + /** Specifies whether virtual memory check is enabled. 
*/ public static final String NM_VMEM_CHECK_ENABLED = NM_PREFIX + "vmem-check-enabled"; public static final boolean DEFAULT_NM_VMEM_CHECK_ENABLED = true; From f96a2df38d889f29314c57f4d94227b2e419a11f Mon Sep 17 00:00:00 2001 From: Christopher Gregorian Date: Mon, 29 Apr 2019 15:37:25 -0700 Subject: [PATCH 0027/1308] HADOOP-16266. Add more fine-grained processing time metrics to the RPC layer. Contributed by Christopher Gregorian. --- .../apache/hadoop/ipc/CallQueueManager.java | 5 +- .../apache/hadoop/ipc/DecayRpcScheduler.java | 12 +- .../hadoop/ipc/DefaultRpcScheduler.java | 4 +- .../org/apache/hadoop/ipc/ExternalCall.java | 5 + .../apache/hadoop/ipc/ProcessingDetails.java | 96 +++++++++++ .../apache/hadoop/ipc/ProtobufRpcEngine.java | 31 +--- .../org/apache/hadoop/ipc/RpcScheduler.java | 41 ++++- .../java/org/apache/hadoop/ipc/Server.java | 151 ++++++++++++++---- .../apache/hadoop/ipc/WritableRpcEngine.java | 20 +-- .../ipc/metrics/RpcDetailedMetrics.java | 6 +- .../apache/hadoop/ipc/metrics/RpcMetrics.java | 63 +++++--- .../src/site/markdown/Metrics.md | 9 ++ .../hadoop/ipc/TestProcessingDetails.java | 61 +++++++ .../apache/hadoop/ipc/TestProtoBufRpc.java | 9 +- .../java/org/apache/hadoop/ipc/TestRPC.java | 18 ++- .../org/apache/hadoop/ipc/TestRpcBase.java | 28 ++++ .../src/test/proto/test_rpc_service.proto | 1 + .../server/namenode/FSNamesystemLock.java | 66 ++++++-- .../ha/TestConsistentReadsObserver.java | 5 - 19 files changed, 498 insertions(+), 133 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProcessingDetails.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProcessingDetails.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java index 9731e13d86b87..e18f307322746 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java @@ -193,9 +193,8 @@ boolean shouldBackOff(Schedulable e) { return scheduler.shouldBackOff(e); } - void addResponseTime(String name, int priorityLevel, int queueTime, - int processingTime) { - scheduler.addResponseTime(name, priorityLevel, queueTime, processingTime); + void addResponseTime(String name, Schedulable e, ProcessingDetails details) { + scheduler.addResponseTime(name, e, details); } // This should be only called once per call and cached in the call object diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java index 5410aebbd0100..38218b24c6480 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java @@ -55,6 +55,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ipc.ProcessingDetails.Timing; + /** * The decay RPC scheduler counts incoming requests in a map, then * decays the counts at a fixed time interval. 
The scheduler is optimized @@ -601,14 +603,18 @@ public boolean shouldBackOff(Schedulable obj) { } @Override - public void addResponseTime(String name, int priorityLevel, int queueTime, - int processingTime) { + public void addResponseTime(String callName, Schedulable schedulable, + ProcessingDetails details) { + int priorityLevel = schedulable.getPriorityLevel(); + long queueTime = details.get(Timing.QUEUE, TimeUnit.MILLISECONDS); + long processingTime = details.get(Timing.PROCESSING, TimeUnit.MILLISECONDS); + responseTimeCountInCurrWindow.getAndIncrement(priorityLevel); responseTimeTotalInCurrWindow.getAndAdd(priorityLevel, queueTime+processingTime); if (LOG.isDebugEnabled()) { LOG.debug("addResponseTime for call: {} priority: {} queueTime: {} " + - "processingTime: {} ", name, priorityLevel, queueTime, + "processingTime: {} ", callName, priorityLevel, queueTime, processingTime); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DefaultRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DefaultRpcScheduler.java index 0847af7f37b7a..696160ecb6c2b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DefaultRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DefaultRpcScheduler.java @@ -35,8 +35,8 @@ public boolean shouldBackOff(Schedulable obj) { } @Override - public void addResponseTime(String name, int priorityLevel, int queueTime, - int processingTime) { + public void addResponseTime(String callName, Schedulable schedulable, + ProcessingDetails details) { } public DefaultRpcScheduler(int priorityLevels, String namespace, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java index 5cc366561f091..39e55348c81e8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java @@ -37,6 +37,11 @@ public ExternalCall(PrivilegedExceptionAction action) { this.action = action; } + @Override + public String getDetailedMetricsName() { + return "(external)"; + } + public abstract UserGroupInformation getRemoteUser(); public final T get() throws InterruptedException, ExecutionException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProcessingDetails.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProcessingDetails.java new file mode 100644 index 0000000000000..5b97eec9c11cd --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProcessingDetails.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ipc; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.TimeUnit; + +/** + * Stores the times that a call takes to be processed through each step. + */ +@InterfaceStability.Unstable +@InterfaceAudience.Private +public class ProcessingDetails { + public static final Logger LOG = + LoggerFactory.getLogger(ProcessingDetails.class); + private final TimeUnit valueTimeUnit; + + /** + * The different stages to track the time of. + */ + public enum Timing { + ENQUEUE, // time for reader to insert in call queue. + QUEUE, // time in the call queue. + HANDLER, // handler overhead not spent in processing/response. + PROCESSING, // time handler spent processing the call. always equal to + // lock_free + lock_wait + lock_shared + lock_exclusive + LOCKFREE, // processing with no lock. + LOCKWAIT, // processing while waiting for lock. + LOCKSHARED, // processing with a read lock. + LOCKEXCLUSIVE, // processing with a write lock. + RESPONSE; // time to encode and send response. + } + + private long[] timings = new long[Timing.values().length]; + + ProcessingDetails(TimeUnit timeUnit) { + this.valueTimeUnit = timeUnit; + } + + public long get(Timing type) { + // When using nanoTime to fetch timing information, it is possible to see + // time "move backward" slightly under unusual/rare circumstances. To avoid + // displaying a confusing number, round such timings to 0 here. + long ret = timings[type.ordinal()]; + return ret < 0 ? 0 : ret; + } + + public long get(Timing type, TimeUnit timeUnit) { + return timeUnit.convert(get(type), valueTimeUnit); + } + + public void set(Timing type, long value) { + timings[type.ordinal()] = value; + } + + public void set(Timing type, long value, TimeUnit timeUnit) { + set(type, valueTimeUnit.convert(value, timeUnit)); + } + + public void add(Timing type, long value, TimeUnit timeUnit) { + timings[type.ordinal()] += valueTimeUnit.convert(value, timeUnit); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(256); + for (Timing type : Timing.values()) { + if (sb.length() > 0) { + sb.append(" "); + } + sb.append(type.name().toLowerCase()) + .append("Time=").append(get(type)); + } + return sb.toString(); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java index ae305279d02d2..c6b3fded7c731 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java @@ -521,46 +521,29 @@ public Writable call(RPC.Server server, String connectionProtocolName, Message param = request.getValue(prototype); Message result; - long startTime = Time.now(); - int qTime = (int) (startTime - receiveTime); - Exception exception = null; - boolean isDeferred = false; + Call currentCall = Server.getCurCall().get(); try { server.rpcDetailedMetrics.init(protocolImpl.protocolClass); currentCallInfo.set(new CallInfo(server, methodName)); + currentCall.setDetailedMetricsName(methodName); result = service.callBlockingMethod(methodDescriptor, null, param); // Check if this needs to be 
a deferred response, // by checking the ThreadLocal callback being set if (currentCallback.get() != null) { - Server.getCurCall().get().deferResponse(); - isDeferred = true; + currentCall.deferResponse(); currentCallback.set(null); return null; } } catch (ServiceException e) { - exception = (Exception) e.getCause(); + Exception exception = (Exception) e.getCause(); + currentCall.setDetailedMetricsName( + exception.getClass().getSimpleName()); throw (Exception) e.getCause(); } catch (Exception e) { - exception = e; + currentCall.setDetailedMetricsName(e.getClass().getSimpleName()); throw e; } finally { currentCallInfo.set(null); - int processingTime = (int) (Time.now() - startTime); - if (LOG.isDebugEnabled()) { - String msg = - "Served: " + methodName + (isDeferred ? ", deferred" : "") + - ", queueTime= " + qTime + - " procesingTime= " + processingTime; - if (exception != null) { - msg += " exception= " + exception.getClass().getSimpleName(); - } - LOG.debug(msg); - } - String detailedMetricsName = (exception == null) ? - methodName : - exception.getClass().getSimpleName(); - server.updateMetrics(detailedMetricsName, qTime, processingTime, - isDeferred); } return RpcWritable.wrap(result); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java index 95c5a13cdfa88..63812f47f2db0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java @@ -18,6 +18,8 @@ package org.apache.hadoop.ipc; +import java.util.concurrent.TimeUnit; + /** * Implement this interface to be used for RPC scheduling and backoff. * @@ -30,8 +32,43 @@ public interface RpcScheduler { boolean shouldBackOff(Schedulable obj); - void addResponseTime(String name, int priorityLevel, int queueTime, - int processingTime); + /** + * This method only exists to maintain backwards compatibility with old + * implementations. It will not be called by any Hadoop code, and should not + * be implemented by new implementations. + * + * @deprecated Use + * {@link #addResponseTime(String, Schedulable, ProcessingDetails)} instead. + */ + @Deprecated + @SuppressWarnings("unused") + default void addResponseTime(String name, int priorityLevel, int queueTime, + int processingTime) { + throw new UnsupportedOperationException( + "This method is deprecated: use the other addResponseTime"); + } + + /** + * Store a processing time value for an RPC call into this scheduler. + * + * @param callName The name of the call. + * @param schedulable The schedulable representing the incoming call. + * @param details The details of processing time. + */ + @SuppressWarnings("deprecation") + default void addResponseTime(String callName, Schedulable schedulable, + ProcessingDetails details) { + // For the sake of backwards compatibility with old implementations of + // this interface, a default implementation is supplied which uses the old + // method. All new implementations MUST override this interface and should + // NOT use the other addResponseTime method. 
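To make the new hook concrete, a minimal sketch of a plugin scheduler overriding the ProcessingDetails-based method could look like the class below. The class name and the choice to track lock-wait time are illustrative only; the constructor simply mirrors the (priorityLevels, namespace, conf) shape used by DefaultRpcScheduler.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProcessingDetails;
import org.apache.hadoop.ipc.RpcScheduler;
import org.apache.hadoop.ipc.Schedulable;

public class LockWaitAwareScheduler implements RpcScheduler {
  // Running total of time calls spent waiting on locks, in milliseconds.
  private final AtomicLong totalLockWaitMs = new AtomicLong();

  public LockWaitAwareScheduler(int priorityLevels, String namespace,
      Configuration conf) {
    // Nothing to configure in this sketch.
  }

  @Override
  public int getPriorityLevel(Schedulable obj) {
    return 0; // single priority level
  }

  @Override
  public boolean shouldBackOff(Schedulable obj) {
    return false;
  }

  @Override
  public void addResponseTime(String callName, Schedulable schedulable,
      ProcessingDetails details) {
    // The per-call breakdown now exposes lock wait separately from
    // overall processing time.
    totalLockWaitMs.addAndGet(
        details.get(ProcessingDetails.Timing.LOCKWAIT, TimeUnit.MILLISECONDS));
  }

  @Override
  public void stop() {
  }
}
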
+ int queueTimeMs = (int) + details.get(ProcessingDetails.Timing.QUEUE, TimeUnit.MILLISECONDS); + int processingTimeMs = (int) + details.get(ProcessingDetails.Timing.PROCESSING, TimeUnit.MILLISECONDS); + addResponseTime(callName, schedulable.getPriorityLevel(), + queueTimeMs, processingTimeMs); + } void stop(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 94d9bc33137fc..91cc4a60265bb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ipc; +import static org.apache.hadoop.ipc.ProcessingDetails.Timing; import static org.apache.hadoop.ipc.RpcConstants.AUTHORIZATION_FAILED_CALL_ID; import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID; import static org.apache.hadoop.ipc.RpcConstants.CURRENT_VERSION; @@ -64,6 +65,7 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -514,7 +516,7 @@ protected void setLogSlowRPC(boolean logSlowRPCFlag) { * if and only if it falls above 99.7% of requests. We start this logic * only once we have enough sample size. */ - void logSlowRpcCalls(String methodName, int processingTime) { + void logSlowRpcCalls(String methodName, Call call, long processingTime) { final int deviation = 3; // 1024 for minSampleSize just a guess -- not a number computed based on @@ -527,27 +529,47 @@ void logSlowRpcCalls(String methodName, int processingTime) { if ((rpcMetrics.getProcessingSampleCount() > minSampleSize) && (processingTime > threeSigma)) { - if(LOG.isWarnEnabled()) { - String client = CurCall.get().toString(); - LOG.warn( - "Slow RPC : " + methodName + " took " + processingTime + - " milliseconds to process from client " + client); - } + LOG.warn("Slow RPC : {} took {} {} to process from client {}", + methodName, processingTime, RpcMetrics.TIMEUNIT, call); rpcMetrics.incrSlowRpc(); } } - void updateMetrics(String name, int queueTime, int processingTime, - boolean deferredCall) { + void updateMetrics(Call call, long startTime, boolean connDropped) { + // delta = handler + processing + response + long deltaNanos = Time.monotonicNowNanos() - startTime; + long timestampNanos = call.timestampNanos; + + ProcessingDetails details = call.getProcessingDetails(); + // queue time is the delta between when the call first arrived and when it + // began being serviced, minus the time it took to be put into the queue + details.set(Timing.QUEUE, + startTime - timestampNanos - details.get(Timing.ENQUEUE)); + deltaNanos -= details.get(Timing.PROCESSING); + deltaNanos -= details.get(Timing.RESPONSE); + details.set(Timing.HANDLER, deltaNanos); + + long queueTime = details.get(Timing.QUEUE, RpcMetrics.TIMEUNIT); rpcMetrics.addRpcQueueTime(queueTime); - if (!deferredCall) { - rpcMetrics.addRpcProcessingTime(processingTime); - rpcDetailedMetrics.addProcessingTime(name, processingTime); - callQueue.addResponseTime(name, getPriorityLevel(), queueTime, - processingTime); - if (isLogSlowRPC()) { - logSlowRpcCalls(name, processingTime); - } + + if (call.isResponseDeferred() || connDropped) { + // call was skipped; don't include it in 
processing metrics + return; + } + + long processingTime = + details.get(Timing.PROCESSING, RpcMetrics.TIMEUNIT); + long waitTime = + details.get(Timing.LOCKWAIT, RpcMetrics.TIMEUNIT); + rpcMetrics.addRpcLockWaitTime(waitTime); + rpcMetrics.addRpcProcessingTime(processingTime); + // don't include lock wait for detailed metrics. + processingTime -= waitTime; + String name = call.getDetailedMetricsName(); + rpcDetailedMetrics.addProcessingTime(name, processingTime); + callQueue.addResponseTime(name, call, details); + if (isLogSlowRPC()) { + logSlowRpcCalls(name, call, processingTime); } } @@ -716,9 +738,13 @@ static boolean getClientBackoffEnable( /** A generic call queued for handling. */ public static class Call implements Schedulable, PrivilegedExceptionAction { + private final ProcessingDetails processingDetails = + new ProcessingDetails(TimeUnit.NANOSECONDS); + // the method name to use in metrics + private volatile String detailedMetricsName = ""; final int callId; // the client's call id final int retryCount; // the retry count of the call - long timestamp; // time received when response is null + long timestampNanos; // time received when response is null // time served when response is not null private AtomicInteger responseWaitCount = new AtomicInteger(1); final RPC.RpcKind rpcKind; @@ -755,7 +781,7 @@ public Call(int id, int retryCount, Void ignore1, Void ignore2, TraceScope traceScope, CallerContext callerContext) { this.callId = id; this.retryCount = retryCount; - this.timestamp = Time.now(); + this.timestampNanos = Time.monotonicNowNanos(); this.rpcKind = kind; this.clientId = clientId; this.traceScope = traceScope; @@ -764,6 +790,28 @@ public Call(int id, int retryCount, Void ignore1, Void ignore2, this.isCallCoordinated = false; } + /** + * Indicates whether the call has been processed. Always true unless + * overridden. 
+ * + * @return true + */ + boolean isOpen() { + return true; + } + + String getDetailedMetricsName() { + return detailedMetricsName; + } + + void setDetailedMetricsName(String name) { + detailedMetricsName = name; + } + + public ProcessingDetails getProcessingDetails() { + return processingDetails; + } + @Override public String toString() { return "Call#" + callId + " Retry#" + retryCount; @@ -911,6 +959,11 @@ private class RpcCall extends Call { this.rpcRequest = param; } + @Override + boolean isOpen() { + return connection.channel.isOpen(); + } + void setResponseFields(Writable returnValue, ResponseParams responseParams) { this.rv = returnValue; @@ -938,18 +991,33 @@ public Void run() throws Exception { Server.LOG.info(Thread.currentThread().getName() + ": skipped " + this); return null; } + + long startNanos = Time.monotonicNowNanos(); Writable value = null; ResponseParams responseParams = new ResponseParams(); try { value = call( - rpcKind, connection.protocolName, rpcRequest, timestamp); + rpcKind, connection.protocolName, rpcRequest, timestampNanos); } catch (Throwable e) { populateResponseParamsOnError(e, responseParams); } if (!isResponseDeferred()) { + long deltaNanos = Time.monotonicNowNanos() - startNanos; + ProcessingDetails details = getProcessingDetails(); + + details.set(Timing.PROCESSING, deltaNanos, TimeUnit.NANOSECONDS); + deltaNanos -= details.get(Timing.LOCKWAIT, TimeUnit.NANOSECONDS); + deltaNanos -= details.get(Timing.LOCKSHARED, TimeUnit.NANOSECONDS); + deltaNanos -= details.get(Timing.LOCKEXCLUSIVE, TimeUnit.NANOSECONDS); + details.set(Timing.LOCKFREE, deltaNanos, TimeUnit.NANOSECONDS); + startNanos = Time.monotonicNowNanos(); + setResponseFields(value, responseParams); sendResponse(); + + deltaNanos = Time.monotonicNowNanos() - startNanos; + details.set(Timing.RESPONSE, deltaNanos, TimeUnit.NANOSECONDS); } else { if (LOG.isDebugEnabled()) { LOG.debug("Deferring response for callId: " + this.callId); @@ -1377,12 +1445,13 @@ Reader getReader() { } } + private final static long PURGE_INTERVAL_NANOS = TimeUnit.NANOSECONDS.convert( + 15, TimeUnit.MINUTES); + // Sends responses of RPC back to clients. private class Responder extends Thread { private final Selector writeSelector; private int pending; // connections waiting to register - - final static int PURGE_INTERVAL = 900000; // 15mins Responder() throws IOException { this.setName("IPC Server Responder"); @@ -1408,12 +1477,12 @@ public void run() { } private void doRunLoop() { - long lastPurgeTime = 0; // last check for old calls. + long lastPurgeTimeNanos = 0; // last check for old calls. while (running) { try { waitPending(); // If a channel is being registered, wait. - writeSelector.select(PURGE_INTERVAL); + writeSelector.select(PURGE_INTERVAL_NANOS); Iterator iter = writeSelector.selectedKeys().iterator(); while (iter.hasNext()) { SelectionKey key = iter.next(); @@ -1435,11 +1504,11 @@ private void doRunLoop() { LOG.info(Thread.currentThread().getName() + ": doAsyncWrite threw exception " + e); } } - long now = Time.now(); - if (now < lastPurgeTime + PURGE_INTERVAL) { + long nowNanos = Time.monotonicNowNanos(); + if (nowNanos < lastPurgeTimeNanos + PURGE_INTERVAL_NANOS) { continue; } - lastPurgeTime = now; + lastPurgeTimeNanos = nowNanos; // // If there were some calls that have not been sent out for a // long time, discard them. 
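Taken together, the accounting introduced above works out to the following informal identities (all values tracked internally in nanoseconds):

    PROCESSING = LOCKFREE + LOCKWAIT + LOCKSHARED + LOCKEXCLUSIVE
    HANDLER    = handler wall time - PROCESSING - RESPONSE
    QUEUE      = handler start - call arrival - ENQUEUE

and the per-method detailed metric records PROCESSING minus LOCKWAIT, so lock wait is only visible through the new aggregate lock-wait rate in RpcMetrics.
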
@@ -1463,7 +1532,7 @@ private void doRunLoop() { } for (RpcCall call : calls) { - doPurge(call, now); + doPurge(call, nowNanos); } } catch (OutOfMemoryError e) { // @@ -1514,7 +1583,7 @@ private void doPurge(RpcCall call, long now) { Iterator iter = responseQueue.listIterator(0); while (iter.hasNext()) { call = iter.next(); - if (now > call.timestamp + PURGE_INTERVAL) { + if (now > call.timestampNanos + PURGE_INTERVAL_NANOS) { closeConnection(call.connection); break; } @@ -1578,7 +1647,7 @@ private boolean processResponse(LinkedList responseQueue, if (inHandler) { // set the serve time when the response has to be sent later - call.timestamp = Time.now(); + call.timestampNanos = Time.monotonicNowNanos(); incPending(); try { @@ -2770,6 +2839,9 @@ private void internalQueueCall(Call call, boolean blocking) } else { callQueue.add(call); } + long deltaNanos = Time.monotonicNowNanos() - call.timestampNanos; + call.getProcessingDetails().set(Timing.ENQUEUE, deltaNanos, + TimeUnit.NANOSECONDS); } catch (CallQueueOverflowException cqe) { // If rpc scheduler indicates back off based on performance degradation // such as response time or rpc queue is full, we will ask the client @@ -2797,8 +2869,16 @@ public void run() { SERVER.set(Server.this); while (running) { TraceScope traceScope = null; + Call call = null; + long startTimeNanos = 0; + // True iff the connection for this call has been dropped. + // Set to true by default and update to false later if the connection + // can be succesfully read. + boolean connDropped = true; + try { - final Call call = callQueue.take(); // pop the queue; maybe blocked here + call = callQueue.take(); // pop the queue; maybe blocked here + startTimeNanos = Time.monotonicNowNanos(); if (alignmentContext != null && call.isCallCoordinated() && call.getClientStateId() > alignmentContext.getLastSeenStateId()) { /* @@ -2829,6 +2909,7 @@ public void run() { // always update the current call context CallerContext.setCurrent(call.callerContext); UserGroupInformation remoteUser = call.getRemoteUser(); + connDropped = !call.isOpen(); if (remoteUser != null) { remoteUser.doAs(call); } else { @@ -2851,6 +2932,14 @@ public void run() { } finally { CurCall.set(null); IOUtils.cleanupWithLogger(LOG, traceScope); + if (call != null) { + updateMetrics(call, startTimeNanos, connDropped); + ProcessingDetails.LOG.debug( + "Served: [{}]{} name={} user={} details={}", + call, (call.isResponseDeferred() ? 
", deferred" : ""), + call.getDetailedMetricsName(), call.getRemoteUser(), + call.getProcessingDetails()); + } } } LOG.debug(Thread.currentThread().getName() + ": exiting"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java index c590dbdaf2aab..b303f8494b63c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java @@ -537,15 +537,15 @@ public Writable call(org.apache.hadoop.ipc.RPC.Server server, } // Invoke the protocol method - long startTime = Time.now(); - int qTime = (int) (startTime-receivedTime); Exception exception = null; + Call currentCall = Server.getCurCall().get(); try { Method method = protocolImpl.protocolClass.getMethod(call.getMethodName(), call.getParameterClasses()); method.setAccessible(true); server.rpcDetailedMetrics.init(protocolImpl.protocolClass); + currentCall.setDetailedMetricsName(call.getMethodName()); Object value = method.invoke(protocolImpl.protocolImpl, call.getParameters()); if (server.verbose) log("Return: "+value); @@ -571,20 +571,10 @@ public Writable call(org.apache.hadoop.ipc.RPC.Server server, exception = ioe; throw ioe; } finally { - int processingTime = (int) (Time.now() - startTime); - if (LOG.isDebugEnabled()) { - String msg = "Served: " + call.getMethodName() + - " queueTime= " + qTime + " procesingTime= " + processingTime; - if (exception != null) { - msg += " exception= " + exception.getClass().getSimpleName(); - } - LOG.debug(msg); + if (exception != null) { + currentCall.setDetailedMetricsName( + exception.getClass().getSimpleName()); } - String detailedMetricsName = (exception == null) ? 
- call.getMethodName() : - exception.getClass().getSimpleName(); - server - .updateMetrics(detailedMetricsName, qTime, processingTime, false); } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java index 0160b0e5b71b1..6461b186a4b81 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java @@ -66,12 +66,12 @@ public void init(Class protocol) { /** * Add an RPC processing time sample - * @param name of the RPC call + * @param rpcCallName of the RPC call * @param processingTime the processing time */ //@Override // some instrumentation interface - public void addProcessingTime(String name, int processingTime) { - rates.add(name, processingTime); + public void addProcessingTime(String rpcCallName, long processingTime) { + rates.add(rpcCallName, processingTime); } public void addDeferredProcessingTime(String name, long processingTime) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java index a36bcd8648c58..06f9244f188ad 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.ipc.metrics; +import java.util.concurrent.TimeUnit; + import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.Server; @@ -27,7 +29,6 @@ import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.hadoop.metrics2.lib.MutableCounterInt; import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.metrics2.lib.MutableRate; @@ -47,6 +48,8 @@ public class RpcMetrics { final MetricsRegistry registry; final String name; final boolean rpcQuantileEnable; + /** The time unit used when storing/accessing time durations. 
*/ + public final static TimeUnit TIMEUNIT = TimeUnit.MILLISECONDS; RpcMetrics(Server server, Configuration conf) { String port = String.valueOf(server.getListenerAddress().getPort()); @@ -61,24 +64,31 @@ public class RpcMetrics { CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE, CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE_DEFAULT); if (rpcQuantileEnable) { - rpcQueueTimeMillisQuantiles = + rpcQueueTimeQuantiles = + new MutableQuantiles[intervals.length]; + rpcLockWaitTimeQuantiles = new MutableQuantiles[intervals.length]; - rpcProcessingTimeMillisQuantiles = + rpcProcessingTimeQuantiles = new MutableQuantiles[intervals.length]; - deferredRpcProcessingTimeMillisQuantiles = + deferredRpcProcessingTimeQuantiles = new MutableQuantiles[intervals.length]; for (int i = 0; i < intervals.length; i++) { int interval = intervals[i]; - rpcQueueTimeMillisQuantiles[i] = registry.newQuantiles("rpcQueueTime" - + interval + "s", "rpc queue time in milli second", "ops", + rpcQueueTimeQuantiles[i] = registry.newQuantiles("rpcQueueTime" + + interval + "s", "rpc queue time in " + TIMEUNIT, "ops", + "latency", interval); + rpcLockWaitTimeQuantiles[i] = registry.newQuantiles( + "rpcLockWaitTime" + interval + "s", + "rpc lock wait time in " + TIMEUNIT, "ops", "latency", interval); - rpcProcessingTimeMillisQuantiles[i] = registry.newQuantiles( + rpcProcessingTimeQuantiles[i] = registry.newQuantiles( "rpcProcessingTime" + interval + "s", - "rpc processing time in milli second", "ops", "latency", interval); - deferredRpcProcessingTimeMillisQuantiles[i] = registry - .newQuantiles("deferredRpcProcessingTime" + interval + "s", - "deferred rpc processing time in milli seconds", "ops", - "latency", interval); + "rpc processing time in " + TIMEUNIT, "ops", + "latency", interval); + deferredRpcProcessingTimeQuantiles[i] = registry.newQuantiles( + "deferredRpcProcessingTime" + interval + "s", + "deferred rpc processing time in " + TIMEUNIT, "ops", + "latency", interval); } } LOG.debug("Initialized " + registry); @@ -94,11 +104,13 @@ public static RpcMetrics create(Server server, Configuration conf) { @Metric("Number of received bytes") MutableCounterLong receivedBytes; @Metric("Number of sent bytes") MutableCounterLong sentBytes; @Metric("Queue time") MutableRate rpcQueueTime; - MutableQuantiles[] rpcQueueTimeMillisQuantiles; + MutableQuantiles[] rpcQueueTimeQuantiles; + @Metric("Lock wait time") MutableRate rpcLockWaitTime; + MutableQuantiles[] rpcLockWaitTimeQuantiles; @Metric("Processing time") MutableRate rpcProcessingTime; - MutableQuantiles[] rpcProcessingTimeMillisQuantiles; + MutableQuantiles[] rpcProcessingTimeQuantiles; @Metric("Deferred Processing time") MutableRate deferredRpcProcessingTime; - MutableQuantiles[] deferredRpcProcessingTimeMillisQuantiles; + MutableQuantiles[] deferredRpcProcessingTimeQuantiles; @Metric("Number of authentication failures") MutableCounterLong rpcAuthenticationFailures; @Metric("Number of authentication successes") @@ -194,25 +206,32 @@ public void incrReceivedBytes(int count) { * Add an RPC queue time sample * @param qTime the queue time */ - //@Override - public void addRpcQueueTime(int qTime) { + public void addRpcQueueTime(long qTime) { rpcQueueTime.add(qTime); if (rpcQuantileEnable) { - for (MutableQuantiles q : rpcQueueTimeMillisQuantiles) { + for (MutableQuantiles q : rpcQueueTimeQuantiles) { q.add(qTime); } } } + public void addRpcLockWaitTime(long waitTime) { + rpcLockWaitTime.add(waitTime); + if (rpcQuantileEnable) { + for (MutableQuantiles q : 
rpcLockWaitTimeQuantiles) { + q.add(waitTime); + } + } + } + /** * Add an RPC processing time sample * @param processingTime the processing time */ - //@Override - public void addRpcProcessingTime(int processingTime) { + public void addRpcProcessingTime(long processingTime) { rpcProcessingTime.add(processingTime); if (rpcQuantileEnable) { - for (MutableQuantiles q : rpcProcessingTimeMillisQuantiles) { + for (MutableQuantiles q : rpcProcessingTimeQuantiles) { q.add(processingTime); } } @@ -221,7 +240,7 @@ public void addRpcProcessingTime(int processingTime) { public void addDeferredRpcProcessingTime(long processingTime) { deferredRpcProcessingTime.add(processingTime); if (rpcQuantileEnable) { - for (MutableQuantiles q : deferredRpcProcessingTimeMillisQuantiles) { + for (MutableQuantiles q : deferredRpcProcessingTimeQuantiles) { q.add(processingTime); } } diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md index 1ef2b44b6ecb3..07f4257e7e16a 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md @@ -71,6 +71,8 @@ Each metrics record contains tags such as Hostname and port (number to which ser | `SentBytes` | Total number of sent bytes | | `RpcQueueTimeNumOps` | Total number of RPC calls | | `RpcQueueTimeAvgTime` | Average queue time in milliseconds | +| `RpcLockWaitTimeNumOps` | Total number of RPC call (same as RpcQueueTimeNumOps) | +| `RpcLockWaitTimeAvgTime` | Average time waiting for lock acquisition in milliseconds | | `RpcProcessingTimeNumOps` | Total number of RPC calls (same to RpcQueueTimeNumOps) | | `RpcProcessingAvgTime` | Average Processing time in milliseconds | | `RpcAuthenticationFailures` | Total number of authentication failures | @@ -92,6 +94,12 @@ Each metrics record contains tags such as Hostname and port (number to which ser | `rpcProcessingTime`*num*`s90thPercentileLatency` | Shows the 90th percentile of RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. | | `rpcProcessingTime`*num*`s95thPercentileLatency` | Shows the 95th percentile of RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. | | `rpcProcessingTime`*num*`s99thPercentileLatency` | Shows the 99th percentile of RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. | +| `rpcLockWaitTime`*num*`sNumOps` | Shows total number of RPC calls (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. | +| `rpcLockWaitTime`*num*`s50thPercentileLatency` | Shows the 50th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. | +| `rpcLockWaitTime`*num*`s75thPercentileLatency` | Shows the 75th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. 
| +| `rpcLockWaitTime`*num*`s90thPercentileLatency` | Shows the 90th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. | +| `rpcLockWaitTime`*num*`s95thPercentileLatency` | Shows the 95th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. | +| `rpcLockWaitTime`*num*`s99thPercentileLatency` | Shows the 99th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. | RetryCache/NameNodeRetryCache ----------------------------- @@ -118,6 +126,7 @@ rpcdetailed context =================== Metrics of rpcdetailed context are exposed in unified manner by RPC layer. Two metrics are exposed for each RPC based on its name. Metrics named "(RPC method name)NumOps" indicates total number of method calls, and metrics named "(RPC method name)AvgTime" shows average turn around time for method calls in milliseconds. +Please note that the AvgTime metrics do not include time spent waiting to acquire locks on data structures (see RpcLockWaitTimeAvgTime). rpcdetailed ----------- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProcessingDetails.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProcessingDetails.java new file mode 100644 index 0000000000000..0ecc741b014b3 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProcessingDetails.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ipc; + +import org.junit.Test; + +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.ipc.ProcessingDetails.Timing; +import static org.junit.Assert.assertEquals; + +/** + * Unit tests for ProcessingDetails time unit conversion and output. + */ +public class TestProcessingDetails { + + /** + * Test that the conversion of time values in various units in and out of the + * details are done properly. 
+ */ + @Test + public void testTimeConversion() { + ProcessingDetails details = new ProcessingDetails(TimeUnit.MICROSECONDS); + + details.set(Timing.ENQUEUE, 10); + assertEquals(10, details.get(Timing.ENQUEUE)); + assertEquals(10_000, details.get(Timing.ENQUEUE, TimeUnit.NANOSECONDS)); + + details.set(Timing.QUEUE, 20, TimeUnit.MILLISECONDS); + details.add(Timing.QUEUE, 20, TimeUnit.MICROSECONDS); + assertEquals(20_020, details.get(Timing.QUEUE)); + assertEquals(0, details.get(Timing.QUEUE, TimeUnit.SECONDS)); + } + + @Test + public void testToString() { + ProcessingDetails details = new ProcessingDetails(TimeUnit.MICROSECONDS); + details.set(Timing.ENQUEUE, 10); + details.set(Timing.QUEUE, 20, TimeUnit.MILLISECONDS); + + assertEquals("enqueueTime=10 queueTime=20000 handlerTime=0 " + + "processingTime=0 lockfreeTime=0 lockwaitTime=0 locksharedTime=0 " + + "lockexclusiveTime=0 responseTime=0", details.toString()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java index 5fbd957312072..fd6a7ae283b64 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java @@ -34,6 +34,7 @@ import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -41,6 +42,7 @@ import java.io.IOException; import java.net.URISyntaxException; +import java.util.concurrent.TimeoutException; import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; @@ -215,7 +217,8 @@ public void testExtraLongRpc() throws Exception { } @Test(timeout = 12000) - public void testLogSlowRPC() throws IOException, ServiceException { + public void testLogSlowRPC() throws IOException, ServiceException, + TimeoutException, InterruptedException { TestRpcService2 client = getClient2(); // make 10 K fast calls for (int x = 0; x < 10000; x++) { @@ -234,9 +237,9 @@ public void testLogSlowRPC() throws IOException, ServiceException { // make a really slow call. Sleep sleeps for 1000ms client.sleep(null, newSleepRequest(SLEEP_DURATION * 3)); - long after = rpcMetrics.getRpcSlowCalls(); // Ensure slow call is logged. 
- Assert.assertEquals(before + 1L, after); + GenericTestUtils.waitFor(() + -> rpcMetrics.getRpcSlowCalls() == before + 1L, 10, 1000); } @Test(timeout = 12000) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index 36a8885c9cfc8..d58cc120fb446 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -87,6 +87,8 @@ import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt; +import static org.apache.hadoop.test.MetricsAsserts.assertGauge; +import static org.apache.hadoop.test.MetricsAsserts.getDoubleGauge; import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.junit.Assert.assertEquals; @@ -1072,10 +1074,14 @@ public TestRpcService run() { } MetricsRecordBuilder rpcMetrics = getMetrics(server.getRpcMetrics().name()); - assertTrue("Expected non-zero rpc queue time", - getLongCounter("RpcQueueTimeNumOps", rpcMetrics) > 0); - assertTrue("Expected non-zero rpc processing time", - getLongCounter("RpcProcessingTimeNumOps", rpcMetrics) > 0); + assertEquals("Expected correct rpc queue count", + 3000, getLongCounter("RpcQueueTimeNumOps", rpcMetrics)); + assertEquals("Expected correct rpc processing count", + 3000, getLongCounter("RpcProcessingTimeNumOps", rpcMetrics)); + assertEquals("Expected correct rpc lock wait count", + 3000, getLongCounter("RpcLockWaitTimeNumOps", rpcMetrics)); + assertEquals("Expected zero rpc lock wait time", + 0, getDoubleGauge("RpcLockWaitTimeAvgTime", rpcMetrics), 0.001); MetricsAsserts.assertQuantileGauges("RpcQueueTime" + interval + "s", rpcMetrics); MetricsAsserts.assertQuantileGauges("RpcProcessingTime" + interval + "s", @@ -1086,6 +1092,10 @@ public TestRpcService run() { UserGroupInformation.getCurrentUser().getShortUserName(); assertTrue(actualUserVsCon.contains("\"" + proxyUser + "\":1")); assertTrue(actualUserVsCon.contains("\"" + testUser + "\":1")); + + proxy.lockAndSleep(null, newSleepRequest(5)); + rpcMetrics = getMetrics(server.getRpcMetrics().name()); + assertGauge("RpcLockWaitTimeAvgTime", 10000.0, rpcMetrics); } finally { if (proxy2 != null) { RPC.stopProxy(proxy2); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java index 0d2f975c1d676..2f2d36f7b45d7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java @@ -21,12 +21,16 @@ import com.google.protobuf.BlockingService; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.protobuf.TestProtos; import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.util.Time; import org.junit.Assert; 
import org.apache.hadoop.io.Text; @@ -278,6 +282,7 @@ public interface TestRpcService public static class PBServerImpl implements TestRpcService { CountDownLatch fastPingCounter = new CountDownLatch(2); private List postponedCalls = new ArrayList<>(); + private final Lock lock = new ReentrantLock(); @Override public TestProtos.EmptyResponseProto ping(RpcController unused, @@ -388,6 +393,29 @@ public TestProtos.EmptyResponseProto sleep( return TestProtos.EmptyResponseProto.newBuilder().build(); } + @Override + public TestProtos.EmptyResponseProto lockAndSleep( + RpcController controller, TestProtos.SleepRequestProto request) + throws ServiceException { + ProcessingDetails details = + Server.getCurCall().get().getProcessingDetails(); + lock.lock(); + long startNanos = Time.monotonicNowNanos(); + try { + Thread.sleep(request.getMilliSeconds()); + } catch (InterruptedException ignore) { + // ignore + } finally { + lock.unlock(); + } + // Add some arbitrary large lock wait time since in any test scenario + // the lock wait time will probably actually be too small to notice + details.add(ProcessingDetails.Timing.LOCKWAIT, 10, TimeUnit.SECONDS); + details.add(ProcessingDetails.Timing.LOCKEXCLUSIVE, + Time.monotonicNowNanos() - startNanos, TimeUnit.NANOSECONDS); + return TestProtos.EmptyResponseProto.newBuilder().build(); + } + @Override public TestProtos.AuthMethodResponseProto getAuthMethod( RpcController controller, TestProtos.EmptyRequestProto request) diff --git a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto index 3746411c90b3f..0df67a0ea3ee1 100644 --- a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto +++ b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto @@ -39,6 +39,7 @@ service TestProtobufRpcProto { rpc testServerGet(EmptyRequestProto) returns (EmptyResponseProto); rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto); rpc sleep(SleepRequestProto) returns (EmptyResponseProto); + rpc lockAndSleep(SleepRequestProto) returns (EmptyResponseProto); rpc getAuthMethod(EmptyRequestProto) returns (AuthMethodResponseProto); rpc getAuthUser(EmptyRequestProto) returns (UserResponseProto); rpc echoPostponed(EchoRequestProto) returns (EchoResponseProto); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java index ebf51781f4e51..2f730b3137eca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java @@ -28,6 +28,7 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.math3.stat.descriptive.SummaryStatistics; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.Server; import org.apache.hadoop.log.LogThrottlingHelper; import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation; import org.apache.hadoop.util.StringUtils; @@ -43,6 +44,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT; import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY; +import static org.apache.hadoop.ipc.ProcessingDetails.Timing; import static org.apache.hadoop.log.LogThrottlingHelper.LogAction; /** @@ -154,17 +156,11 @@ public Long initialValue() { } public void readLock() { - coarseLock.readLock().lock(); - if (coarseLock.getReadHoldCount() == 1) { - readLockHeldTimeStampNanos.set(timer.monotonicNowNanos()); - } + doLock(false); } public void readLockInterruptibly() throws InterruptedException { - coarseLock.readLock().lockInterruptibly(); - if (coarseLock.getReadHoldCount() == 1) { - readLockHeldTimeStampNanos.set(timer.monotonicNowNanos()); - } + doLockInterruptibly(false); } public void readUnlock() { @@ -217,17 +213,11 @@ public void readUnlock(String opName) { } public void writeLock() { - coarseLock.writeLock().lock(); - if (coarseLock.getWriteHoldCount() == 1) { - writeLockHeldTimeStampNanos = timer.monotonicNowNanos(); - } + doLock(true); } public void writeLockInterruptibly() throws InterruptedException { - coarseLock.writeLock().lockInterruptibly(); - if (coarseLock.getWriteHoldCount() == 1) { - writeLockHeldTimeStampNanos = timer.monotonicNowNanos(); - } + doLockInterruptibly(true); } /** @@ -337,6 +327,50 @@ private void addMetric(String operationName, long value, boolean isWrite) { String overallMetric = getMetricName(OVERALL_METRIC_NAME, isWrite); detailedHoldTimeMetrics.add(overallMetric, value); } + updateProcessingDetails( + isWrite ? Timing.LOCKEXCLUSIVE : Timing.LOCKSHARED, value); + } + + private void doLock(boolean isWrite) { + long startNanos = timer.monotonicNowNanos(); + if (isWrite) { + coarseLock.writeLock().lock(); + } else { + coarseLock.readLock().lock(); + } + updateLockWait(startNanos, isWrite); + } + + private void doLockInterruptibly(boolean isWrite) + throws InterruptedException { + long startNanos = timer.monotonicNowNanos(); + if (isWrite) { + coarseLock.writeLock().lockInterruptibly(); + } else { + coarseLock.readLock().lockInterruptibly(); + } + updateLockWait(startNanos, isWrite); + } + + private void updateLockWait(long startNanos, boolean isWrite) { + long now = timer.monotonicNowNanos(); + updateProcessingDetails(Timing.LOCKWAIT, now - startNanos); + if (isWrite) { + if (coarseLock.getWriteHoldCount() == 1) { + writeLockHeldTimeStampNanos = now; + } + } else { + if (coarseLock.getReadHoldCount() == 1) { + readLockHeldTimeStampNanos.set(now); + } + } + } + + private static void updateProcessingDetails(Timing type, long deltaNanos) { + Server.Call call = Server.getCurCall().get(); + if (call != null) { + call.getProcessingDetails().add(type, deltaNanos, TimeUnit.NANOSECONDS); + } } private static String getMetricName(String operationName, boolean isWrite) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java index 1ec47ca68aea1..5cd0fa4b8ef8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java @@ -373,11 +373,6 @@ public boolean shouldBackOff(Schedulable obj) { return --allowed < 0; } - @Override - public void addResponseTime(String name, int priorityLevel, int queueTime, - int processingTime) { - } - @Override public 
void stop() { } From 20a4ec351c51da3459423852abea1d6c0e3097e3 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Thu, 23 May 2019 10:09:07 -0700 Subject: [PATCH 0028/1308] HDDS-700. Support rack awared node placement policy based on network topology. Contributed by Sammi Chen. --- .../hadoop/hdds/protocol/DatanodeDetails.java | 37 +- .../scm/container/ReplicationManager.java | 2 +- .../algorithms/ContainerPlacementPolicy.java | 3 +- .../placement/algorithms/SCMCommonPolicy.java | 10 +- .../SCMContainerPlacementCapacity.java | 9 +- .../SCMContainerPlacementRackAware.java | 329 ++++++++++++++++++ .../SCMContainerPlacementRandom.java | 8 +- .../org/apache/hadoop/hdds/scm/TestUtils.java | 27 +- .../scm/container/TestReplicationManager.java | 2 +- .../TestSCMContainerPlacementCapacity.java | 4 +- .../TestSCMContainerPlacementRackAware.java | 257 ++++++++++++++ .../TestSCMContainerPlacementRandom.java | 4 +- .../placement/TestContainerPlacement.java | 4 +- 13 files changed, 662 insertions(+), 34 deletions(-) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 1dfeecd47ac9c..be6f44cd414d3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -22,6 +22,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.net.NetConstants; +import org.apache.hadoop.hdds.scm.net.NodeImpl; import java.util.ArrayList; import java.util.List; @@ -35,9 +37,9 @@ */ @InterfaceAudience.Private @InterfaceStability.Evolving -public class DatanodeDetails implements Comparable { - - /** +public class DatanodeDetails extends NodeImpl implements + Comparable { +/** * DataNode's unique identifier in the cluster. */ private final UUID uuid; @@ -47,18 +49,19 @@ public class DatanodeDetails implements Comparable { private List ports; private String certSerialId; - /** * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used * for instantiating DatanodeDetails. * @param uuid DataNode's UUID * @param ipAddress IP Address of this DataNode * @param hostName DataNode's hostname + * @param networkLocation DataNode's network location path * @param ports Ports used by the DataNode * @param certSerialId serial id from SCM issued certificate. 
*/ private DatanodeDetails(String uuid, String ipAddress, String hostName, - List ports, String certSerialId) { + String networkLocation, List ports, String certSerialId) { + super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT); this.uuid = UUID.fromString(uuid); this.ipAddress = ipAddress; this.hostName = hostName; @@ -67,6 +70,8 @@ private DatanodeDetails(String uuid, String ipAddress, String hostName, } protected DatanodeDetails(DatanodeDetails datanodeDetails) { + super(datanodeDetails.getHostName(), datanodeDetails.getNetworkLocation(), + datanodeDetails.getCost()); this.uuid = datanodeDetails.uuid; this.ipAddress = datanodeDetails.ipAddress; this.hostName = datanodeDetails.hostName; @@ -223,6 +228,8 @@ public String toString() { ipAddress + ", host: " + hostName + + ", networkLocation: " + + getNetworkLocation() + ", certSerialId: " + certSerialId + "}"; } @@ -259,6 +266,7 @@ public static final class Builder { private String id; private String ipAddress; private String hostName; + private String networkLocation; private List ports; private String certSerialId; @@ -303,6 +311,17 @@ public Builder setHostName(String host) { return this; } + /** + * Sets the network location of DataNode. + * + * @param loc location + * @return DatanodeDetails.Builder + */ + public Builder setNetworkLocation(String loc) { + this.networkLocation = loc; + return this; + } + /** * Adds a DataNode Port. * @@ -334,9 +353,12 @@ public Builder setCertSerialId(String certId) { */ public DatanodeDetails build() { Preconditions.checkNotNull(id); - return new DatanodeDetails(id, ipAddress, hostName, ports, certSerialId); + if (networkLocation == null) { + networkLocation = NetConstants.DEFAULT_RACK; + } + return new DatanodeDetails(id, ipAddress, hostName, networkLocation, + ports, certSerialId); } - } /** @@ -437,5 +459,4 @@ public String getCertSerialId() { public void setCertSerialId(String certSerialId) { this.certSerialId = certSerialId; } - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java index e247e96cd99ce..a911e5a8323aa 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java @@ -484,7 +484,7 @@ private void handleUnderReplicatedContainer(final ContainerInfo container, .getReplicationFactor().getNumber(); final int delta = replicationFactor - getReplicaCount(id, replicas); final List selectedDatanodes = containerPlacement - .chooseDatanodes(source, delta, container.getUsedBytes()); + .chooseDatanodes(source, null, delta, container.getUsedBytes()); LOG.info("Container {} is under replicated. 
Expected replica count" + " is {}, but found {}.", id, replicationFactor, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java index 3336c8e80e74e..52ce7964b6769 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java @@ -33,12 +33,13 @@ public interface ContainerPlacementPolicy { * that satisfy the nodes and size requirement. * * @param excludedNodes - list of nodes to be excluded. + * @param favoredNodes - list of nodes preferred. * @param nodesRequired - number of datanodes required. * @param sizeRequired - size required for the container or block. * @return list of datanodes chosen. * @throws IOException */ List chooseDatanodes(List excludedNodes, - int nodesRequired, long sizeRequired) + List favoredNodes, int nodesRequired, long sizeRequired) throws IOException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java index 9fc47eaf74154..c3e3024fb7376 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java @@ -97,6 +97,7 @@ public Configuration getConf() { * * * @param excludedNodes - datanodes with existing replicas + * @param favoredNodes - list of nodes preferred. * @param nodesRequired - number of datanodes required. * @param sizeRequired - size required for the container or block. * @return list of datanodes chosen. @@ -104,7 +105,7 @@ public Configuration getConf() { */ @Override public List chooseDatanodes( - List excludedNodes, + List excludedNodes, List favoredNodes, int nodesRequired, final long sizeRequired) throws SCMException { List healthyNodes = nodeManager.getNodes(HddsProtos.NodeState.HEALTHY); @@ -137,7 +138,6 @@ public List chooseDatanodes( throw new SCMException(msg, SCMException.ResultCodes.FAILED_TO_FIND_NODES_WITH_SPACE); } - return healthyList; } @@ -147,8 +147,8 @@ public List chooseDatanodes( * @param datanodeDetails DatanodeDetails * @return true if we have enough space. 
*/ - private boolean hasEnoughSpace(DatanodeDetails datanodeDetails, - long sizeRequired) { + boolean hasEnoughSpace(DatanodeDetails datanodeDetails, + long sizeRequired) { SCMNodeMetric nodeMetric = nodeManager.getNodeStat(datanodeDetails); return (nodeMetric != null) && (nodeMetric.get() != null) && nodeMetric.get().getRemaining().hasResources(sizeRequired); @@ -196,6 +196,4 @@ public List getResultSet( */ public abstract DatanodeDetails chooseNode( List healthyNodes); - - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java index 8df8f6e034d1b..daf8222606641 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java @@ -86,6 +86,7 @@ public SCMContainerPlacementCapacity(final NodeManager nodeManager, * * * @param excludedNodes - list of the datanodes to exclude. + * @param favoredNodes - list of nodes preferred. * @param nodesRequired - number of datanodes required. * @param sizeRequired - size required for the container or block. * @return List of datanodes. @@ -93,10 +94,10 @@ public SCMContainerPlacementCapacity(final NodeManager nodeManager, */ @Override public List chooseDatanodes( - List excludedNodes, final int nodesRequired, - final long sizeRequired) throws SCMException { - List healthyNodes = - super.chooseDatanodes(excludedNodes, nodesRequired, sizeRequired); + List excludedNodes, List favoredNodes, + final int nodesRequired, final long sizeRequired) throws SCMException { + List healthyNodes = super.chooseDatanodes(excludedNodes, + favoredNodes, nodesRequired, sizeRequired); if (healthyNodes.size() == nodesRequired) { return healthyNodes; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java new file mode 100644 index 0000000000000..3758b858886f9 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -0,0 +1,329 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.container.placement.algorithms; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.net.NetConstants; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.Node; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Container placement policy that chooses datanodes with network topology + * awareness, together with the space to satisfy the size constraints. + *

    + * This placement policy complies with the algorithm used in HDFS. With the + * default of 3 replicas, two replicas will be placed on the same rack and the + * third one will be on a different rack. + *

    + * This implementation applies to network topologies like "/rack/node". It is + * not recommended for use when the network topology has more layers. + *

    + */ +public final class SCMContainerPlacementRackAware extends SCMCommonPolicy { + @VisibleForTesting + static final Logger LOG = + LoggerFactory.getLogger(SCMContainerPlacementRackAware.class); + private final NetworkTopology networkTopology; + private boolean fallback; + private int RACK_LEVEL = 1; + private int MAX_RETRY= 3; + + /** + * Constructs a Container Placement with rack awareness. + * + * @param nodeManager Node Manager + * @param conf Configuration + * @param fallback Whether reducing constrains to choose a data node when + * there is no node which satisfy all constrains. + * Basically, false for open container placement, and true + * for closed container placement. + */ + public SCMContainerPlacementRackAware(final NodeManager nodeManager, + final Configuration conf, final NetworkTopology networkTopology, + final boolean fallback) { + super(nodeManager, conf); + this.networkTopology = networkTopology; + this.fallback = fallback; + } + + /** + * Called by SCM to choose datanodes. + * There are two scenarios, one is choosing all nodes for a new pipeline. + * Another is choosing node to meet replication requirement. + * + * + * @param excludedNodes - list of the datanodes to exclude. + * @param favoredNodes - list of nodes preferred. This is a hint to the + * allocator, whether the favored nodes will be used + * depends on whether the nodes meets the allocator's + * requirement. + * @param nodesRequired - number of datanodes required. + * @param sizeRequired - size required for the container or block. + * @return List of datanodes. + * @throws SCMException SCMException + */ + @Override + public List chooseDatanodes( + List excludedNodes, List favoredNodes, + int nodesRequired, final long sizeRequired) throws SCMException { + Preconditions.checkArgument(nodesRequired > 0); + + int datanodeCount = networkTopology.getNumOfLeafNode(NetConstants.ROOT); + int excludedNodesCount = excludedNodes == null ? 0 : excludedNodes.size(); + if (datanodeCount < nodesRequired + excludedNodesCount) { + throw new SCMException("No enough datanodes to choose.", null); + } + List mutableFavoredNodes = favoredNodes; + // sanity check of favoredNodes + if (mutableFavoredNodes != null && excludedNodes != null) { + mutableFavoredNodes = new ArrayList<>(); + mutableFavoredNodes.addAll(favoredNodes); + mutableFavoredNodes.removeAll(excludedNodes); + } + int favoredNodeNum = mutableFavoredNodes == null? 0 : + mutableFavoredNodes.size(); + + List chosenNodes = new ArrayList<>(); + int favorIndex = 0; + if (excludedNodes == null || excludedNodes.isEmpty()) { + // choose all nodes for a new pipeline case + // choose first datanode from scope ROOT or from favoredNodes if not null + Node favoredNode = favoredNodeNum > favorIndex ? + mutableFavoredNodes.get(favorIndex) : null; + Node firstNode; + if (favoredNode != null) { + firstNode = favoredNode; + favorIndex++; + } else { + firstNode = chooseNode(null, null, sizeRequired); + } + chosenNodes.add(firstNode); + nodesRequired--; + if (nodesRequired == 0) { + return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0])); + } + + // choose second datanode on the same rack as first one + favoredNode = favoredNodeNum > favorIndex ? 
+ mutableFavoredNodes.get(favorIndex) : null; + Node secondNode; + if (favoredNode != null && + networkTopology.isSameParent(firstNode, favoredNode)) { + secondNode = favoredNode; + favorIndex++; + } else { + secondNode = chooseNode(chosenNodes, firstNode, sizeRequired); + } + chosenNodes.add(secondNode); + nodesRequired--; + if (nodesRequired == 0) { + return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0])); + } + + // choose remaining datanodes on different rack as first and second + return chooseNodes(null, chosenNodes, mutableFavoredNodes, favorIndex, + nodesRequired, sizeRequired); + } else { + List mutableExcludedNodes = new ArrayList<>(); + mutableExcludedNodes.addAll(excludedNodes); + // choose node to meet replication requirement + // case 1: one excluded node, choose one on the same rack as the excluded + // node, choose others on different racks. + Node favoredNode; + if (excludedNodes.size() == 1) { + favoredNode = favoredNodeNum > favorIndex ? + mutableFavoredNodes.get(favorIndex) : null; + Node firstNode; + if (favoredNode != null && + networkTopology.isSameParent(excludedNodes.get(0), favoredNode)) { + firstNode = favoredNode; + favorIndex++; + } else { + firstNode = chooseNode(mutableExcludedNodes, excludedNodes.get(0), + sizeRequired); + } + chosenNodes.add(firstNode); + nodesRequired--; + if (nodesRequired == 0) { + return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0])); + } + // choose remaining nodes on different racks + return chooseNodes(null, chosenNodes, mutableFavoredNodes, favorIndex, + nodesRequired, sizeRequired); + } + // case 2: two or more excluded nodes, if these two nodes are + // in the same rack, then choose nodes on different racks, otherwise, + // choose one on the same rack as one of excluded nodes, remaining chosen + // are on different racks. + for(int i = 0; i < excludedNodesCount; i++) { + for (int j = i + 1; j < excludedNodesCount; j++) { + if (networkTopology.isSameParent( + excludedNodes.get(i), excludedNodes.get(j))) { + // choose remaining nodes on different racks + return chooseNodes(mutableExcludedNodes, chosenNodes, + mutableFavoredNodes, favorIndex, nodesRequired, sizeRequired); + } + } + } + // choose one data on the same rack with one excluded node + favoredNode = favoredNodeNum > favorIndex ? + mutableFavoredNodes.get(favorIndex) : null; + Node secondNode; + if (favoredNode != null && networkTopology.isSameParent( + mutableExcludedNodes.get(0), favoredNode)) { + secondNode = favoredNode; + favorIndex++; + } else { + secondNode = + chooseNode(chosenNodes, mutableExcludedNodes.get(0), sizeRequired); + } + chosenNodes.add(secondNode); + mutableExcludedNodes.add(secondNode); + nodesRequired--; + if (nodesRequired == 0) { + return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0])); + } + // choose remaining nodes on different racks + return chooseNodes(mutableExcludedNodes, chosenNodes, mutableFavoredNodes, + favorIndex, nodesRequired, sizeRequired); + } + } + + @Override + public DatanodeDetails chooseNode(List healthyNodes) { + return null; + } + + /** + * Choose a datanode which meets the requirements. If there is no node which + * meets all the requirements, there is fallback chosen process depending on + * whether fallback is allowed when this class is instantiated. + * + * + * @param excludedNodes - list of the datanodes to excluded. Can be null. + * @param affinityNode - the chosen nodes should be on the same rack as + * affinityNode. Can be null. 
+ * @param sizeRequired - size required for the container or block. + * @return List of chosen datanodes. + * @throws SCMException SCMException + */ + private Node chooseNode(List excludedNodes, Node affinityNode, + long sizeRequired) throws SCMException { + int ancestorGen = RACK_LEVEL; + int maxRetry = MAX_RETRY; + while(true) { + Node node = networkTopology.chooseRandom(NetConstants.ROOT, null, + excludedNodes, affinityNode, ancestorGen); + if (node == null) { + // cannot find the node which meets all constrains + LOG.warn("Failed to find the datanode. excludedNodes:" + + (excludedNodes == null ? "" : excludedNodes.toString()) + + ", affinityNode:" + + (affinityNode == null ? "" : affinityNode.getNetworkFullPath())); + if (fallback) { + // fallback, don't consider the affinity node + if (affinityNode != null) { + affinityNode = null; + continue; + } + // fallback, don't consider cross rack + if (ancestorGen == RACK_LEVEL) { + ancestorGen--; + continue; + } + } + // there is no constrains to reduce or fallback is true + throw new SCMException("No satisfied datanode to meet the " + + " excludedNodes and affinityNode constrains.", null); + } + if (hasEnoughSpace((DatanodeDetails)node, sizeRequired)) { + LOG.debug("Datanode {} is chosen. Required size is {}", + node.toString(), sizeRequired); + return node; + } else { + maxRetry--; + if (maxRetry == 0) { + // avoid the infinite loop + String errMsg = "No satisfied datanode to meet the space constrains. " + + " sizeRequired: " + sizeRequired; + LOG.info(errMsg); + throw new SCMException(errMsg, null); + } + } + } + } + + /** + * Choose a batch of datanodes on different rack than excludedNodes or + * chosenNodes. + * + * + * @param excludedNodes - list of the datanodes to excluded. Can be null. + * @param chosenNodes - list of nodes already chosen. These nodes should also + * be excluded. Cannot be null. + * @param favoredNodes - list of favoredNodes. It's a hint. Whether the nodes + * are chosen depends on whether they meet the constrains. + * Can be null. + * @param favorIndex - the node index of favoredNodes which is not chosen yet. + * @param sizeRequired - size required for the container or block. + * @param nodesRequired - number of datanodes required. + * @param sizeRequired - size required for the container or block. + * @return List of chosen datanodes. + * @throws SCMException SCMException + */ + private List chooseNodes(List excludedNodes, + List chosenNodes, List favoredNodes, + int favorIndex, int nodesRequired, long sizeRequired) + throws SCMException { + Preconditions.checkArgument(chosenNodes != null); + List excludedNodeList = excludedNodes != null ? + excludedNodes : chosenNodes; + int favoredNodeNum = favoredNodes == null? 0 : favoredNodes.size(); + while(true) { + Node favoredNode = favoredNodeNum > favorIndex ? 
+ favoredNodes.get(favorIndex) : null; + Node chosenNode; + if (favoredNode != null && networkTopology.isSameParent( + excludedNodeList.get(excludedNodeList.size() - 1), favoredNode)) { + chosenNode = favoredNode; + favorIndex++; + } else { + chosenNode = chooseNode(excludedNodeList, null, sizeRequired); + } + excludedNodeList.add(chosenNode); + if (excludedNodeList != chosenNodes) { + chosenNodes.add(chosenNode); + } + nodesRequired--; + if (nodesRequired == 0) { + return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0])); + } + } + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java index a70f633e820c4..48b613944202f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java @@ -58,6 +58,7 @@ public SCMContainerPlacementRandom(final NodeManager nodeManager, * * * @param excludedNodes - list of the datanodes to exclude. + * @param favoredNodes - list of nodes preferred. * @param nodesRequired - number of datanodes required. * @param sizeRequired - size required for the container or block. * @return List of Datanodes. @@ -65,10 +66,11 @@ public SCMContainerPlacementRandom(final NodeManager nodeManager, */ @Override public List chooseDatanodes( - List excludedNodes, final int nodesRequired, - final long sizeRequired) throws SCMException { + List excludedNodes, List favoredNodes, + final int nodesRequired, final long sizeRequired) throws SCMException { List healthyNodes = - super.chooseDatanodes(excludedNodes, nodesRequired, sizeRequired); + super.chooseDatanodes(excludedNodes, favoredNodes, nodesRequired, + sizeRequired); if (healthyNodes.size() == nodesRequired) { return healthyNodes; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java index d61924a91ee3c..b1dd77e5545f0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java @@ -96,6 +96,22 @@ public static DatanodeDetails randomDatanodeDetails() { return createDatanodeDetails(UUID.randomUUID()); } + /** + * Creates DatanodeDetails with random UUID, specific hostname and network + * location. + * + * @return DatanodeDetails + */ + public static DatanodeDetails createDatanodeDetails(String hostname, + String loc) { + String ipAddress = random.nextInt(256) + + "." + random.nextInt(256) + + "." + random.nextInt(256) + + "." + random.nextInt(256); + return createDatanodeDetails(UUID.randomUUID().toString(), hostname, + ipAddress, loc); + } + /** * Creates DatanodeDetails using the given UUID. * @@ -108,7 +124,8 @@ public static DatanodeDetails createDatanodeDetails(UUID uuid) { + "." + random.nextInt(256) + "." + random.nextInt(256) + "." 
+ random.nextInt(256); - return createDatanodeDetails(uuid.toString(), "localhost", ipAddress); + return createDatanodeDetails(uuid.toString(), "localhost", ipAddress, + null); } /** @@ -121,7 +138,8 @@ public static DatanodeDetails createDatanodeDetails(UUID uuid) { public static DatanodeDetails getDatanodeDetails( RegisteredCommand registeredCommand) { return createDatanodeDetails(registeredCommand.getDatanodeUUID(), - registeredCommand.getHostName(), registeredCommand.getIpAddress()); + registeredCommand.getHostName(), registeredCommand.getIpAddress(), + null); } /** @@ -134,7 +152,7 @@ public static DatanodeDetails getDatanodeDetails( * @return DatanodeDetails */ private static DatanodeDetails createDatanodeDetails(String uuid, - String hostname, String ipAddress) { + String hostname, String ipAddress, String networkLocation) { DatanodeDetails.Port containerPort = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, 0); DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( @@ -147,7 +165,8 @@ private static DatanodeDetails createDatanodeDetails(String uuid, .setIpAddress(ipAddress) .addPort(containerPort) .addPort(ratisPort) - .addPort(restPort); + .addPort(restPort) + .setNetworkLocation(networkLocation); return builder.build(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java index 5da057e10ee44..bc921e3ce55eb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java @@ -96,7 +96,7 @@ public void setup() throws IOException, InterruptedException { containerPlacementPolicy = Mockito.mock(ContainerPlacementPolicy.class); Mockito.when(containerPlacementPolicy.chooseDatanodes( - Mockito.anyListOf(DatanodeDetails.class), + Mockito.anyListOf(DatanodeDetails.class), null, Mockito.anyInt(), Mockito.anyLong())) .thenAnswer(invocation -> { int count = (int) invocation.getArguments()[1]; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index f406016a46281..fb2a4c33dfcf7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -78,8 +78,8 @@ public void chooseDatanodes() throws SCMException { for (int i = 0; i < 1000; i++) { //when - List datanodeDetails = - scmContainerPlacementRandom.chooseDatanodes(existingNodes, 1, 15); + List datanodeDetails = scmContainerPlacementRandom + .chooseDatanodes(existingNodes, null, 1, 15); //then Assert.assertEquals(1, datanodeDetails.size()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java new file mode 100644 index 0000000000000..d80c7e53162ee --- /dev/null +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -0,0 +1,257 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.container.placement.algorithms; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; +import org.apache.hadoop.hdds.scm.net.NodeSchema; +import org.apache.hadoop.hdds.scm.net.NodeSchemaManager; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.List; + +import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.when; + +/** + * Test for the scm container rack aware placement. + */ +public class TestSCMContainerPlacementRackAware { + private NetworkTopology cluster; + private List datanodes = new ArrayList<>(); + // policy with fallback capability + private SCMContainerPlacementRackAware policy; + // policy prohibit fallback + private SCMContainerPlacementRackAware policyNoFallback; + // node storage capacity + private final long STORAGE_CAPACITY = 100L; + + @Before + public void setup() { + //initialize network topology instance + Configuration conf = new OzoneConfiguration(); + NodeSchema[] schemas = new NodeSchema[] + {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}; + NodeSchemaManager.getInstance().init(schemas, true); + cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance()); + + // build datanodes, and network topology + String rack = "/rack"; + String hostname = "node"; + for (int i = 0; i < 15; i++) { + // Totally 3 racks, each has 5 datanodes + DatanodeDetails node = TestUtils.createDatanodeDetails( + hostname + i, rack + (i / 5)); + datanodes.add(node); + cluster.add(node); + } + + // create mock node manager + NodeManager nodeManager = Mockito.mock(NodeManager.class); + when(nodeManager.getNodes(NodeState.HEALTHY)) + .thenReturn(new ArrayList<>(datanodes)); + when(nodeManager.getNodeStat(anyObject())) + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 0L, 100L)); + when(nodeManager.getNodeStat(datanodes.get(2))) + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 90L, 10L)); + when(nodeManager.getNodeStat(datanodes.get(3))) + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 80L, 20L)); + when(nodeManager.getNodeStat(datanodes.get(4))) + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 70L, 30L)); + + // create placement policy instances + policy = + new SCMContainerPlacementRackAware(nodeManager, conf, cluster, true); + policyNoFallback = + new SCMContainerPlacementRackAware(nodeManager, conf, cluster, false); + } + + + @Test + public void 
chooseNodeWithNoExcludedNodes() throws SCMException { + // test choose new datanodes for new pipeline cases + // 1 replica + int nodeNum = 1; + List datanodeDetails = + policy.chooseDatanodes(null, null, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + + // 2 replicas + nodeNum = 2; + datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(1))); + + // 3 replicas + nodeNum = 3; + datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(1))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(2))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1), + datanodeDetails.get(2))); + + // 4 replicas + nodeNum = 4; + datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(1))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(2))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1), + datanodeDetails.get(2))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(3))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(2), + datanodeDetails.get(3))); + } + + @Test + public void chooseNodeWithExcludedNodes() throws SCMException { + // test choose new datanodes for under replicated pipeline + // 3 replicas, two existing datanodes on same rack + int nodeNum = 1; + List excludedNodes = new ArrayList<>(); + + excludedNodes.add(datanodes.get(0)); + excludedNodes.add(datanodes.get(1)); + List datanodeDetails = policy.chooseDatanodes( + excludedNodes, null, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), + excludedNodes.get(0))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), + excludedNodes.get(1))); + + // 3 replicas, two existing datanodes on different rack + excludedNodes.clear(); + excludedNodes.add(datanodes.get(0)); + excludedNodes.add(datanodes.get(7)); + datanodeDetails = policy.chooseDatanodes( + excludedNodes, null, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertTrue(cluster.isSameParent( + datanodeDetails.get(0), excludedNodes.get(0)) || + cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(1))); + + // 3 replicas, one existing datanode + nodeNum = 2; + excludedNodes.clear(); + excludedNodes.add(datanodes.get(0)); + datanodeDetails = policy.chooseDatanodes( + excludedNodes, null, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertTrue(cluster.isSameParent( + datanodeDetails.get(0), excludedNodes.get(0)) || + cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(1))); + } + + @Test + public void testFallback() throws SCMException { + + // 5 replicas. there are only 3 racks. policy with fallback should + // allocate the 5th datanode though it will break the rack rule(first + // 2 replicas on same rack, others are different racks). 
+ int nodeNum = 5; + List datanodeDetails = + policy.chooseDatanodes(null, null, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(1))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(2))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1), + datanodeDetails.get(2))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(3))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(2), + datanodeDetails.get(3))); + } + + + @Test(expected = SCMException.class) + public void testNoFallback() throws SCMException { + // 5 replicas. there are only 3 racks. policy prohibit fallback should fail. + int nodeNum = 5; + policyNoFallback.chooseDatanodes(null, null, nodeNum, 15); + } + + @Test + public void chooseNodeWithFavoredNodes() throws SCMException { + int nodeNum = 1; + List excludedNodes = new ArrayList<>(); + List favoredNodes = new ArrayList<>(); + + // no excludedNodes, only favoredNodes + favoredNodes.add(datanodes.get(0)); + List datanodeDetails = policy.chooseDatanodes( + excludedNodes, favoredNodes, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertTrue(datanodeDetails.get(0).getNetworkFullPath() + .equals(favoredNodes.get(0).getNetworkFullPath())); + + // no overlap between excludedNodes and favoredNodes, favoredNodes can been + // chosen. + excludedNodes.clear(); + favoredNodes.clear(); + excludedNodes.add(datanodes.get(0)); + favoredNodes.add(datanodes.get(2)); + datanodeDetails = policy.chooseDatanodes( + excludedNodes, favoredNodes, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertTrue(datanodeDetails.get(0).getNetworkFullPath() + .equals(favoredNodes.get(0).getNetworkFullPath())); + + // there is overlap between excludedNodes and favoredNodes, favoredNodes + // should not be chosen. 
+ excludedNodes.clear(); + favoredNodes.clear(); + excludedNodes.add(datanodes.get(0)); + favoredNodes.add(datanodes.get(0)); + datanodeDetails = policy.chooseDatanodes( + excludedNodes, favoredNodes, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertFalse(datanodeDetails.get(0).getNetworkFullPath() + .equals(favoredNodes.get(0).getNetworkFullPath())); + } + + @Test(expected = SCMException.class) + public void testNoInfiniteLoop() throws SCMException { + int nodeNum = 1; + // request storage space larger than node capability + policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 15); + } +} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java index d285a3f5ab40f..a20c6c019f195 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java @@ -67,8 +67,8 @@ public void chooseDatanodes() throws SCMException { for (int i = 0; i < 100; i++) { //when - List datanodeDetails = - scmContainerPlacementRandom.chooseDatanodes(existingNodes, 1, 15); + List datanodeDetails = scmContainerPlacementRandom + .chooseDatanodes(existingNodes, null, 1, 15); //then Assert.assertEquals(1, datanodeDetails.size()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java index 1c80880d01d9f..bd62111cf1e40 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java @@ -87,12 +87,12 @@ public void testCapacityPlacementYieldsBetterDataDistribution() throws for (int x = 0; x < opsCount; x++) { long containerSize = random.nextInt(100) * OzoneConsts.GB; List nodesCapacity = - capacityPlacer.chooseDatanodes(new ArrayList<>(), nodesRequired, + capacityPlacer.chooseDatanodes(new ArrayList<>(), null, nodesRequired, containerSize); assertEquals(nodesRequired, nodesCapacity.size()); List nodesRandom = - randomPlacer.chooseDatanodes(nodesCapacity, nodesRequired, + randomPlacer.chooseDatanodes(nodesCapacity, null, nodesRequired, containerSize); // One fifth of all calls are delete From 6a0e7dd454d587266a6020f83c6d07efb946d6ce Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Thu, 23 May 2019 13:27:52 -0700 Subject: [PATCH 0029/1308] YARN-9482. DistributedShell job with localization fails in unsecure cluster. Contributed by Prabhu Joseph. 
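The fix below runs the staging-directory cleanup as the application submitter and resolves the submitter's home directory from the USER environment variable instead of calling FileSystem.getHomeDirectory() as the daemon user; the localization path in run() is switched to the same helper. A minimal sketch of the doAs pattern the patch adopts, assuming a UserGroupInformation handle for the submitting user (class and method names here are illustrative):

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public final class StagingCleanupSketch {
  // Delete a staging directory as the submitting user rather than the user
  // running the process, so the correct identity is used in both secure and
  // unsecure clusters.
  static void deleteAsSubmitter(UserGroupInformation submitterUgi,
      Configuration conf, Path stagingDir) throws Exception {
    submitterUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
      FileSystem fs = FileSystem.get(conf); // resolved inside doAs
      fs.delete(stagingDir, true);          // recursive delete
      return null;
    });
  }
}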
--- .../distributedshell/ApplicationMaster.java | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index 56664cea2d1ac..c30dc4dc01c7c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -774,17 +774,27 @@ private void printUsage(Options opts) { } private void cleanup() { - Path dst = null; try { - FileSystem fs = FileSystem.get(conf); - dst = new Path(fs.getHomeDirectory(), getRelativePath(appName, - appId.toString(), "")); - fs.delete(dst, true); - } catch(IOException e) { - LOG.warn("Failed to remove application staging directory {}", dst); + appSubmitterUgi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws IOException { + FileSystem fs = FileSystem.get(conf); + Path dst = new Path(getAppSubmitterHomeDir(), + getRelativePath(appName, appId.toString(), "")); + fs.delete(dst, true); + return null; + } + }); + } catch(Exception e) { + LOG.warn("Failed to remove application staging directory", e); } } + private Path getAppSubmitterHomeDir() { + return new Path("/user/" + + System.getenv(ApplicationConstants.Environment.USER.name())); + } + /** * Main run function for the application master * @@ -1485,7 +1495,7 @@ public void run() { String relativePath = getRelativePath(appName, appId.toString(), fileName); Path dst = - new Path(fs.getHomeDirectory(), relativePath); + new Path(getAppSubmitterHomeDir(), relativePath); FileStatus fileStatus = fs.getFileStatus(dst); LocalResource localRes = LocalResource.newInstance( URL.fromURI(dst.toUri()), From 4b099b8b890cc578b13630369ef44a42ecd6496c Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Thu, 23 May 2019 15:34:55 -0700 Subject: [PATCH 0030/1308] HDDS-1501 : Create a Recon task interface to update internal DB on updates from OM. 
(#819) --- .../org/apache/hadoop/utils/db/DBStore.java | 14 ++ .../hadoop/utils/db/DBStoreBuilder.java | 8 +- .../org/apache/hadoop/utils/db/RDBStore.java | 40 +++- .../src/main/resources/ozone-default.xml | 8 + .../apache/hadoop/utils/db/TestRDBStore.java | 43 ---- .../ozone/om/OmMetadataManagerImpl.java | 20 +- .../codegen/ReconSchemaGenerationModule.java | 3 +- .../schema/ReconInternalSchemaDefinition.java | 65 ++++++ .../ozone/recon/ReconControllerModule.java | 5 + .../hadoop/ozone/recon/ReconServer.java | 25 +- .../ozone/recon/ReconServerConfigKeys.java | 4 + .../recovery/ReconOmMetadataManagerImpl.java | 1 - .../recon/spi/ContainerDBServiceProvider.java | 16 ++ .../impl/ContainerDBServiceProviderImpl.java | 11 + .../recon/tasks/ContainerKeyMapperTask.java | 154 +++++++++--- .../ozone/recon/tasks/OMDBUpdateEvent.java | 150 ++++++++++++ .../ozone/recon/tasks/OMDBUpdatesHandler.java | 220 ++++++++++++++++++ .../ozone/recon/tasks/OMUpdateEventBatch.java | 69 ++++++ .../ozone/recon/tasks/ReconDBUpdateTask.java | 66 ++++++ .../recon/tasks/ReconTaskController.java | 46 ++++ .../recon/tasks/ReconTaskControllerImpl.java | 198 ++++++++++++++++ .../recon/api/TestContainerKeyService.java | 7 +- .../persistence/AbstractSqlDatabaseTest.java | 5 +- .../TestReconInternalSchemaDefinition.java | 143 ++++++++++++ .../TestReconOmMetadataManagerImpl.java | 17 -- .../TestContainerDBServiceProviderImpl.java | 25 ++ .../ozone/recon/tasks/DummyReconDBTask.java | 77 ++++++ .../tasks/TestContainerKeyMapperTask.java | 155 ++++++++++-- .../recon/tasks/TestOMDBUpdatesHandler.java | 207 ++++++++++++++++ .../tasks/TestReconTaskControllerImpl.java | 171 ++++++++++++++ 30 files changed, 1814 insertions(+), 159 deletions(-) create mode 100644 hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconInternalSchemaDefinition.java create mode 100644 hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java create mode 100644 hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java create mode 100644 hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java create mode 100644 hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconDBUpdateTask.java create mode 100644 hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java create mode 100644 hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java create mode 100644 hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java create mode 100644 hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/DummyReconDBTask.java create mode 100644 hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java create mode 100644 hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java index 9e0c4a4b42c26..d01dfe44d30c4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java @@ -22,6 +22,7 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Map; 
import org.apache.hadoop.classification.InterfaceStability; @@ -158,4 +159,17 @@ void move(KEY sourceKey, KEY destKey, VALUE value, * @return DB file location. */ File getDbLocation(); + + /** + * Get List of Index to Table Names. + * (For decoding table from column family index) + * @return Map of Index -> TableName + */ + Map getTableNames(); + + /** + * Get Codec registry. + * @return codec registry. + */ + CodecRegistry getCodecRegistry(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java index 34bdc5dbc351c..3459b2032edea 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java @@ -57,7 +57,6 @@ public final class DBStoreBuilder { private List tableNames; private Configuration configuration; private CodecRegistry registry; - private boolean readOnly = false; private DBStoreBuilder(Configuration configuration) { tables = new HashSet<>(); @@ -114,11 +113,6 @@ public DBStoreBuilder setPath(Path path) { return this; } - public DBStoreBuilder setReadOnly(boolean rdOnly) { - readOnly = rdOnly; - return this; - } - /** * Builds a DBStore instance and returns that. * @@ -137,7 +131,7 @@ public DBStore build() throws IOException { if (!dbFile.getParentFile().exists()) { throw new IOException("The DB destination directory should exist."); } - return new RDBStore(dbFile, options, tables, registry, readOnly); + return new RDBStore(dbFile, options, tables, registry); } /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java index 07d74c4f465db..d293c1d215dc6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java @@ -64,15 +64,16 @@ public class RDBStore implements DBStore { private ObjectName statMBeanName; private RDBCheckpointManager checkPointManager; private String checkpointsParentDir; + private List columnFamilyHandles; @VisibleForTesting public RDBStore(File dbFile, DBOptions options, Set families) throws IOException { - this(dbFile, options, families, new CodecRegistry(), false); + this(dbFile, options, families, new CodecRegistry()); } public RDBStore(File dbFile, DBOptions options, Set families, - CodecRegistry registry, boolean readOnly) + CodecRegistry registry) throws IOException { Preconditions.checkNotNull(dbFile, "DB file location cannot be null"); Preconditions.checkNotNull(families); @@ -81,7 +82,7 @@ public RDBStore(File dbFile, DBOptions options, Set families, codecRegistry = registry; final List columnFamilyDescriptors = new ArrayList<>(); - final List columnFamilyHandles = new ArrayList<>(); + columnFamilyHandles = new ArrayList<>(); for (TableConfig family : families) { columnFamilyDescriptors.add(family.getDescriptor()); @@ -93,13 +94,8 @@ public RDBStore(File dbFile, DBOptions options, Set families, writeOptions = new WriteOptions(); try { - if (readOnly) { - db = RocksDB.openReadOnly(dbOptions, dbLocation.getAbsolutePath(), - columnFamilyDescriptors, columnFamilyHandles); - } else { - db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath(), - columnFamilyDescriptors, columnFamilyHandles); - } + db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath(), + columnFamilyDescriptors, columnFamilyHandles); for 
(int x = 0; x < columnFamilyHandles.size(); x++) { handleTable.put( @@ -299,7 +295,31 @@ public File getDbLocation() { return dbLocation; } + @Override + public Map getTableNames() { + Map tableNames = new HashMap<>(); + StringCodec stringCodec = new StringCodec(); + + for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { + try { + tableNames.put(columnFamilyHandle.getID(), stringCodec + .fromPersistedFormat(columnFamilyHandle.getName())); + } catch (RocksDBException | IOException e) { + LOG.error("Unexpected exception while reading column family handle " + + "name", e); + } + } + return tableNames; + } + + @Override public CodecRegistry getCodecRegistry() { return codecRegistry; } + + @VisibleForTesting + public RocksDB getDb() { + return db; + } + } \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 305cac5a2cd6f..9b941a00cc5dc 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2472,4 +2472,12 @@ connections. + + ozone.recon.task.thread.count + 1 + OZONE, RECON + + The number of Recon Tasks that are waiting on updates from OM. + + diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBStore.java index 24a9ee50bf12d..6d510342522f8 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBStore.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBStore.java @@ -21,7 +21,6 @@ import javax.management.MBeanServer; -import java.io.File; import java.io.IOException; import java.lang.management.ManagementFactory; import java.nio.charset.StandardCharsets; @@ -290,46 +289,4 @@ public void testRocksDBCheckpointCleanup() throws Exception { checkpoint.getCheckpointLocation())); } } - - @Test - public void testReadOnlyRocksDB() throws Exception { - File dbFile = folder.newFolder(); - byte[] key = "Key1".getBytes(); - byte[] value = "Value1".getBytes(); - - //Create Rdb and write some data into it. - RDBStore newStore = new RDBStore(dbFile, options, configSet); - Assert.assertNotNull("DB Store cannot be null", newStore); - Table firstTable = newStore.getTable(families.get(0)); - Assert.assertNotNull("Table cannot be null", firstTable); - firstTable.put(key, value); - - RocksDBCheckpoint checkpoint = (RocksDBCheckpoint) newStore.getCheckpoint( - true); - - //Create Read Only DB from snapshot of first DB. - RDBStore snapshotStore = new RDBStore(checkpoint.getCheckpointLocation() - .toFile(), options, configSet, new CodecRegistry(), true); - - Assert.assertNotNull("DB Store cannot be null", newStore); - - //Verify read is allowed. - firstTable = snapshotStore.getTable(families.get(0)); - Assert.assertNotNull("Table cannot be null", firstTable); - Assert.assertTrue(Arrays.equals(((byte[])firstTable.get(key)), value)); - - //Verify write is not allowed. 
- byte[] key2 = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value2 = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - try { - firstTable.put(key2, value2); - Assert.fail(); - } catch (IOException e) { - Assert.assertTrue(e.getMessage() - .contains("Not supported operation in read only mode")); - } - checkpoint.cleanupCheckpoint(); - } } \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index ece04ddf61695..0720a10b2472b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -100,16 +100,16 @@ public class OmMetadataManagerImpl implements OMMetadataManager { * |-------------------------------------------------------------------| */ - private static final String USER_TABLE = "userTable"; - private static final String VOLUME_TABLE = "volumeTable"; - private static final String BUCKET_TABLE = "bucketTable"; - private static final String KEY_TABLE = "keyTable"; - private static final String DELETED_TABLE = "deletedTable"; - private static final String OPEN_KEY_TABLE = "openKeyTable"; - private static final String S3_TABLE = "s3Table"; - private static final String MULTIPARTINFO_TABLE = "multipartInfoTable"; - private static final String S3_SECRET_TABLE = "s3SecretTable"; - private static final String DELEGATION_TOKEN_TABLE = "dTokenTable"; + public static final String USER_TABLE = "userTable"; + public static final String VOLUME_TABLE = "volumeTable"; + public static final String BUCKET_TABLE = "bucketTable"; + public static final String KEY_TABLE = "keyTable"; + public static final String DELETED_TABLE = "deletedTable"; + public static final String OPEN_KEY_TABLE = "openKeyTable"; + public static final String S3_TABLE = "s3Table"; + public static final String MULTIPARTINFO_TABLE = "multipartInfoTable"; + public static final String S3_SECRET_TABLE = "s3SecretTable"; + public static final String DELEGATION_TOKEN_TABLE = "dTokenTable"; private DBStore store; diff --git a/hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java b/hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java index fa44e4605735d..5e06b99324eef 100644 --- a/hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java +++ b/hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java @@ -17,6 +17,7 @@ */ package org.hadoop.ozone.recon.codegen; +import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition; import org.hadoop.ozone.recon.schema.ReconSchemaDefinition; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; @@ -34,6 +35,6 @@ protected void configure() { Multibinder schemaBinder = Multibinder.newSetBinder(binder(), ReconSchemaDefinition.class); schemaBinder.addBinding().to(UtilizationSchemaDefinition.class); - + schemaBinder.addBinding().to(ReconInternalSchemaDefinition.class); } } diff --git a/hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconInternalSchemaDefinition.java 
b/hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconInternalSchemaDefinition.java new file mode 100644 index 0000000000000..9ab9e38e95f6c --- /dev/null +++ b/hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconInternalSchemaDefinition.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.hadoop.ozone.recon.schema; + +import java.sql.Connection; +import java.sql.SQLException; + +import javax.sql.DataSource; + +import org.jooq.impl.DSL; +import org.jooq.impl.SQLDataType; + +import com.google.inject.Inject; + +/** + * Class used to create tables that are required for Recon's internal + * management. + */ +public class ReconInternalSchemaDefinition implements ReconSchemaDefinition { + + public static final String RECON_TASK_STATUS_TABLE_NAME = + "recon_task_status"; + private final DataSource dataSource; + + @Inject + ReconInternalSchemaDefinition(DataSource dataSource) { + this.dataSource = dataSource; + } + + @Override + public void initializeSchema() throws SQLException { + Connection conn = dataSource.getConnection(); + createReconTaskStatus(conn); + } + + /** + * Create the Recon Task Status table. + * @param conn connection + */ + private void createReconTaskStatus(Connection conn) { + DSL.using(conn).createTableIfNotExists(RECON_TASK_STATUS_TABLE_NAME) + .column("task_name", SQLDataType.VARCHAR(1024)) + .column("last_updated_timestamp", SQLDataType.BIGINT) + .column("last_updated_seq_number", SQLDataType.BIGINT) + .constraint(DSL.constraint("pk_task_name") + .primaryKey("task_name")) + .execute(); + } +} diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java index 0576c6b0f3250..3473a6281efc3 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java @@ -39,6 +39,8 @@ import org.apache.hadoop.ozone.recon.spi.impl.ReconContainerDBProvider; import org.apache.hadoop.ozone.recon.spi.impl.ContainerDBServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.tasks.ReconTaskController; +import org.apache.hadoop.ozone.recon.tasks.ReconTaskControllerImpl; import org.apache.hadoop.utils.db.DBStore; import com.google.inject.AbstractModule; @@ -65,6 +67,9 @@ protected void configure() { // Persistence - inject configuration provider install(new JooqPersistenceModule( getProvider(DataSourceConfiguration.class))); + + bind(ReconTaskController.class) + .to(ReconTaskControllerImpl.class).in(Singleton.class); } @Provides diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java index cea168121bbfa..fb2dcc38dcb7c 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INTERVAL; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT; +import java.io.IOException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; 
import java.util.concurrent.TimeUnit; @@ -67,7 +68,7 @@ public Void call() throws Exception { @Override protected void configureServlets() { rest("/api/*") - .packages("org.apache.hadoop.ozone.recon.api"); + .packages("org.apache.hadoop.ozone.recon.api"); } }); @@ -100,10 +101,6 @@ private void scheduleReconTasks() { OzoneManagerServiceProvider ozoneManagerServiceProvider = injector .getInstance(OzoneManagerServiceProvider.class); - // Schedule the task to read OM DB and write the reverse mapping to Recon - // container DB. - ContainerKeyMapperTask containerKeyMapperTask = new ContainerKeyMapperTask( - ozoneManagerServiceProvider, containerDBServiceProvider); long initialDelay = configuration.getTimeDuration( RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY, RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT, @@ -113,8 +110,22 @@ private void scheduleReconTasks() { RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); - scheduler.scheduleWithFixedDelay(containerKeyMapperTask, initialDelay, - interval, TimeUnit.MILLISECONDS); + + scheduler.scheduleWithFixedDelay(() -> { + try { + ozoneManagerServiceProvider.updateReconOmDBWithNewSnapshot(); + // Schedule the task to read OM DB and write the reverse mapping to + // Recon container DB. + ContainerKeyMapperTask containerKeyMapperTask = + new ContainerKeyMapperTask(containerDBServiceProvider, + ozoneManagerServiceProvider.getOMMetadataManagerInstance()); + containerKeyMapperTask.reprocess( + ozoneManagerServiceProvider.getOMMetadataManagerInstance()); + } catch (IOException e) { + LOG.error("Unable to get OM " + + "Snapshot", e); + } + }, initialDelay, interval, TimeUnit.MILLISECONDS); } void stop() throws Exception { diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java index c779e113b6e29..0501093ae058e 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java @@ -112,6 +112,10 @@ public final class ReconServerConfigKeys { public static final String OZONE_RECON_SQL_MAX_IDLE_CONNECTION_TEST_STMT = "ozone.recon.sql.db.conn.idle.test"; + public static final String OZONE_RECON_TASK_THREAD_COUNT_KEY = + "ozone.recon.task.thread.count"; + public static final int OZONE_RECON_TASK_THREAD_COUNT_DEFAULT = 1; + /** * Private constructor for utility class. 
*/ diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java index 3b0fb49f7c0ba..409a7e9cf99df 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java @@ -64,7 +64,6 @@ private void initializeNewRdbStore(File dbFile) throws IOException { try { DBStoreBuilder dbStoreBuilder = DBStoreBuilder.newBuilder(ozoneConfiguration) - .setReadOnly(true) .setName(dbFile.getName()) .setPath(dbFile.toPath().getParent()); addOMTablesAndCodecs(dbStoreBuilder); diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java index a1044ec23b4c1..0449e7cf774f7 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; +import org.apache.hadoop.utils.db.TableIterator; /** * The Recon Container DB Service interface. @@ -75,4 +76,19 @@ Map getKeyPrefixesForContainer(long containerId) * @throws IOException */ Map getContainers() throws IOException; + + /** + * Delete an entry in the container DB. + * @param containerKeyPrefix container key prefix to be deleted. + * @throws IOException exception. + */ + void deleteContainerMapping(ContainerKeyPrefix containerKeyPrefix) + throws IOException; + + /** + * Get iterator to the entire container DB. 
+ * @return TableIterator + * @throws IOException exception + */ + TableIterator getContainerTableIterator() throws IOException; } diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java index 3a20e82c9dc67..e79b8044f087a 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java @@ -191,4 +191,15 @@ public Map getContainers() throws IOException { } return containers; } + + @Override + public void deleteContainerMapping(ContainerKeyPrefix containerKeyPrefix) + throws IOException { + containerKeyTable.delete(containerKeyPrefix); + } + + @Override + public TableIterator getContainerTableIterator() throws IOException { + return containerKeyTable.iterator(); + } } \ No newline at end of file diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java index 9ec1a79448839..47dfff0f2c507 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java @@ -21,14 +21,20 @@ import java.io.IOException; import java.time.Duration; import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Set; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; -import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider; import org.apache.hadoop.utils.db.Table; import org.apache.hadoop.utils.db.TableIterator; import org.slf4j.Logger; @@ -38,19 +44,24 @@ * Class to iterate over the OM DB and populate the Recon container DB with * the container -> Key reverse mapping. 
*/ -public class ContainerKeyMapperTask implements Runnable { +public class ContainerKeyMapperTask extends ReconDBUpdateTask { private static final Logger LOG = LoggerFactory.getLogger(ContainerKeyMapperTask.class); - private OzoneManagerServiceProvider ozoneManagerServiceProvider; private ContainerDBServiceProvider containerDBServiceProvider; + private Collection tables = new ArrayList<>(); - public ContainerKeyMapperTask( - OzoneManagerServiceProvider ozoneManagerServiceProvider, - ContainerDBServiceProvider containerDBServiceProvider) { - this.ozoneManagerServiceProvider = ozoneManagerServiceProvider; + public ContainerKeyMapperTask(ContainerDBServiceProvider + containerDBServiceProvider, + OMMetadataManager omMetadataManager) { + super("ContainerKeyMapperTask"); this.containerDBServiceProvider = containerDBServiceProvider; + try { + tables.add(omMetadataManager.getKeyTable().getName()); + } catch (IOException ioEx) { + LOG.error("Unable to listen on Key Table updates ", ioEx); + } } /** @@ -58,55 +69,122 @@ public ContainerKeyMapperTask( * (container, key) -> count to Recon Container DB. */ @Override - public void run() { + public Pair reprocess(OMMetadataManager omMetadataManager) { int omKeyCount = 0; - int containerCount = 0; try { - LOG.info("Starting a run of ContainerKeyMapperTask."); + LOG.info("Starting a 'reprocess' run of ContainerKeyMapperTask."); Instant start = Instant.now(); - //Update OM DB Snapshot. - ozoneManagerServiceProvider.updateReconOmDBWithNewSnapshot(); - - OMMetadataManager omMetadataManager = ozoneManagerServiceProvider - .getOMMetadataManagerInstance(); Table omKeyInfoTable = omMetadataManager.getKeyTable(); try (TableIterator> keyIter = omKeyInfoTable.iterator()) { while (keyIter.hasNext()) { Table.KeyValue kv = keyIter.next(); - StringBuilder key = new StringBuilder(kv.getKey()); OmKeyInfo omKeyInfo = kv.getValue(); - for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : omKeyInfo - .getKeyLocationVersions()) { - long keyVersion = omKeyLocationInfoGroup.getVersion(); - for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfoGroup - .getLocationList()) { - long containerId = omKeyLocationInfo.getContainerID(); - ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix( - containerId, key.toString(), keyVersion); - if (containerDBServiceProvider.getCountForForContainerKeyPrefix( - containerKeyPrefix) == 0) { - // Save on writes. No need to save same container-key prefix - // mapping again. - containerDBServiceProvider.storeContainerKeyMapping( - containerKeyPrefix, 1); - } - containerCount++; - } - } + writeOMKeyToContainerDB(kv.getKey(), omKeyInfo); omKeyCount++; } } - LOG.info("Completed the run of ContainerKeyMapperTask."); + LOG.info("Completed 'reprocess' of ContainerKeyMapperTask."); Instant end = Instant.now(); long duration = Duration.between(start, end).toMillis(); - LOG.info("It took me " + (double)duration / 1000.0 + " seconds to " + - "process " + omKeyCount + " keys and " + containerCount + " " + - "containers."); + LOG.info("It took me " + (double) duration / 1000.0 + " seconds to " + + "process " + omKeyCount + " keys."); } catch (IOException ioEx) { LOG.error("Unable to populate Container Key Prefix data in Recon DB. 
", ioEx); + return new ImmutablePair<>(getTaskName(), false); + } + return new ImmutablePair<>(getTaskName(), true); + } + + + @Override + protected Collection getTaskTables() { + return tables; + } + + @Override + Pair process(OMUpdateEventBatch events) { + Iterator eventIterator = events.getIterator(); + while (eventIterator.hasNext()) { + OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); + String updatedKey = omdbUpdateEvent.getKey(); + OmKeyInfo updatedKeyValue = omdbUpdateEvent.getValue(); + try { + switch (omdbUpdateEvent.getAction()) { + case PUT: + writeOMKeyToContainerDB(updatedKey, updatedKeyValue); + break; + + case DELETE: + deleteOMKeyFromContainerDB(updatedKey); + break; + + default: LOG.debug("Skipping DB update event : " + omdbUpdateEvent + .getAction()); + } + } catch (IOException e) { + LOG.error("Unexpected exception while updating key data : {} ", e); + return new ImmutablePair<>(getTaskName(), false); + } } + return new ImmutablePair<>(getTaskName(), true); } + + /** + * Delete an OM Key from Container DB. + * @param key key String. + * @throws IOException If Unable to write to container DB. + */ + private void deleteOMKeyFromContainerDB(String key) + throws IOException { + + TableIterator> containerIterator = + containerDBServiceProvider.getContainerTableIterator(); + + Set keysToDeDeleted = new HashSet<>(); + + while (containerIterator.hasNext()) { + Table.KeyValue keyValue = + containerIterator.next(); + String keyPrefix = keyValue.getKey().getKeyPrefix(); + if (keyPrefix.equals(key)) { + keysToDeDeleted.add(keyValue.getKey()); + } + } + + for (ContainerKeyPrefix containerKeyPrefix : keysToDeDeleted) { + containerDBServiceProvider.deleteContainerMapping(containerKeyPrefix); + } + } + + /** + * Write an OM key to container DB. + * @param key key String + * @param omKeyInfo omKeyInfo value + * @throws IOException if unable to write to recon DB. + */ + private void writeOMKeyToContainerDB(String key, OmKeyInfo omKeyInfo) + throws IOException { + for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : omKeyInfo + .getKeyLocationVersions()) { + long keyVersion = omKeyLocationInfoGroup.getVersion(); + for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfoGroup + .getLocationList()) { + long containerId = omKeyLocationInfo.getContainerID(); + ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix( + containerId, key, keyVersion); + if (containerDBServiceProvider.getCountForForContainerKeyPrefix( + containerKeyPrefix) == 0) { + // Save on writes. No need to save same container-key prefix + // mapping again. + containerDBServiceProvider.storeContainerKeyMapping( + containerKeyPrefix, 1); + } + } + } + } + } diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java new file mode 100644 index 0000000000000..82b7a359127ae --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +/** + * A class used to encapsulate a single OM DB update event. + * Currently only PUT and DELETE are supported. + * @param Type of Key. + * @param Type of Value. + */ +public final class OMDBUpdateEvent { + + private final OMDBUpdateAction action; + private final String table; + private final KEY updatedKey; + private final VALUE updatedValue; + private final EventInfo eventInfo; + + private OMDBUpdateEvent(OMDBUpdateAction action, + String table, + KEY updatedKey, + VALUE updatedValue, + EventInfo eventInfo) { + this.action = action; + this.table = table; + this.updatedKey = updatedKey; + this.updatedValue = updatedValue; + this.eventInfo = eventInfo; + } + + public OMDBUpdateAction getAction() { + return action; + } + + public String getTable() { + return table; + } + + public KEY getKey() { + return updatedKey; + } + + public VALUE getValue() { + return updatedValue; + } + + public EventInfo getEventInfo() { + return eventInfo; + } + + /** + * Builder used to construct an OM DB Update event. + * @param Key type. + * @param Value type. + */ + public static class OMUpdateEventBuilder { + + private OMDBUpdateAction action; + private String table; + private KEY updatedKey; + private VALUE updatedValue; + private EventInfo eventInfo; + + OMUpdateEventBuilder setAction(OMDBUpdateAction omdbUpdateAction) { + this.action = omdbUpdateAction; + return this; + } + + OMUpdateEventBuilder setTable(String tableName) { + this.table = tableName; + return this; + } + + OMUpdateEventBuilder setKey(KEY key) { + this.updatedKey = key; + return this; + } + + OMUpdateEventBuilder setValue(VALUE value) { + this.updatedValue = value; + return this; + } + + OMUpdateEventBuilder setEventInfo(long sequenceNumber, + long eventTimestampMillis) { + this.eventInfo = new EventInfo(sequenceNumber, + eventTimestampMillis); + return this; + } + + /** + * Build an OM update event. + * @return OMDBUpdateEvent + */ + public OMDBUpdateEvent build() { + return new OMDBUpdateEvent( + action, + table, + updatedKey, + updatedValue, + eventInfo); + } + } + + /** + * Class used to hold timing information for an event. (Seq number and + * timestamp) + */ + public static class EventInfo { + private long sequenceNumber; + private long eventTimestampMillis; + + public EventInfo(long sequenceNumber, + long eventTimestampMillis) { + this.sequenceNumber = sequenceNumber; + this.eventTimestampMillis = eventTimestampMillis; + } + + public long getSequenceNumber() { + return sequenceNumber; + } + + public long getEventTimestampMillis() { + return eventTimestampMillis; + } + } + + /** + * Supported Actions - PUT, DELETE. 
+ */ + public enum OMDBUpdateAction { + PUT, DELETE + } +} diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java new file mode 100644 index 0000000000000..d2d11b2865fba --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java @@ -0,0 +1,220 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.utils.db.CodecRegistry; +import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.rocksdb.RocksDBException; +import org.rocksdb.WriteBatch; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Class used to listen on OM RocksDB updates. + */ +public class OMDBUpdatesHandler extends WriteBatch.Handler { + + private static final Logger LOG = + LoggerFactory.getLogger(OMDBUpdatesHandler.class); + + private Map tablesNames; + private CodecRegistry codecRegistry; + private List omdbUpdateEvents = new ArrayList<>(); + + public OMDBUpdatesHandler(OMMetadataManager omMetadataManager) { + tablesNames = omMetadataManager.getStore().getTableNames(); + codecRegistry = omMetadataManager.getStore().getCodecRegistry(); + } + + @Override + public void put(int cfIndex, byte[] keyBytes, byte[] valueBytes) throws + RocksDBException { + try { + processEvent(cfIndex, keyBytes, valueBytes, + OMDBUpdateEvent.OMDBUpdateAction.PUT); + } catch (IOException ioEx) { + LOG.error("Exception when reading key : " + ioEx); + } + } + + @Override + public void delete(int cfIndex, byte[] keyBytes) throws RocksDBException { + try { + processEvent(cfIndex, keyBytes, null, + OMDBUpdateEvent.OMDBUpdateAction.DELETE); + } catch (IOException ioEx) { + LOG.error("Exception when reading key : " + ioEx); + } + } + + /** + * + */ + private void processEvent(int cfIndex, byte[] keyBytes, byte[] + valueBytes, OMDBUpdateEvent.OMDBUpdateAction action) + throws IOException { + String tableName = tablesNames.get(cfIndex); + Class keyType = getKeyType(tableName); + Class valueType = getValueType(tableName); + if (valueType != null) { + OMDBUpdateEvent.OMUpdateEventBuilder builder = + new OMDBUpdateEvent.OMUpdateEventBuilder<>(); + builder.setTable(tableName); + + Object key = codecRegistry.asObject(keyBytes, keyType); + builder.setKey(key); + + if (!action.equals(OMDBUpdateEvent.OMDBUpdateAction.DELETE)) { + Object value = codecRegistry.asObject(valueBytes, valueType); + builder.setValue(value); + } + + builder.setAction(action); + OMDBUpdateEvent event = builder.build(); + LOG.info("Generated OM update Event for table : " + event.getTable() + + ", Key = " + event.getKey()); + // Temporarily adding to an event buffer for testing. In subsequent JIRAs, + // a Recon side class will be implemented that requests delta updates + // from OM and calls on this handler. In that case, we will fill up + // this buffer and pass it on to the ReconTaskController which has + // tasks waiting on OM events. 
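// Sketch of that intended wiring (hypothetical driver code, assuming the
// delta updates arrive as a RocksDB WriteBatch read from the OM DB
// transaction log):
//
//   OMDBUpdatesHandler handler = new OMDBUpdatesHandler(omMetadataManager);
//   writeBatch.iterate(handler);  // replays each put/delete into this handler
//   OMUpdateEventBatch batch =
//       new OMUpdateEventBatch(handler.getEvents());
//   // The ReconTaskController would then hand every registered
//   // ReconDBUpdateTask the subset it listens on, e.g.
//   //   task.process(batch.filter(task.getTaskTables()));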
+ omdbUpdateEvents.add(event); + } + } + + // There are no use cases yet for the remaining methods in Recon. These + // will be implemented as and when need arises. + + @Override + public void put(byte[] bytes, byte[] bytes1) { + + } + + @Override + public void merge(int i, byte[] bytes, byte[] bytes1) + throws RocksDBException { + } + + @Override + public void merge(byte[] bytes, byte[] bytes1) { + } + + @Override + public void delete(byte[] bytes) { + } + + @Override + public void singleDelete(int i, byte[] bytes) throws RocksDBException { + } + + @Override + public void singleDelete(byte[] bytes) { + } + + @Override + public void deleteRange(int i, byte[] bytes, byte[] bytes1) + throws RocksDBException { + } + + @Override + public void deleteRange(byte[] bytes, byte[] bytes1) { + + } + + @Override + public void logData(byte[] bytes) { + + } + + @Override + public void putBlobIndex(int i, byte[] bytes, byte[] bytes1) + throws RocksDBException { + } + + @Override + public void markBeginPrepare() throws RocksDBException { + + } + + @Override + public void markEndPrepare(byte[] bytes) throws RocksDBException { + + } + + @Override + public void markNoop(boolean b) throws RocksDBException { + + } + + @Override + public void markRollback(byte[] bytes) throws RocksDBException { + + } + + @Override + public void markCommit(byte[] bytes) throws RocksDBException { + + } + + /** + * Return Key type class for a given table name. + * @param name table name. + * @return String.class by default. + */ + private Class getKeyType(String name) { + return String.class; + } + + /** + * Return Value type class for a given table. + * @param name table name + * @return Value type based on table name. + */ + @VisibleForTesting + protected Class getValueType(String name) { + switch (name) { + case KEY_TABLE : return OmKeyInfo.class; + case VOLUME_TABLE : return OmVolumeArgs.class; + case BUCKET_TABLE : return OmBucketInfo.class; + default: return null; + } + } + + /** + * Get List of events. (Temporary API to unit test the class). + * @return List of events. + */ + public List getEvents() { + return omdbUpdateEvents; + } + +} diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java new file mode 100644 index 0000000000000..3b7cc5bcf21e8 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Wrapper class to hold multiple OM DB update events. + */ +public class OMUpdateEventBatch { + + private List events; + + OMUpdateEventBatch(Collection e) { + events = new ArrayList<>(e); + } + + /** + * Get Sequence Number and timestamp of last event in this batch. + * @return Event Info instance. + */ + OMDBUpdateEvent.EventInfo getLastEventInfo() { + if (events.isEmpty()) { + return new OMDBUpdateEvent.EventInfo(-1, -1); + } else { + return events.get(events.size() - 1).getEventInfo(); + } + } + + /** + * Return iterator to Event batch. + * @return iterator + */ + public Iterator getIterator() { + return events.iterator(); + } + + /** + * Filter events based on Tables. + * @param tables set of tables to filter on. + * @return trimmed event batch. + */ + public OMUpdateEventBatch filter(Collection tables) { + return new OMUpdateEventBatch(events + .stream() + .filter(e -> tables.contains(e.getTable())) + .collect(Collectors.toList())); + } +} diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconDBUpdateTask.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconDBUpdateTask.java new file mode 100644 index 0000000000000..d828577af6b8b --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconDBUpdateTask.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
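
A minimal sketch of how a batch of these events might be assembled and filtered by a task. The example class, table-name literals and key strings are illustrative only; a real PUT event would also carry the deserialized value object via setValue().

package org.apache.hadoop.ozone.recon.tasks;

import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;

/**
 * Illustrative use of OMUpdateEventBatch: build two events, wrap them in a
 * batch and trim the batch to the tables a task cares about.
 */
public final class OmUpdateEventBatchExample {

  private OmUpdateEventBatchExample() {
  }

  public static void main(String[] args) {
    OMDBUpdateEvent keyPut = new OMDBUpdateEvent.OMUpdateEventBuilder()
        .setTable("keyTable")
        .setKey("/sampleVol/bucketOne/key_one")
        .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
        .build();
    OMDBUpdateEvent volumeDelete = new OMDBUpdateEvent.OMUpdateEventBuilder()
        .setTable("volumeTable")
        .setKey("/sampleVol")
        .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
        .build();

    OMUpdateEventBatch batch =
        new OMUpdateEventBatch(Arrays.asList(keyPut, volumeDelete));

    // A task listening only on the key table sees a trimmed batch.
    OMUpdateEventBatch keyTableOnly =
        batch.filter(Collections.singletonList("keyTable"));
    Iterator<OMDBUpdateEvent> iterator = keyTableOnly.getIterator();
    while (iterator.hasNext()) {
      OMDBUpdateEvent event = iterator.next();
      System.out.println(event.getTable() + " : " + event.getKey());
    }
  }
}
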

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import java.util.Collection; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.om.OMMetadataManager; + +/** + * Abstract class used to denote a Recon task that needs to act on OM DB events. + */ +public abstract class ReconDBUpdateTask { + + private String taskName; + + protected ReconDBUpdateTask(String taskName) { + this.taskName = taskName; + } + + /** + * Return task name. + * @return task name + */ + public String getTaskName() { + return taskName; + } + + /** + * Return the list of tables that the task is listening on. + * Empty list means the task is NOT listening on any tables. + * @return Collection of Tables. + */ + protected abstract Collection getTaskTables(); + + /** + * Process a set of OM events on tables that the task is listening on. + * @param events Set of events to be processed by the task. + * @return Pair of task name -> task success. + */ + abstract Pair process(OMUpdateEventBatch events); + + /** + * Process a on tables that the task is listening on. + * @param omMetadataManager OM Metadata manager instance. + * @return Pair of task name -> task success. + */ + abstract Pair reprocess(OMMetadataManager omMetadataManager); + +} diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java new file mode 100644 index 0000000000000..7548cc91160f8 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
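
A minimal concrete task might look like the sketch below; the class name and counting logic are made up, and DummyReconDBTask in the test sources later in this patch follows the same pattern.

package org.apache.hadoop.ozone.recon.tasks;

import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.ozone.om.OMMetadataManager;

/**
 * Hypothetical task that only counts key-table events.
 */
public class KeyEventCountingTask extends ReconDBUpdateTask {

  private long eventsSeen = 0;

  public KeyEventCountingTask() {
    super("KeyEventCountingTask");
  }

  @Override
  protected Collection<String> getTaskTables() {
    // Only key table events are of interest to this task.
    return Collections.singletonList("keyTable");
  }

  @Override
  Pair process(OMUpdateEventBatch events) {
    Iterator eventIterator = events.getIterator();
    while (eventIterator.hasNext()) {
      eventIterator.next();
      eventsSeen++;
    }
    return new ImmutablePair<>(getTaskName(), true);
  }

  @Override
  Pair reprocess(OMMetadataManager omMetadataManager) {
    // A real task would do a full scan of its tables here; the stub just
    // resets the counter and reports success.
    eventsSeen = 0;
    return new ImmutablePair<>(getTaskName(), true);
  }
}
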

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import java.util.Map; + +/** + * Controller used by Recon to manage Tasks that are waiting on Recon events. + */ +public interface ReconTaskController { + + /** + * Register API used by tasks to register themselves. + * @param task task instance + */ + void registerTask(ReconDBUpdateTask task); + + /** + * Pass on a set of OM DB update events to the registered tasks. + * @param events set of events + * @throws InterruptedException InterruptedException + */ + void consumeOMEvents(OMUpdateEventBatch events) throws InterruptedException; + + /** + * Get set of registered tasks. + * @return Map of Task name -> Task. + */ + Map getRegisteredTasks(); +} diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java new file mode 100644 index 0000000000000..3fd7d966ebb48 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java @@ -0,0 +1,198 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
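
From a caller's point of view the contract reduces to registering tasks and pushing event batches at the controller, roughly as sketched below; in the Recon server the controller instance is provided by Guice injection (see ReconTaskControllerImpl that follows).

package org.apache.hadoop.ozone.recon.tasks;

import java.util.Collections;

/**
 * Hypothetical caller-side view of the ReconTaskController contract.
 */
public final class ReconTaskControllerUsage {

  private ReconTaskControllerUsage() {
  }

  public static void driveOnce(ReconTaskController controller,
      ReconDBUpdateTask task) throws InterruptedException {
    controller.registerTask(task);
    // An empty batch is valid; the tests use it to exercise the call path.
    controller.consumeOMEvents(
        new OMUpdateEventBatch(Collections.emptyList()));
  }
}
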

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_TASK_THREAD_COUNT_DEFAULT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_TASK_THREAD_COUNT_KEY; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; +import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; +import org.jooq.Configuration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.inject.Inject; + +/** + * Implementation of ReconTaskController. + */ +public class ReconTaskControllerImpl implements ReconTaskController { + + private static final Logger LOG = + LoggerFactory.getLogger(ReconTaskControllerImpl.class); + + private Map reconDBUpdateTasks; + private ExecutorService executorService; + private int threadCount = 1; + private final Semaphore taskSemaphore = new Semaphore(1); + private final ReconOMMetadataManager omMetadataManager; + private Map taskFailureCounter = new HashMap<>(); + private static final int TASK_FAILURE_THRESHOLD = 2; + private ReconTaskStatusDao reconTaskStatusDao; + + @Inject + public ReconTaskControllerImpl(OzoneConfiguration configuration, + ReconOMMetadataManager omMetadataManager, + Configuration sqlConfiguration) { + this.omMetadataManager = omMetadataManager; + reconDBUpdateTasks = new HashMap<>(); + threadCount = configuration.getInt(OZONE_RECON_TASK_THREAD_COUNT_KEY, + OZONE_RECON_TASK_THREAD_COUNT_DEFAULT); + executorService = Executors.newFixedThreadPool(threadCount); + reconTaskStatusDao = new ReconTaskStatusDao(sqlConfiguration); + } + + @Override + public void registerTask(ReconDBUpdateTask task) { + String taskName = task.getTaskName(); + LOG.info("Registered task " + taskName + " with controller."); + + // Store task in Task Map. + reconDBUpdateTasks.put(taskName, task); + // Store Task in Task failure tracker. + taskFailureCounter.put(taskName, new AtomicInteger(0)); + // Create DB record for the task. + ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus(taskName, + 0L, 0L); + reconTaskStatusDao.insert(reconTaskStatusRecord); + } + + /** + * For every registered task, we try process step twice and then reprocess + * once (if process failed twice) to absorb the events. If a task has failed + * reprocess call more than 2 times across events, it is unregistered + * (blacklisted). 
+ * @param events set of events + * @throws InterruptedException + */ + @Override + public void consumeOMEvents(OMUpdateEventBatch events) + throws InterruptedException { + taskSemaphore.acquire(); + + try { + Collection> tasks = new ArrayList<>(); + for (Map.Entry taskEntry : + reconDBUpdateTasks.entrySet()) { + ReconDBUpdateTask task = taskEntry.getValue(); + tasks.add(() -> task.process(events)); + } + + List> results = executorService.invokeAll(tasks); + List failedTasks = processTaskResults(results, events); + + //Retry + List retryFailedTasks = new ArrayList<>(); + if (!failedTasks.isEmpty()) { + tasks.clear(); + for (String taskName : failedTasks) { + ReconDBUpdateTask task = reconDBUpdateTasks.get(taskName); + tasks.add(() -> task.process(events)); + } + results = executorService.invokeAll(tasks); + retryFailedTasks = processTaskResults(results, events); + } + + //Reprocess + //TODO Move to a separate task queue since reprocess may be a heavy + //operation for large OM DB instances + if (!retryFailedTasks.isEmpty()) { + tasks.clear(); + for (String taskName : failedTasks) { + ReconDBUpdateTask task = reconDBUpdateTasks.get(taskName); + tasks.add(() -> task.reprocess(omMetadataManager)); + } + results = executorService.invokeAll(tasks); + List reprocessFailedTasks = processTaskResults(results, events); + for (String taskName : reprocessFailedTasks) { + LOG.info("Reprocess step failed for task : " + taskName); + if (taskFailureCounter.get(taskName).incrementAndGet() > + TASK_FAILURE_THRESHOLD) { + LOG.info("Blacklisting Task since it failed retry and " + + "reprocess more than " + TASK_FAILURE_THRESHOLD + " times."); + reconDBUpdateTasks.remove(taskName); + } + } + } + } catch (ExecutionException e) { + LOG.error("Unexpected error : ", e); + } finally { + taskSemaphore.release(); + } + } + + /** + * Store the last completed event sequence number and timestamp to the DB + * for that task. + * @param taskName taskname to be updated. + * @param eventInfo contains the new sequence number and timestamp. + */ + private void storeLastCompletedTransaction( + String taskName, OMDBUpdateEvent.EventInfo eventInfo) { + ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus(taskName, + eventInfo.getEventTimestampMillis(), eventInfo.getSequenceNumber()); + reconTaskStatusDao.update(reconTaskStatusRecord); + } + + @Override + public Map getRegisteredTasks() { + return reconDBUpdateTasks; + } + + /** + * Wait on results of all tasks. + * @param results Set of Futures. + * @param events Events. 
+ * @return List of failed task names + * @throws ExecutionException execution Exception + * @throws InterruptedException Interrupted Exception + */ + private List processTaskResults(List> results, + OMUpdateEventBatch events) + throws ExecutionException, InterruptedException { + List failedTasks = new ArrayList<>(); + for (Future f : results) { + String taskName = f.get().getLeft().toString(); + if (!(Boolean)f.get().getRight()) { + LOG.info("Failed task : " + taskName); + failedTasks.add(f.get().getLeft().toString()); + } else { + taskFailureCounter.get(taskName).set(0); + storeLastCompletedTransaction(taskName, events.getLastEventInfo()); + } + } + return failedTasks; + } +} diff --git a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java index bd7ea84b192c5..6363e9c4bd65c 100644 --- a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java @@ -190,8 +190,11 @@ protected void configure() { //Generate Recon container DB data. ContainerKeyMapperTask containerKeyMapperTask = new ContainerKeyMapperTask( - ozoneManagerServiceProvider, containerDbServiceProvider); - containerKeyMapperTask.run(); + containerDbServiceProvider, + ozoneManagerServiceProvider.getOMMetadataManagerInstance()); + ozoneManagerServiceProvider.updateReconOmDBWithNewSnapshot(); + containerKeyMapperTask.reprocess(ozoneManagerServiceProvider + .getOMMetadataManagerInstance()); } @Test diff --git a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractSqlDatabaseTest.java b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractSqlDatabaseTest.java index 983f74a3c1834..c2fbce25ca832 100644 --- a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractSqlDatabaseTest.java +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractSqlDatabaseTest.java @@ -80,7 +80,10 @@ protected DSLContext getDslContext() { return dslContext; } - static class DataSourceConfigurationProvider implements + /** + * Local Sqlite datasource provider. + */ + public static class DataSourceConfigurationProvider implements Provider { @Override diff --git a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java new file mode 100644 index 0000000000000..150007ef58bcf --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
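
The checkpoint written by storeLastCompletedTransaction() can be read back through the generated jOOQ DAO, roughly as in this hypothetical helper; the DAO and POJO usage mirrors the schema tests that follow.

package org.apache.hadoop.ozone.recon.tasks;

import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
import org.jooq.Configuration;

/**
 * Hypothetical helper that reads back the per-task checkpoint stored by the
 * controller after a successful process() call.
 */
public final class TaskProgressLookup {

  private TaskProgressLookup() {
  }

  public static long getLastSequenceNumber(Configuration sqlConfiguration,
      String taskName) {
    ReconTaskStatusDao dao = new ReconTaskStatusDao(sqlConfiguration);
    ReconTaskStatus status = dao.findById(taskName);
    return status == null ? -1L : status.getLastUpdatedSeqNumber();
  }
}
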

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.persistence; + +import static org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition.RECON_TASK_STATUS_TABLE_NAME; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.Types; +import java.util.ArrayList; +import java.util.List; + +import javax.sql.DataSource; + +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; +import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition; +import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; +import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; +import org.jooq.Configuration; +import org.junit.Assert; +import org.junit.Test; + +/** + * Class used to test ReconInternalSchemaDefinition. + */ +public class TestReconInternalSchemaDefinition extends AbstractSqlDatabaseTest { + + @Test + public void testSchemaCreated() throws Exception { + ReconInternalSchemaDefinition schemaDefinition = getInjector().getInstance( + ReconInternalSchemaDefinition.class); + + schemaDefinition.initializeSchema(); + + Connection connection = + getInjector().getInstance(DataSource.class).getConnection(); + // Verify table definition + DatabaseMetaData metaData = connection.getMetaData(); + ResultSet resultSet = metaData.getColumns(null, null, + RECON_TASK_STATUS_TABLE_NAME, null); + + List> expectedPairs = new ArrayList<>(); + + expectedPairs.add(new ImmutablePair<>("task_name", Types.VARCHAR)); + expectedPairs.add(new ImmutablePair<>("last_updated_timestamp", + Types.INTEGER)); + expectedPairs.add(new ImmutablePair<>("last_updated_seq_number", + Types.INTEGER)); + + List> actualPairs = new ArrayList<>(); + + while (resultSet.next()) { + actualPairs.add(new ImmutablePair<>( + resultSet.getString("COLUMN_NAME"), + resultSet.getInt("DATA_TYPE"))); + } + + Assert.assertEquals(3, actualPairs.size()); + Assert.assertEquals(expectedPairs, actualPairs); + } + + @Test + public void testReconTaskStatusCRUDOperations() throws Exception { + // Verify table exists + ReconInternalSchemaDefinition schemaDefinition = getInjector().getInstance( + ReconInternalSchemaDefinition.class); + + schemaDefinition.initializeSchema(); + + DataSource ds = getInjector().getInstance(DataSource.class); + Connection connection = ds.getConnection(); + + DatabaseMetaData metaData = connection.getMetaData(); + ResultSet resultSet = metaData.getTables(null, null, + RECON_TASK_STATUS_TABLE_NAME, null); + + while (resultSet.next()) { + Assert.assertEquals(RECON_TASK_STATUS_TABLE_NAME, + resultSet.getString("TABLE_NAME")); + } + + ReconTaskStatusDao dao = new ReconTaskStatusDao(getInjector().getInstance( + Configuration.class)); + + long now = System.currentTimeMillis(); + ReconTaskStatus newRecord = new ReconTaskStatus(); + newRecord.setTaskName("HelloWorldTask"); + newRecord.setLastUpdatedTimestamp(now); + newRecord.setLastUpdatedSeqNumber(100L); + + // Create + dao.insert(newRecord); + + ReconTaskStatus newRecord2 = new ReconTaskStatus(); + newRecord2.setTaskName("GoodbyeWorldTask"); + newRecord2.setLastUpdatedTimestamp(now); + newRecord2.setLastUpdatedSeqNumber(200L); + // Create + 
dao.insert(newRecord2); + + // Read + ReconTaskStatus dbRecord = dao.findById("HelloWorldTask"); + + Assert.assertEquals("HelloWorldTask", dbRecord.getTaskName()); + Assert.assertEquals(Long.valueOf(now), dbRecord.getLastUpdatedTimestamp()); + Assert.assertEquals(Long.valueOf(100), dbRecord.getLastUpdatedSeqNumber()); + + // Update + dbRecord.setLastUpdatedSeqNumber(150L); + dao.update(dbRecord); + + // Read updated + dbRecord = dao.findById("HelloWorldTask"); + Assert.assertEquals(Long.valueOf(150), dbRecord.getLastUpdatedSeqNumber()); + + // Delete + dao.deleteById("GoodbyeWorldTask"); + + // Verify + dbRecord = dao.findById("GoodbyeWorldTask"); + + Assert.assertNull(dbRecord); + } + +} diff --git a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java index ba2dd0b772af0..78d964dcb12f6 100644 --- a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java @@ -22,7 +22,6 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; import java.io.File; -import java.io.IOException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -127,22 +126,6 @@ public void testUpdateOmDB() throws Exception { .get("/sampleVol/bucketOne/key_one")); Assert.assertNotNull(reconOMMetadataManager.getKeyTable() .get("/sampleVol/bucketOne/key_two")); - - //Verify that we cannot write data to Recon OM DB (Read Only) - try { - reconOMMetadataManager.getKeyTable().put( - "/sampleVol/bucketOne/fail_key", new OmKeyInfo.Builder() - .setBucketName("bucketOne") - .setVolumeName("sampleVol") - .setKeyName("fail_key") - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) - .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) - .build()); - Assert.fail(); - } catch (IOException e) { - Assert.assertTrue(e.getMessage() - .contains("Not supported operation in read only mode")); - } } } \ No newline at end of file diff --git a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java index 75664f035786c..85e5eed834a87 100644 --- a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java @@ -203,4 +203,29 @@ public void testGetKeyPrefixesForContainer() throws Exception { assertTrue(keyPrefixMap.size() == 1); assertTrue(keyPrefixMap.get(containerKeyPrefix3) == 3); } + + @Test + public void testDeleteContainerMapping() throws IOException { + long containerId = System.currentTimeMillis(); + + ContainerKeyPrefix containerKeyPrefix1 = new + ContainerKeyPrefix(containerId, "V3/B1/K1", 0); + containerDbServiceProvider.storeContainerKeyMapping(containerKeyPrefix1, + 1); + + ContainerKeyPrefix containerKeyPrefix2 = new ContainerKeyPrefix( + containerId, "V3/B1/K2", 0); + containerDbServiceProvider.storeContainerKeyMapping(containerKeyPrefix2, + 2); + + Map keyPrefixMap = + 
containerDbServiceProvider.getKeyPrefixesForContainer(containerId); + assertTrue(keyPrefixMap.size() == 2); + + containerDbServiceProvider.deleteContainerMapping(new ContainerKeyPrefix( + containerId, "V3/B1/K2", 0)); + keyPrefixMap = + containerDbServiceProvider.getKeyPrefixesForContainer(containerId); + assertTrue(keyPrefixMap.size() == 1); + } } diff --git a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/DummyReconDBTask.java b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/DummyReconDBTask.java new file mode 100644 index 0000000000000..3073907fc044c --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/DummyReconDBTask.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import java.util.Collection; +import java.util.Collections; + +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.om.OMMetadataManager; + +/** + * Dummy Recon task that has 3 modes of operations. + * ALWAYS_FAIL / FAIL_ONCE / ALWAYS_PASS + */ +public class DummyReconDBTask extends ReconDBUpdateTask { + + private int numFailuresAllowed = Integer.MIN_VALUE; + private int callCtr = 0; + + public DummyReconDBTask(String taskName, TaskType taskType) { + super(taskName); + if (taskType.equals(TaskType.FAIL_ONCE)) { + numFailuresAllowed = 1; + } else if (taskType.equals(TaskType.ALWAYS_FAIL)) { + numFailuresAllowed = Integer.MAX_VALUE; + } + } + + @Override + protected Collection getTaskTables() { + return Collections.singletonList("volumeTable"); + } + + @Override + Pair process(OMUpdateEventBatch events) { + if (++callCtr <= numFailuresAllowed) { + return new ImmutablePair<>(getTaskName(), false); + } else { + return new ImmutablePair<>(getTaskName(), true); + } + } + + @Override + Pair reprocess(OMMetadataManager omMetadataManager) { + if (++callCtr <= numFailuresAllowed) { + return new ImmutablePair<>(getTaskName(), false); + } else { + return new ImmutablePair<>(getTaskName(), true); + } + } + + /** + * Type of the task. + */ + public enum TaskType { + ALWAYS_PASS, + FAIL_ONCE, + ALWAYS_FAIL + } +} diff --git a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java index 6ee95e62a3d23..c67d7fd46f11f 100644 --- a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java @@ -23,10 +23,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import java.io.File; -import java.io.FileInputStream; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -34,9 +31,10 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.recon.AbstractOMMetadataManagerTest; @@ -48,14 +46,11 @@ import org.apache.hadoop.ozone.recon.spi.impl.ContainerDBServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.ReconContainerDBProvider; -import org.apache.hadoop.utils.db.DBCheckpoint; import org.apache.hadoop.utils.db.DBStore; -import org.apache.http.impl.client.CloseableHttpClient; 
import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PowerMockIgnore; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; @@ -110,7 +105,7 @@ protected void configure() { } @Test - public void testRun() throws Exception{ + public void testReprocessOMDB() throws Exception{ Map keyPrefixesForContainer = containerDbServiceProvider.getKeyPrefixesForContainer(1); @@ -137,25 +132,17 @@ public void testRun() throws Exception{ OmKeyLocationInfoGroup omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0, omKeyLocationInfoList); - writeDataToOm(omMetadataManager, + writeDataToOm(reconOMMetadataManager, "key_one", "bucketOne", "sampleVol", Collections.singletonList(omKeyLocationInfoGroup)); - //Take snapshot of OM DB and copy over to Recon OM DB. - DBCheckpoint checkpoint = omMetadataManager.getStore() - .getCheckpoint(true); - File tarFile = OmUtils.createTarFile(checkpoint.getCheckpointLocation()); - InputStream inputStream = new FileInputStream(tarFile); - PowerMockito.stub(PowerMockito.method(ReconUtils.class, - "makeHttpCall", - CloseableHttpClient.class, String.class)) - .toReturn(inputStream); - - ContainerKeyMapperTask containerKeyMapperTask = new ContainerKeyMapperTask( - ozoneManagerServiceProvider, containerDbServiceProvider); - containerKeyMapperTask.run(); + ContainerKeyMapperTask containerKeyMapperTask = + new ContainerKeyMapperTask(containerDbServiceProvider, + ozoneManagerServiceProvider.getOMMetadataManagerInstance()); + containerKeyMapperTask.reprocess(ozoneManagerServiceProvider + .getOMMetadataManagerInstance()); keyPrefixesForContainer = containerDbServiceProvider.getKeyPrefixesForContainer(1); @@ -176,6 +163,130 @@ public void testRun() throws Exception{ keyPrefixesForContainer.get(containerKeyPrefix).intValue()); } + @Test + public void testProcessOMEvents() throws IOException { + Map keyPrefixesForContainer = + containerDbServiceProvider.getKeyPrefixesForContainer(1); + assertTrue(keyPrefixesForContainer.isEmpty()); + + keyPrefixesForContainer = containerDbServiceProvider + .getKeyPrefixesForContainer(2); + assertTrue(keyPrefixesForContainer.isEmpty()); + + Pipeline pipeline = getRandomPipeline(); + + List omKeyLocationInfoList = new ArrayList<>(); + BlockID blockID1 = new BlockID(1, 1); + OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1, + pipeline); + + BlockID blockID2 = new BlockID(2, 1); + OmKeyLocationInfo omKeyLocationInfo2 + = getOmKeyLocationInfo(blockID2, pipeline); + + omKeyLocationInfoList.add(omKeyLocationInfo1); + omKeyLocationInfoList.add(omKeyLocationInfo2); + + OmKeyLocationInfoGroup omKeyLocationInfoGroup = new + OmKeyLocationInfoGroup(0, omKeyLocationInfoList); + + String bucket = "bucketOne"; + String volume = "sampleVol"; + String key = "key_one"; + String omKey = omMetadataManager.getOzoneKey(volume, bucket, key); + OmKeyInfo omKeyInfo = buildOmKeyInfo(volume, bucket, key, + omKeyLocationInfoGroup); + + OMDBUpdateEvent keyEvent1 = new OMDBUpdateEvent. 
+ OMUpdateEventBuilder() + .setKey(omKey) + .setValue(omKeyInfo) + .setTable(omMetadataManager.getKeyTable().getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + + BlockID blockID3 = new BlockID(1, 2); + OmKeyLocationInfo omKeyLocationInfo3 = + getOmKeyLocationInfo(blockID3, pipeline); + + BlockID blockID4 = new BlockID(3, 1); + OmKeyLocationInfo omKeyLocationInfo4 + = getOmKeyLocationInfo(blockID4, pipeline); + + omKeyLocationInfoList = new ArrayList<>(); + omKeyLocationInfoList.add(omKeyLocationInfo3); + omKeyLocationInfoList.add(omKeyLocationInfo4); + omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0, + omKeyLocationInfoList); + + String key2 = "key_two"; + writeDataToOm(reconOMMetadataManager, key2, bucket, volume, Collections + .singletonList(omKeyLocationInfoGroup)); + + omKey = omMetadataManager.getOzoneKey(volume, bucket, key2); + OMDBUpdateEvent keyEvent2 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omKey) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE) + .setTable(omMetadataManager.getKeyTable().getName()) + .build(); + + OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(new + ArrayList() {{ + add(keyEvent1); + add(keyEvent2); + }}); + + ContainerKeyMapperTask containerKeyMapperTask = + new ContainerKeyMapperTask(containerDbServiceProvider, + ozoneManagerServiceProvider.getOMMetadataManagerInstance()); + containerKeyMapperTask.reprocess(ozoneManagerServiceProvider + .getOMMetadataManagerInstance()); + + keyPrefixesForContainer = containerDbServiceProvider + .getKeyPrefixesForContainer(1); + assertTrue(keyPrefixesForContainer.size() == 1); + + keyPrefixesForContainer = containerDbServiceProvider + .getKeyPrefixesForContainer(2); + assertTrue(keyPrefixesForContainer.isEmpty()); + + keyPrefixesForContainer = containerDbServiceProvider + .getKeyPrefixesForContainer(3); + assertTrue(keyPrefixesForContainer.size() == 1); + + // Process PUT & DELETE event. + containerKeyMapperTask.process(omUpdateEventBatch); + + keyPrefixesForContainer = containerDbServiceProvider + .getKeyPrefixesForContainer(1); + assertTrue(keyPrefixesForContainer.size() == 1); + + keyPrefixesForContainer = containerDbServiceProvider + .getKeyPrefixesForContainer(2); + assertTrue(keyPrefixesForContainer.size() == 1); + + keyPrefixesForContainer = containerDbServiceProvider + .getKeyPrefixesForContainer(3); + assertTrue(keyPrefixesForContainer.isEmpty()); + + } + + private OmKeyInfo buildOmKeyInfo(String volume, + String bucket, + String key, + OmKeyLocationInfoGroup + omKeyLocationInfoGroup) { + return new OmKeyInfo.Builder() + .setBucketName(bucket) + .setVolumeName(volume) + .setKeyName(key) + .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) + .setOmKeyLocationInfos(Collections.singletonList( + omKeyLocationInfoGroup)) + .build(); + } /** * Get Test OzoneConfiguration instance. * @return OzoneConfiguration diff --git a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java new file mode 100644 index 0000000000000..98feaff4d4c44 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java @@ -0,0 +1,207 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
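
The test above exercises a bootstrap-then-tail pattern for the mapper task: one full rebuild from an OM DB instance via reprocess(), then incremental batches applied with process(). A condensed sketch of that pattern, with a hypothetical helper name:

package org.apache.hadoop.ozone.recon.tasks;

import org.apache.hadoop.ozone.om.OMMetadataManager;

/**
 * Hypothetical condensation of the bootstrap-then-tail flow used in the
 * container key mapper tests.
 */
public final class ContainerKeyMapperBootstrap {

  private ContainerKeyMapperBootstrap() {
  }

  public static void bootstrapAndTail(ContainerKeyMapperTask task,
      OMMetadataManager omDb, OMUpdateEventBatch deltaBatch) {
    task.reprocess(omDb);     // full scan of the OM key table
    task.process(deltaBatch); // apply PUT/DELETE events incrementally
  }
}
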
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.utils.db.RDBStore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.rocksdb.RocksDB; +import org.rocksdb.TransactionLogIterator; +import org.rocksdb.WriteBatch; + +/** + * Class used to test OMDBUpdatesHandler. + */ +public class TestOMDBUpdatesHandler { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OzoneConfiguration createNewTestPath() throws IOException { + OzoneConfiguration configuration = new OzoneConfiguration(); + File newFolder = folder.newFolder(); + if (!newFolder.exists()) { + assertTrue(newFolder.mkdirs()); + } + ServerUtils.setOzoneMetaDirPath(configuration, newFolder.toString()); + return configuration; + } + + @Test + public void testPut() throws Exception { + OzoneConfiguration configuration = createNewTestPath(); + OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration); + + String volumeKey = metaMgr.getVolumeKey("sampleVol"); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setVolume("sampleVol") + .setAdminName("bilbo") + .setOwnerName("bilbo") + .build(); + metaMgr.getVolumeTable().put(volumeKey, args); + + OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + .setBucketName("bucketOne") + .setVolumeName("sampleVol") + .setKeyName("key_one") + .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) + .build(); + + metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_one", omKeyInfo); + RDBStore rdbStore = (RDBStore) metaMgr.getStore(); + + RocksDB rocksDB = rdbStore.getDb(); + TransactionLogIterator transactionLogIterator = + rocksDB.getUpdatesSince(0); + List writeBatches = new ArrayList<>(); + + while(transactionLogIterator.isValid()) { + TransactionLogIterator.BatchResult result = + transactionLogIterator.getBatch(); + result.writeBatch().markWalTerminationPoint(); + WriteBatch writeBatch = result.writeBatch(); + writeBatches.add(writeBatch.data()); + transactionLogIterator.next(); + } + + OzoneConfiguration conf2 = createNewTestPath(); + OmMetadataManagerImpl reconOmmetaMgr = new OmMetadataManagerImpl(conf2); + List events = new ArrayList<>(); + for (byte[] data : writeBatches) { + WriteBatch writeBatch = new WriteBatch(data); + OMDBUpdatesHandler omdbUpdatesHandler = + new OMDBUpdatesHandler(reconOmmetaMgr); + writeBatch.iterate(omdbUpdatesHandler); + events.addAll(omdbUpdatesHandler.getEvents()); + } + assertNotNull(events); + assertTrue(events.size() == 2); + + OMDBUpdateEvent 
volEvent = events.get(0); + assertEquals(OMDBUpdateEvent.OMDBUpdateAction.PUT, volEvent.getAction()); + assertEquals(volumeKey, volEvent.getKey()); + assertEquals(args.getVolume(), ((OmVolumeArgs)volEvent.getValue()) + .getVolume()); + + OMDBUpdateEvent keyEvent = events.get(1); + assertEquals(OMDBUpdateEvent.OMDBUpdateAction.PUT, keyEvent.getAction()); + assertEquals("/sampleVol/bucketOne/key_one", keyEvent.getKey()); + assertEquals(omKeyInfo.getBucketName(), + ((OmKeyInfo)keyEvent.getValue()).getBucketName()); + } + + @Test + public void testDelete() throws Exception { + OzoneConfiguration configuration = createNewTestPath(); + OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration); + + String volumeKey = metaMgr.getVolumeKey("sampleVol"); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setVolume("sampleVol") + .setAdminName("bilbo") + .setOwnerName("bilbo") + .build(); + metaMgr.getVolumeTable().put(volumeKey, args); + + OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + .setBucketName("bucketOne") + .setVolumeName("sampleVol") + .setKeyName("key_one") + .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) + .build(); + + metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_one", omKeyInfo); + + metaMgr.getKeyTable().delete("/sampleVol/bucketOne/key_one"); + metaMgr.getVolumeTable().delete(volumeKey); + + RDBStore rdbStore = (RDBStore) metaMgr.getStore(); + + RocksDB rocksDB = rdbStore.getDb(); + TransactionLogIterator transactionLogIterator = + rocksDB.getUpdatesSince(0); + List writeBatches = new ArrayList<>(); + + while(transactionLogIterator.isValid()) { + TransactionLogIterator.BatchResult result = + transactionLogIterator.getBatch(); + result.writeBatch().markWalTerminationPoint(); + WriteBatch writeBatch = result.writeBatch(); + writeBatches.add(writeBatch.data()); + transactionLogIterator.next(); + } + + OzoneConfiguration conf2 = createNewTestPath(); + OmMetadataManagerImpl reconOmmetaMgr = new OmMetadataManagerImpl(conf2); + List events = new ArrayList<>(); + for (byte[] data : writeBatches) { + WriteBatch writeBatch = new WriteBatch(data); + OMDBUpdatesHandler omdbUpdatesHandler = + new OMDBUpdatesHandler(reconOmmetaMgr); + writeBatch.iterate(omdbUpdatesHandler); + events.addAll(omdbUpdatesHandler.getEvents()); + } + assertNotNull(events); + assertTrue(events.size() == 4); + + OMDBUpdateEvent keyEvent = events.get(2); + assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, keyEvent.getAction()); + assertEquals("/sampleVol/bucketOne/key_one", keyEvent.getKey()); + + OMDBUpdateEvent volEvent = events.get(3); + assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, volEvent.getAction()); + assertEquals(volumeKey, volEvent.getKey()); + } + + @Test + public void testGetValueType() throws IOException { + OzoneConfiguration configuration = createNewTestPath(); + OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration); + OMDBUpdatesHandler omdbUpdatesHandler = + new OMDBUpdatesHandler(metaMgr); + + assertEquals(OmKeyInfo.class, omdbUpdatesHandler.getValueType( + metaMgr.getKeyTable().getName())); + assertEquals(OmVolumeArgs.class, omdbUpdatesHandler.getValueType( + metaMgr.getVolumeTable().getName())); + assertEquals(OmBucketInfo.class, omdbUpdatesHandler.getValueType( + metaMgr.getBucketTable().getName())); + } +} \ No newline at end of file diff --git a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java 
b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java new file mode 100644 index 0000000000000..b587c89f5ba0b --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java @@ -0,0 +1,171 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.File; +import java.util.Collections; + +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.recon.persistence.AbstractSqlDatabaseTest; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.recovery.ReconOmMetadataManagerImpl; +import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition; +import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; +import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; +import org.jooq.Configuration; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Class used to test ReconTaskControllerImpl. + */ +public class TestReconTaskControllerImpl extends AbstractSqlDatabaseTest { + + private ReconTaskController reconTaskController; + + private Configuration sqlConfiguration; + @Before + public void setUp() throws Exception { + + File omDbDir = temporaryFolder.newFolder(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OZONE_OM_DB_DIRS, omDbDir.getAbsolutePath()); + ReconOMMetadataManager omMetadataManager = new ReconOmMetadataManagerImpl( + ozoneConfiguration); + + sqlConfiguration = getInjector() + .getInstance(Configuration.class); + + ReconInternalSchemaDefinition schemaDefinition = getInjector(). 
+ getInstance(ReconInternalSchemaDefinition.class); + schemaDefinition.initializeSchema(); + + reconTaskController = new ReconTaskControllerImpl(ozoneConfiguration, + omMetadataManager, sqlConfiguration); + } + + @Test + public void testRegisterTask() throws Exception { + String taskName = "Dummy_" + System.currentTimeMillis(); + DummyReconDBTask dummyReconDBTask = + new DummyReconDBTask(taskName, DummyReconDBTask.TaskType.ALWAYS_PASS); + reconTaskController.registerTask(dummyReconDBTask); + assertTrue(reconTaskController.getRegisteredTasks().size() == 1); + assertTrue(reconTaskController.getRegisteredTasks() + .get(dummyReconDBTask.getTaskName()) == dummyReconDBTask); + } + + @Test + public void testConsumeOMEvents() throws Exception { + + ReconDBUpdateTask reconDBUpdateTaskMock = mock(ReconDBUpdateTask.class); + when(reconDBUpdateTaskMock.getTaskTables()).thenReturn(Collections + .EMPTY_LIST); + when(reconDBUpdateTaskMock.getTaskName()).thenReturn("MockTask"); + when(reconDBUpdateTaskMock.process(any(OMUpdateEventBatch.class))) + .thenReturn(new ImmutablePair<>("MockTask", true)); + reconTaskController.registerTask(reconDBUpdateTaskMock); + reconTaskController.consumeOMEvents( + new OMUpdateEventBatch(Collections.emptyList())); + + verify(reconDBUpdateTaskMock, times(1)) + .process(any()); + } + + @Test + public void testFailedTaskRetryLogic() throws Exception { + String taskName = "Dummy_" + System.currentTimeMillis(); + DummyReconDBTask dummyReconDBTask = + new DummyReconDBTask(taskName, DummyReconDBTask.TaskType.FAIL_ONCE); + reconTaskController.registerTask(dummyReconDBTask); + + + long currentTime = System.nanoTime(); + OMDBUpdateEvent.EventInfo eventInfoMock = mock( + OMDBUpdateEvent.EventInfo.class); + when(eventInfoMock.getSequenceNumber()).thenReturn(100L); + when(eventInfoMock.getEventTimestampMillis()).thenReturn(currentTime); + + OMUpdateEventBatch omUpdateEventBatchMock = mock(OMUpdateEventBatch.class); + when(omUpdateEventBatchMock.getLastEventInfo()).thenReturn(eventInfoMock); + + reconTaskController.consumeOMEvents(omUpdateEventBatchMock); + assertFalse(reconTaskController.getRegisteredTasks().isEmpty()); + assertEquals(dummyReconDBTask, reconTaskController.getRegisteredTasks() + .get(dummyReconDBTask.getTaskName())); + + ReconTaskStatusDao dao = new ReconTaskStatusDao(sqlConfiguration); + ReconTaskStatus dbRecord = dao.findById(taskName); + + Assert.assertEquals(taskName, dbRecord.getTaskName()); + Assert.assertEquals(Long.valueOf(currentTime), + dbRecord.getLastUpdatedTimestamp()); + Assert.assertEquals(Long.valueOf(100L), dbRecord.getLastUpdatedSeqNumber()); + } + + @Test + public void testBadBehavedTaskBlacklisting() throws Exception { + String taskName = "Dummy_" + System.currentTimeMillis(); + DummyReconDBTask dummyReconDBTask = + new DummyReconDBTask(taskName, DummyReconDBTask.TaskType.ALWAYS_FAIL); + reconTaskController.registerTask(dummyReconDBTask); + + + long currentTime = System.nanoTime(); + OMDBUpdateEvent.EventInfo eventInfoMock = + mock(OMDBUpdateEvent.EventInfo.class); + when(eventInfoMock.getSequenceNumber()).thenReturn(100L); + when(eventInfoMock.getEventTimestampMillis()).thenReturn(currentTime); + + OMUpdateEventBatch omUpdateEventBatchMock = mock(OMUpdateEventBatch.class); + when(omUpdateEventBatchMock.getLastEventInfo()).thenReturn(eventInfoMock); + + for (int i = 0; i < 2; i++) { + reconTaskController.consumeOMEvents(omUpdateEventBatchMock); + + assertFalse(reconTaskController.getRegisteredTasks().isEmpty()); + assertEquals(dummyReconDBTask, 
reconTaskController.getRegisteredTasks() + .get(dummyReconDBTask.getTaskName())); + } + + //Should be blacklisted now. + reconTaskController.consumeOMEvents( + new OMUpdateEventBatch(Collections.emptyList())); + assertTrue(reconTaskController.getRegisteredTasks().isEmpty()); + + ReconTaskStatusDao dao = new ReconTaskStatusDao(sqlConfiguration); + ReconTaskStatus dbRecord = dao.findById(taskName); + + Assert.assertEquals(taskName, dbRecord.getTaskName()); + Assert.assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedTimestamp()); + Assert.assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedSeqNumber()); + } +} \ No newline at end of file From 460ba7fb14114f44e14a660f533f32c54e504478 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Thu, 23 May 2019 18:38:47 -0400 Subject: [PATCH 0031/1308] YARN-9558. Fixed LogAggregation test cases. Contributed by Prabhu Joseph --- .../hadoop/yarn/client/cli/TestLogsCLI.java | 95 ++++++++++- .../AggregatedLogDeletionService.java | 10 +- .../logaggregation/LogAggregationUtils.java | 49 +----- .../yarn/logaggregation/LogCLIHelpers.java | 156 +++++++++++------- .../tfile/LogAggregationTFileController.java | 8 +- .../tfile/TFileAggregatedLogsBlock.java | 9 +- .../TestAggregatedLogDeletionService.java | 19 ++- .../TestAggregatedLogsBlock.java | 2 +- .../TestContainerLogsUtils.java | 12 +- .../TestLogAggregationService.java | 14 +- 10 files changed, 235 insertions(+), 139 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java index 801cf40d933f1..7a229dc992a18 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java @@ -18,6 +18,10 @@ package org.apache.hadoop.yarn.client.cli; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_FILE_CONTROLLER_FMT; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_REMOTE_APP_LOG_DIR_FMT; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_REMOTE_APP_LOG_DIR_SUFFIX_FMT; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -82,6 +86,7 @@ import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController; import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext; import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory; +import org.apache.hadoop.yarn.logaggregation.filecontroller.ifile.LogAggregationIndexedFileController; import org.codehaus.jettison.json.JSONArray; import org.codehaus.jettison.json.JSONObject; import org.junit.Assert; @@ -407,7 +412,7 @@ public void testFetchFinishedApplictionLogs() throws Exception { Path path = new Path(remoteLogRootDir + ugi.getShortUserName() - + "/bucket_logs/0001/application_0_0001"); + + "/bucket-logs-tfile/0001/application_0_0001"); if (fs.exists(path)) { fs.delete(path, true); } @@ -966,8 +971,8 @@ public void testFetchApplictionLogsAsAnotherUser() throws Exception { createContainerLogInLocalDir(appLogsDir, containerId, fs, logTypes); // create the remote app dir for app but for a 
different user testUser - Path path = new Path(remoteLogRootDir + testUser + "/bucket_logs/0001/" - + appId); + Path path = new Path(remoteLogRootDir + testUser + + "/bucket-logs-tfile/0001/" + appId); if (fs.exists(path)) { fs.delete(path, true); } @@ -1049,7 +1054,7 @@ public void testFetchApplictionLogsAsAnotherUser() throws Exception { System.currentTimeMillis(), 1000); String priorityUser = "priority"; Path pathWithoutPerm = new Path(remoteLogRootDir + priorityUser - + "/bucket_logs/1000/" + appTest); + + "/bucket-logs-tfile/1000/" + appTest); if (fs.exists(pathWithoutPerm)) { fs.delete(pathWithoutPerm, true); } @@ -1139,6 +1144,84 @@ public void testLogsCLIWithInvalidArgs() throws Exception { } } + @Test (timeout = 5000) + public void testGuessAppOwnerWithCustomSuffix() throws Exception { + String remoteLogRootDir = "target/logs/"; + String jobUser = "user1"; + String loggedUser = "user2"; + Configuration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true); + conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogRootDir); + conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); + conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin"); + String controllerName = "indexed"; + conf.set(YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS, controllerName); + conf.set(String.format(LOG_AGGREGATION_FILE_CONTROLLER_FMT, + controllerName), LogAggregationIndexedFileController.class.getName()); + conf.set(String.format(LOG_AGGREGATION_REMOTE_APP_LOG_DIR_SUFFIX_FMT, + controllerName), controllerName); + + FileSystem fs = FileSystem.get(conf); + try { + // Test New App Log Dir Struture (after YARN-6929) with Custom Suffix + ApplicationId appId1 = ApplicationId.newInstance(0, 1); + Path path = new Path(remoteLogRootDir + jobUser + "/bucket-indexed/0001/" + + appId1); + if (fs.exists(path)) { + fs.delete(path, true); + } + assertTrue(fs.mkdirs(path)); + String appOwner = LogCLIHelpers.getOwnerForAppIdOrNull(appId1, + loggedUser, conf); + assertThat(appOwner).isEqualTo(jobUser); + + // Test Old App Log Dir Struture (before YARN-6929) with Custom Suffix + ApplicationId appId2 = ApplicationId.newInstance(0, 2); + path = new Path(remoteLogRootDir + jobUser + "/indexed/" + appId2); + if (fs.exists(path)) { + fs.delete(path, true); + } + assertTrue(fs.mkdirs(path)); + appOwner = LogCLIHelpers.getOwnerForAppIdOrNull(appId2, loggedUser, conf); + assertThat(appOwner).isEqualTo(jobUser); + } finally { + fs.delete(new Path(remoteLogRootDir), true); + } + } + + @Test (timeout = 5000) + public void testGuessAppOwnerWithCustomAppLogDir() throws Exception { + String remoteLogRootDir = "target/logs/"; + String remoteLogRootDir1 = "target/logs1/"; + String jobUser = "user1"; + String loggedUser = "user2"; + Configuration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true); + conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogRootDir); + String controllerName = "indexed"; + conf.set(YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS, controllerName); + conf.set(String.format(LOG_AGGREGATION_FILE_CONTROLLER_FMT, + controllerName), LogAggregationIndexedFileController.class.getName()); + conf.set(String.format(LOG_AGGREGATION_REMOTE_APP_LOG_DIR_FMT, + controllerName), remoteLogRootDir1); + + FileSystem fs = FileSystem.get(conf); + try { + // Test Custom App Log Dir Structure + ApplicationId appId1 = ApplicationId.newInstance(0, 3); + Path path = new Path(remoteLogRootDir1 + jobUser + + "/bucket-logs-indexed/0003/" + 
appId1); + if (fs.exists(path)) { + fs.delete(path, true); + } + assertTrue(fs.mkdirs(path)); + String appOwner = LogCLIHelpers.getOwnerForAppIdOrNull(appId1, + loggedUser, conf); + assertThat(appOwner).isEqualTo(jobUser); + } finally { + fs.delete(new Path(remoteLogRootDir1), true); + } + } @Test (timeout = 15000) public void testSaveContainerLogsLocally() throws Exception { @@ -1407,7 +1490,7 @@ public void testFetchApplictionLogsHar() throws Exception { assertNotNull(harUrl); Path path = new Path(remoteLogRootDir + ugi.getShortUserName() - + "/logs/application_1440536969523_0001"); + + "/bucket-logs-tfile/0001/application_1440536969523_0001"); if (fs.exists(path)) { fs.delete(path, true); } @@ -1468,7 +1551,7 @@ private void createContainerLogs(Configuration configuration, } Path path = new Path(remoteLogRootDir + ugi.getShortUserName() - + "/logs/application_0_0001"); + + "/bucket-logs-tfile/0001/application_0_0001"); if (fs.exists(path)) { fs.delete(path, true); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java index 033339acd6492..e0233b31647bb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java @@ -22,6 +22,8 @@ import java.util.Timer; import java.util.TimerTask; +import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory; +import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -68,9 +70,11 @@ public LogDeletionTask(Configuration conf, long retentionSecs, ApplicationClient this.conf = conf; this.retentionMillis = retentionSecs * 1000; this.suffix = LogAggregationUtils.getBucketSuffix(); - this.remoteRootLogDir = - new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, - YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR)); + LogAggregationFileControllerFactory factory = + new LogAggregationFileControllerFactory(conf); + LogAggregationFileController fileController = + factory.getFileControllerForWrite(); + this.remoteRootLogDir = fileController.getRemoteRootLogDir(); this.rmClient = rmClient; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java index deff2aaa75acf..b51be9af14d2e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java @@ -38,7 +38,7 @@ public class LogAggregationUtils { public static final String TMP_FILE_SUFFIX = ".tmp"; - private static final String BUCKET_SUFFIX = "bucket_"; + private static final String BUCKET_SUFFIX = "bucket-"; /** * Constructs the full filename for an application's log file per node. 
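
With the suffix change above, aggregated tfile logs land under {remote-root}/{user}/bucket-{suffix}/{bucket}/{appId}, as the updated test paths show. The sketch below is hypothetical: the zero-padded bucket component (application sequence number modulo 10000) is inferred from the test fixtures in this patch, not from utility code visible here, and production code resolves the directory through LogAggregationFileController / LogAggregationUtils.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationId;

/**
 * Hypothetical sketch of the per-application aggregated log directory layout
 * the updated tests expect, e.g.
 * target/logs/user1/bucket-logs-tfile/0001/application_0_0001.
 */
public final class AggregatedLogPathExample {

  private AggregatedLogPathExample() {
  }

  public static Path remoteAppLogDir(Path remoteRootLogDir, String user,
      String suffix, ApplicationId appId) {
    // Bucket rule assumed from the fixtures: sequence number mod 10000,
    // zero padded to four digits.
    String bucket = String.format("%04d", appId.getId() % 10000);
    return new Path(remoteRootLogDir,
        user + "/bucket-" + suffix + "/" + bucket + "/" + appId);
  }
}
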
@@ -173,16 +173,6 @@ public static boolean isOlderPathEnabled(Configuration conf) { YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR_INCLUDE_OLDER); } - /** - * Returns the suffix component of the log dir. - * @param conf the configuration - * @return the suffix which will be appended to the user log dir. - */ - public static String getRemoteNodeLogDirSuffix(Configuration conf) { - return conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, - YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX); - } - /** * Returns the bucket suffix component of the log dir. * @return the bucket suffix which appended to user log dir @@ -207,25 +197,6 @@ public static String getNodeString(String nodeId) { return nodeId.toString().replace(":", "_"); } - /** - * Return the remote application log directory. - * @param conf the configuration - * @param appId the application - * @param appOwner the application owner - * @return the remote application log directory path - * @throws IOException if we can not find remote application log directory - */ - public static org.apache.hadoop.fs.Path getRemoteAppLogDir( - Configuration conf, ApplicationId appId, String appOwner) - throws IOException { - String suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(conf); - org.apache.hadoop.fs.Path remoteRootLogDir = - new org.apache.hadoop.fs.Path(conf.get( - YarnConfiguration.NM_REMOTE_APP_LOG_DIR, - YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR)); - return getRemoteAppLogDir(conf, appId, appOwner, remoteRootLogDir, suffix); - } - /** * Return the remote application log directory. * @param conf the configuration @@ -397,22 +368,4 @@ public static List getRemoteNodeFileList( return nodeFiles; } - /** - * Get all available log files under remote app log directory. - * @param conf the configuration - * @param appId the applicationId - * @param appOwner the application owner - * @return the iterator of available log files - * @throws IOException if there is no log file available - */ - public static RemoteIterator getRemoteNodeFileDir( - Configuration conf, ApplicationId appId, String appOwner) - throws IOException { - String suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(conf); - Path remoteRootLogDir = new Path(conf.get( - YarnConfiguration.NM_REMOTE_APP_LOG_DIR, - YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR)); - return getRemoteNodeFileDir(conf, appId, appOwner, - remoteRootLogDir, suffix); - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java index 8a72d80722ef6..385ad094aa908 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java @@ -68,83 +68,113 @@ public int dumpAContainersLogs(String appId, String containerId, return dumpAContainerLogsForLogType(options, false); } - @Private - @VisibleForTesting - /** - * Return the owner for a given AppId - * @param remoteRootLogDir - * @param appId - * @param bestGuess - * @param conf - * @return the owner or null - * @throws IOException - */ - public static String getOwnerForAppIdOrNull( - ApplicationId appId, String bestGuess, - Configuration conf) throws IOException { - Path remoteRootLogDir = new Path(conf.get( - 
YarnConfiguration.NM_REMOTE_APP_LOG_DIR, - YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR)); - String suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(conf); - Path fullPath = LogAggregationUtils.getRemoteAppLogDir(remoteRootLogDir, - appId, bestGuess, suffix); + public static String guessOwnerWithFileFormat( + LogAggregationFileController fileFormat, ApplicationId appId, + String bestGuess, Configuration conf) throws IOException { + boolean scanOldPath = LogAggregationUtils.isOlderPathEnabled(conf); + Path remoteRootLogDir = fileFormat.getRemoteRootLogDir(); + String suffix = fileFormat.getRemoteRootLogDirSuffix(); + Path fullPath = fileFormat.getRemoteAppLogDir(appId, bestGuess); FileContext fc = FileContext.getFileContext(remoteRootLogDir.toUri(), conf); String pathAccess = fullPath.toString(); + try { if (fc.util().exists(fullPath)) { return bestGuess; } - boolean scanOldPath = LogAggregationUtils.isOlderPathEnabled(conf); if (scanOldPath) { - Path olderAppPath = LogAggregationUtils.getOlderRemoteAppLogDir(appId, - bestGuess, remoteRootLogDir, suffix); + Path olderAppPath = fileFormat.getOlderRemoteAppLogDir(appId, + bestGuess); + pathAccess = olderAppPath.toString(); if (fc.util().exists(olderAppPath)) { return bestGuess; } } + } catch (AccessControlException | AccessDeniedException ex) { + logDirNoAccessPermission(pathAccess, bestGuess, ex.getMessage()); + throw ex; + } + + try { + Path toMatch = fileFormat.getRemoteAppLogDir(appId, null); + FileStatus[] matching = fc.util().globStatus(toMatch); + if (matching != null && matching.length == 1) { + //fetch user from new path /app-logs/user[/suffix]/bucket/app_id + Path parent = matching[0].getPath().getParent(); + //skip the suffix too + if (suffix != null && !StringUtils.isEmpty(suffix)) { + parent = parent.getParent(); + } + //skip the bucket + parent = parent.getParent(); + return parent.getName(); + } + } catch (IOException e) { + // Ignore IOException thrown from wrong file format + } - Path toMatch = LogAggregationUtils. 
- getRemoteAppLogDir(remoteRootLogDir, appId, "*", suffix); - - pathAccess = toMatch.toString(); - FileStatus[] matching = fc.util().globStatus(toMatch); - if (matching == null || matching.length != 1) { - if (scanOldPath) { - toMatch = LogAggregationUtils.getOlderRemoteAppLogDir(appId, "*", - remoteRootLogDir, suffix); - try { - matching = fc.util().globStatus(toMatch); - if (matching != null && matching.length == 1) { - //fetch the user from the old path /app-logs/user[/suffix]/app_id - Path parent = matching[0].getPath().getParent(); - //skip the suffix too - if (suffix != null && !StringUtils.isEmpty(suffix)) { - parent = parent.getParent(); - } - return parent.getName(); - } - } catch (IOException e) { - // Ignore IOException from accessing older app log dir + if (scanOldPath) { + try { + Path toMatch = fileFormat.getOlderRemoteAppLogDir(appId, null); + FileStatus[] matching = fc.util().globStatus(toMatch); + if (matching != null && matching.length == 1) { + //fetch user from old path /app-logs/user[/suffix]/app_id + Path parent = matching[0].getPath().getParent(); + //skip the suffix too + if (suffix != null && !StringUtils.isEmpty(suffix)) { + parent = parent.getParent(); } + return parent.getName(); } - return null; + } catch (IOException e) { + // Ignore IOException thrown from wrong file format } - //fetch the user from the full path /app-logs/user[/suffix]/bucket/app_id - Path parent = matching[0].getPath().getParent(); - //skip the suffix too - if (suffix != null && !StringUtils.isEmpty(suffix)) { - parent = parent.getParent(); + } + + return null; + } + + @Private + @VisibleForTesting + /** + * Return the owner for a given AppId + * @param remoteRootLogDir + * @param appId + * @param bestGuess + * @param conf + * @return the owner or null + * @throws IOException + */ + public static String getOwnerForAppIdOrNull( + ApplicationId appId, String bestGuess, + Configuration conf) { + LogAggregationFileControllerFactory factory = + new LogAggregationFileControllerFactory(conf); + List fileControllers = factory + .getConfiguredLogAggregationFileControllerList(); + + if (fileControllers != null && !fileControllers.isEmpty()) { + String owner = null; + for (LogAggregationFileController fileFormat : fileControllers) { + try { + owner = guessOwnerWithFileFormat(fileFormat, appId, bestGuess, conf); + if (owner != null) { + return owner; + } + } catch (IOException e) { + return null; + } } - //skip the bucket - parent = parent.getParent(); - return parent.getName(); - } catch (AccessControlException | AccessDeniedException ex) { - logDirNoAccessPermission(pathAccess, bestGuess, ex.getMessage()); - return null; + } else { + System.err.println("Can not find any valid fileControllers. 
" + + " The configurated fileControllers: " + + YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS); } + + return null; } @Private @@ -215,7 +245,8 @@ public int dumpAllContainersLogs(ContainerLogsRequest options) } if (!foundAnyLogs) { emptyLogDir(LogAggregationUtils.getRemoteAppLogDir( - conf, options.getAppId(), options.getAppOwner()) + conf, options.getAppId(), options.getAppOwner(), + fc.getRemoteRootLogDir(), fc.getRemoteRootLogDirSuffix()) .toString()); return -1; } @@ -286,12 +317,11 @@ public void printNodesList(ContainerLogsRequest options, appOwner, fileFormat.getRemoteRootLogDir(), fileFormat.getRemoteRootLogDirSuffix()); } catch (FileNotFoundException fnf) { - logDirNotExist(LogAggregationUtils.getRemoteAppLogDir( - conf, appId, appOwner).toString()); + logDirNotExist(fileFormat.getRemoteAppLogDir(appId, + appOwner).toString()); } catch (AccessControlException | AccessDeniedException ace) { - logDirNoAccessPermission(LogAggregationUtils.getRemoteAppLogDir( - conf, appId, appOwner).toString(), appOwner, - ace.getMessage()); + logDirNoAccessPermission(fileFormat.getRemoteAppLogDir(appId, + appOwner).toString(), appOwner, ace.getMessage()); } if (nodeFiles == null) { return; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java index ea539e2dc673c..3fb432d773767 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java @@ -173,7 +173,8 @@ public boolean readAggregatedLogs(ContainerLogsRequest logRequest, || containerIdStr.isEmpty()); long size = logRequest.getBytes(); RemoteIterator nodeFiles = LogAggregationUtils - .getRemoteNodeFileDir(conf, appId, logRequest.getAppOwner()); + .getRemoteNodeFileDir(conf, appId, logRequest.getAppOwner(), + remoteRootLogDir, remoteRootLogDirSuffix); byte[] buf = new byte[65535]; while (nodeFiles != null && nodeFiles.hasNext()) { final FileStatus thisNodeFile = nodeFiles.next(); @@ -267,7 +268,8 @@ public List readAggregatedLogsMeta( String nodeIdStr = (nodeId == null) ? 
null : LogAggregationUtils.getNodeString(nodeId); RemoteIterator nodeFiles = LogAggregationUtils - .getRemoteNodeFileDir(conf, appId, appOwner); + .getRemoteNodeFileDir(conf, appId, appOwner, + remoteRootLogDir, remoteRootLogDirSuffix); if (nodeFiles == null) { throw new IOException("There is no available log file for " + "application:" + appId); @@ -331,7 +333,7 @@ public List readAggregatedLogsMeta( @Override public void renderAggregatedLogsBlock(Block html, ViewContext context) { TFileAggregatedLogsBlock block = new TFileAggregatedLogsBlock( - context, conf); + context, conf, remoteRootLogDir, remoteRootLogDirSuffix); block.render(html); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/TFileAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/TFileAggregatedLogsBlock.java index 6fb5b90bd8b1a..2c2604b1390a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/TFileAggregatedLogsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/TFileAggregatedLogsBlock.java @@ -48,11 +48,16 @@ public class TFileAggregatedLogsBlock extends LogAggregationHtmlBlock { private final Configuration conf; + private Path remoteRootLogDir; + private String remoteRootLogDirSuffix; @Inject - public TFileAggregatedLogsBlock(ViewContext ctx, Configuration conf) { + public TFileAggregatedLogsBlock(ViewContext ctx, Configuration conf, + Path remoteRootLogDir, String remoteRootLogDirSuffix) { super(ctx); this.conf = conf; + this.remoteRootLogDir = remoteRootLogDir; + this.remoteRootLogDirSuffix = remoteRootLogDirSuffix; } @Override @@ -67,7 +72,7 @@ protected void render(Block html) { try { nodeFiles = LogAggregationUtils .getRemoteNodeFileDir(conf, params.getAppId(), - params.getAppOwner()); + params.getAppOwner(), remoteRootLogDir, remoteRootLogDirSuffix); } catch (RuntimeException e) { throw e; } catch (Exception ex) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java index f6855d6977ece..daa2fc6b01cbc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.logaggregation; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_FILE_CONTROLLER_FMT; + import java.io.IOException; import java.net.URI; import java.util.Arrays; @@ -37,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.logaggregation.filecontroller.tfile.LogAggregationTFileController; import org.junit.Before; import org.junit.Test; import org.junit.Assert; @@ -56,7 +59,6 @@ public void testDeletion() throws Exception { long now = System.currentTimeMillis(); long toDeleteTime = now - 
(2000*1000); long toKeepTime = now - (1500*1000); - String root = "mockfs://foo/"; String remoteRootLogDir = root+"tmp/logs"; String suffix = "logs"; @@ -67,6 +69,10 @@ public void testDeletion() throws Exception { conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, "1800"); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix); + conf.set(YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS, "TFile"); + conf.set(String.format(LOG_AGGREGATION_FILE_CONTROLLER_FMT, "TFile"), + LogAggregationTFileController.class.getName()); + Path rootPath = new Path(root); FileSystem rootFs = rootPath.getFileSystem(conf); @@ -211,6 +217,10 @@ public void testRefreshLogRetentionSettings() throws Exception { "1"); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix); + conf.set(YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS, "TFile"); + conf.set(String.format(LOG_AGGREGATION_FILE_CONTROLLER_FMT, "TFile"), + LogAggregationTFileController.class.getName()); + Path rootPath = new Path(root); FileSystem rootFs = rootPath.getFileSystem(conf); @@ -335,6 +345,10 @@ public void testCheckInterval() throws Exception { conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS, "1"); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix); + conf.set(YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS, "TFile"); + conf.set(String.format(LOG_AGGREGATION_FILE_CONTROLLER_FMT, "TFile"), + LogAggregationTFileController.class.getName()); + // prevent us from picking up the same mockfs instance from another test FileSystem.closeAll(); @@ -437,6 +451,9 @@ public void testRobustLogDeletion() throws Exception { "1"); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix); + conf.set(YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS, "TFile"); + conf.set(String.format(LOG_AGGREGATION_FILE_CONTROLLER_FMT, "TFile"), + LogAggregationTFileController.class.getName()); // prevent us from picking up the same mockfs instance from another test FileSystem.closeAll(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java index bee34e07354f5..1135f9e7e8e35 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java @@ -378,7 +378,7 @@ private static class TFileAggregatedLogsBlockForTest @Inject TFileAggregatedLogsBlockForTest(ViewContext ctx, Configuration conf) { - super(ctx, conf); + super(ctx, conf, new Path("target/logs"), "logs"); } public void render(Block html) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestContainerLogsUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestContainerLogsUtils.java index 231e0e21ab4c6..e1f1914d3ab34 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestContainerLogsUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestContainerLogsUtils.java @@ -35,7 +35,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; -import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController; import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext; import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory; @@ -87,9 +86,14 @@ public static void createContainerLogFileInRemoteFS(Configuration conf, createContainerLogInLocalDir(appLogsDir, containerId, fs, fileName, content); // upload container logs to remote log dir - Path path = LogAggregationUtils.getRemoteAppLogDir( - new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR)), - appId, user, "logs"); + + LogAggregationFileControllerFactory factory = + new LogAggregationFileControllerFactory(conf); + LogAggregationFileController fileController = + factory.getFileControllerForWrite(); + + Path path = fileController.getRemoteAppLogDir(appId, user); + if (fs.exists(path) && deleteRemoteLogDir) { fs.delete(path, true); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java index adcec8db36f86..484cad13f885c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java @@ -695,13 +695,12 @@ public void testRemoteRootLogDirIsCreatedWithCorrectGroupOwner() @Test public void testAppLogDirCreation() throws Exception { - final String logSuffix = "bucket_logs"; - final String inputSuffix = "logs"; + final String inputSuffix = "logs-tfile"; this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath()); this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, this.remoteRootLogDir.getAbsolutePath()); - this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, inputSuffix); + this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, "logs"); InlineDispatcher dispatcher = new InlineDispatcher(); dispatcher.init(this.conf); @@ -734,10 +733,10 @@ public LogAggregationFileController getLogAggregationFileController( ApplicationId appId = BuilderUtils.newApplicationId(1, 1); Path userDir = fs.makeQualified(new Path( remoteRootLogDir.getAbsolutePath(), this.user)); - Path suffixDir = new Path(userDir, logSuffix); Path bucketDir = fs.makeQualified(LogAggregationUtils.getRemoteBucketDir( new Path(remoteRootLogDir.getAbsolutePath()), this.user, inputSuffix, appId)); + Path suffixDir = bucketDir.getParent(); Path appDir = 
fs.makeQualified(LogAggregationUtils.getRemoteAppLogDir( new Path(remoteRootLogDir.getAbsolutePath()), appId, this.user, inputSuffix)); @@ -775,13 +774,12 @@ public LogAggregationFileController getLogAggregationFileController( // Verify we do not create bucket dir again ApplicationId appId4 = BuilderUtils.newApplicationId(2, 10003); - Path bucketDir4 = fs.makeQualified(LogAggregationUtils.getRemoteBucketDir( - new Path(remoteRootLogDir.getAbsolutePath()), - this.user, logSuffix, appId4)); - new File(bucketDir4.toUri().getPath()).mkdir(); Path appDir4 = fs.makeQualified(LogAggregationUtils.getRemoteAppLogDir( new Path(remoteRootLogDir.getAbsolutePath()), appId4, this.user, inputSuffix)); + Path bucketDir4 = appDir4.getParent(); + new File(bucketDir4.toUri().getPath()).mkdir(); + aggSvc.handle(new LogHandlerAppStartedEvent(appId4, this.user, null, this.acls, contextWithAllContainers)); verify(spyFs, never()).mkdirs(eq(bucketDir4), isA(FsPermission.class)); From 93d7630062247793a4860704fad4a31437693de1 Mon Sep 17 00:00:00 2001 From: Shweta Date: Fri, 24 May 2019 18:41:45 +0200 Subject: [PATCH 0032/1308] HDFS-14451. Incorrect header or version mismatch log message. Contributed by Shweta. Signed-off-by: Wei-Chiu Chuang --- .../main/java/org/apache/hadoop/ipc/Server.java | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 91cc4a60265bb..8229b5aee48b8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -2233,11 +2233,17 @@ public int readAndProcess() throws IOException, InterruptedException { setupHttpRequestOnIpcPortResponse(); return -1; } - - if (!RpcConstants.HEADER.equals(dataLengthBuffer) - || version != CURRENT_VERSION) { + + if(!RpcConstants.HEADER.equals(dataLengthBuffer)) { + LOG.warn("Incorrect RPC Header length from {}:{} " + + "expected length: {} got length: {}", + hostAddress, remotePort, RpcConstants.HEADER, dataLengthBuffer); + setupBadVersionResponse(version); + return -1; + } + if (version != CURRENT_VERSION) { //Warning is ok since this is not supposed to happen. - LOG.warn("Incorrect header or version mismatch from " + + LOG.warn("Version mismatch from " + hostAddress + ":" + remotePort + " got version " + version + " expected version " + CURRENT_VERSION); From c9393dd17f25ee62ceac0c66b96ce26b2789cc8a Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Fri, 24 May 2019 10:23:47 -0700 Subject: [PATCH 0033/1308] Revert "HDFS-14353. Erasure Coding: metrics xmitsInProgress become to negative. Contributed by maobaolong." This reverts commit 1cb2eb0df30d4fbaa090c68022833063f3d225cc. 
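For reference, the revert below drops the weight-scaled xmits decrement that HDFS-14353 had added to StripedBlockReconstructor and restores the plain decrement by getXmits(). A minimal sketch contrasting the two accounting rules; the wrapper class is hypothetical and only mirrors the lines visible in the diff that follows.

    // Illustrative sketch only, not part of the patch: the two ways a finished
    // reconstruction task's xmits were subtracted from xmitsInProgress.
    final class XmitsAccountingSketch {
      // Behaviour restored by this revert: subtract exactly the counted xmits.
      static int unweighted(int xmits) {
        return xmits;
      }

      // Behaviour being reverted: scale by the EC worker's xmit weight and clamp
      // to at least 1 so a fractional weight still registers as submitted work.
      static int weighted(int xmits, float xmitWeight) {
        return Math.max((int) (xmits * xmitWeight), 1);
      }

      public static void main(String[] args) {
        System.out.println(unweighted(6));       // 6
        System.out.println(weighted(6, 0.5f));   // 3
        System.out.println(weighted(6, 0.0f));   // 1 (clamped)
      }
    }
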
--- .../server/datanode/erasurecode/ErasureCodingWorker.java | 4 ---- .../datanode/erasurecode/StripedBlockReconstructor.java | 6 +----- .../server/datanode/erasurecode/StripedReconstructor.java | 4 ---- .../org/apache/hadoop/hdfs/TestReconstructStripedFile.java | 6 ------ 4 files changed, 1 insertion(+), 19 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java index f4506cf470719..f9063b7a8929f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java @@ -170,8 +170,4 @@ public void shutDown() { stripedReconstructionPool.shutdown(); stripedReadPool.shutdown(); } - - public float getXmitWeight() { - return xmitWeight; - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java index 1af2380886ac3..29c0078e95710 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java @@ -67,11 +67,7 @@ public void run() { LOG.warn("Failed to reconstruct striped block: {}", getBlockGroup(), e); getDatanode().getMetrics().incrECFailedReconstructionTasks(); } finally { - float xmitWeight = getErasureCodingWorker().getXmitWeight(); - // if the xmits is smaller than 1, the xmitsSubmitted should be set to 1 - // because if it set to zero, we cannot to measure the xmits submitted - int xmitsSubmitted = Math.max((int) (getXmits() * xmitWeight), 1); - getDatanode().decrementXmitsInProgress(xmitsSubmitted); + getDatanode().decrementXmitsInProgress(getXmits()); final DataNodeMetrics metrics = getDatanode().getMetrics(); metrics.incrECReconstructionTasks(); metrics.incrECReconstructionBytesRead(getBytesRead()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java index 4c8be827f4354..a1f4c7ff55e37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java @@ -275,8 +275,4 @@ Configuration getConf() { DataNode getDatanode() { return datanode; } - - public ErasureCodingWorker getErasureCodingWorker() { - return erasureCodingWorker; - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index 0b490b5c3bcc6..2abfff7876c13 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -514,8 +514,6 @@ private void testNNSendsErasureCodingTasks(int deadDN) throws Exception { @Test(timeout = 180000) public void testErasureCodingWorkerXmitsWeight() throws Exception { - testErasureCodingWorkerXmitsWeight(0.5f, - (int) (ecPolicy.getNumDataUnits() * 0.5f)); testErasureCodingWorkerXmitsWeight(1f, ecPolicy.getNumDataUnits()); testErasureCodingWorkerXmitsWeight(0f, 1); testErasureCodingWorkerXmitsWeight(10f, 10 * ecPolicy.getNumDataUnits()); @@ -569,10 +567,6 @@ public void stripedBlockReconstruction() throws IOException { } finally { barrier.await(); DataNodeFaultInjector.set(oldInjector); - for (final DataNode curDn : cluster.getDataNodes()) { - GenericTestUtils.waitFor(() -> curDn.getXceiverCount() > 1, 10, 60000); - assertEquals(0, curDn.getXmitsInProgress()); - } } } } From 6d0e79c1216f342b4463131f6f997fea8e3ce247 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Fri, 24 May 2019 10:50:54 -0700 Subject: [PATCH 0034/1308] HDDS-1512. Implement DoubleBuffer in OzoneManager. (#810) --- .../om/ratis/OzoneManagerDoubleBuffer.java | 212 +++++++++ .../om/ratis/helpers/DoubleBufferEntry.java | 44 ++ .../ozone/om/ratis/helpers/package-info.java | 20 + .../om/response/OMBucketCreateResponse.java | 52 +++ .../om/response/OMBucketDeleteResponse.java | 57 +++ .../ozone/om/response/OMClientResponse.java | 45 ++ .../om/response/OMVolumeCreateResponse.java | 64 +++ .../om/response/OMVolumeDeleteResponse.java | 59 +++ .../ozone/om/response/package-info.java | 24 + ...eManagerDoubleBufferWithDummyResponse.java | 130 ++++++ ...zoneManagerDoubleBufferWithOMResponse.java | 409 ++++++++++++++++++ 11 files changed, 1116 insertions(+) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/DoubleBufferEntry.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketCreateResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketDeleteResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeCreateResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeDeleteResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java new file mode 100644 index 0000000000000..a68b94a9813c1 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ 
-0,0 +1,212 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.ratis; + +import java.io.IOException; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.ratis.helpers.DoubleBufferEntry; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.utils.db.BatchOperation; + +import org.apache.ratis.util.ExitUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class implements DoubleBuffer implementation of OMClientResponse's. In + * DoubleBuffer it has 2 buffers one is currentBuffer and other is + * readyBuffer. The current OM requests will be always added to currentBuffer. + * Flush thread will be running in background, it check's if currentBuffer has + * any entries, it swaps the buffer and creates a batch and commit to DB. + * Adding OM request to doubleBuffer and swap of buffer are synchronized + * methods. + * + */ +public class OzoneManagerDoubleBuffer { + + private static final Logger LOG = + LoggerFactory.getLogger(OzoneManagerDoubleBuffer.class.getName()); + + // Taken unbounded queue, if sync thread is taking too long time, we + // might end up taking huge memory to add entries to the buffer. + // TODO: We can avoid this using unbounded queue and use queue with + // capacity, if queue is full we can wait for sync to be completed to + // add entries. But in this also we might block rpc handlers, as we + // clear entries after sync. Or we can come up with a good approach to + // solve this. + private Queue> currentBuffer; + private Queue> readyBuffer; + + private Daemon daemon; + private final OMMetadataManager omMetadataManager; + private final AtomicLong flushedTransactionCount = new AtomicLong(0); + private final AtomicLong flushIterations = new AtomicLong(0); + private volatile boolean isRunning; + + + public OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager) { + this.currentBuffer = new ConcurrentLinkedQueue<>(); + this.readyBuffer = new ConcurrentLinkedQueue<>(); + this.omMetadataManager = omMetadataManager; + + isRunning = true; + // Daemon thread which runs in back ground and flushes transactions to DB. + daemon = new Daemon(this::flushTransactions); + daemon.setName("OMDoubleBufferFlushThread"); + daemon.start(); + + } + + /** + * Runs in a background thread and batches the transaction in currentBuffer + * and commit to DB. + */ + private void flushTransactions() { + while(isRunning) { + try { + if (canFlush()) { + setReadyBuffer(); + final BatchOperation batchOperation = omMetadataManager.getStore() + .initBatchOperation(); + + readyBuffer.iterator().forEachRemaining((entry) -> { + try { + entry.getResponse().addToDBBatch(omMetadataManager, + batchOperation); + } catch (IOException ex) { + // During Adding to RocksDB batch entry got an exception. + // We should terminate the OM. 
+ terminate(ex); + } + }); + + omMetadataManager.getStore().commitBatchOperation(batchOperation); + int flushedTransactionsSize = readyBuffer.size(); + flushedTransactionCount.addAndGet(flushedTransactionsSize); + flushIterations.incrementAndGet(); + + LOG.debug("Sync Iteration {} flushed transactions in this " + + "iteration{}", flushIterations.get(), + flushedTransactionsSize); + readyBuffer.clear(); + // TODO: update the last updated index in OzoneManagerStateMachine. + } + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + if (isRunning) { + final String message = "OMDoubleBuffer flush thread " + + Thread.currentThread().getName() + " encountered Interrupted " + + "exception while running"; + ExitUtils.terminate(1, message, ex, LOG); + } else { + LOG.info("OMDoubleBuffer flush thread " + + Thread.currentThread().getName() + " is interrupted and will " + + "exit. {}", Thread.currentThread().getName()); + } + } catch (IOException ex) { + terminate(ex); + } catch (Throwable t) { + final String s = "OMDoubleBuffer flush thread" + + Thread.currentThread().getName() + "encountered Throwable error"; + ExitUtils.terminate(2, s, t, LOG); + } + } + } + + /** + * Stop OM DoubleBuffer flush thread. + */ + public synchronized void stop() { + if (isRunning) { + LOG.info("Stopping OMDoubleBuffer flush thread"); + isRunning = false; + daemon.interrupt(); + } else { + LOG.info("OMDoubleBuffer flush thread is not running."); + } + + } + + private void terminate(IOException ex) { + String message = "During flush to DB encountered error in " + + "OMDoubleBuffer flush thread " + Thread.currentThread().getName(); + ExitUtils.terminate(1, message, ex, LOG); + } + + /** + * Returns the flushed transaction count to OM DB. + * @return flushedTransactionCount + */ + public long getFlushedTransactionCount() { + return flushedTransactionCount.get(); + } + + /** + * Returns total number of flush iterations run by sync thread. + * @return flushIterations + */ + public long getFlushIterations() { + return flushIterations.get(); + } + + /** + * Add OmResponseBufferEntry to buffer. + * @param response + * @param transactionIndex + */ + public synchronized void add(OMClientResponse response, + long transactionIndex) { + currentBuffer.add(new DoubleBufferEntry<>(transactionIndex, response)); + notify(); + } + + /** + * Check can we flush transactions or not. This method wait's until + * currentBuffer size is greater than zero, once currentBuffer size is + * greater than zero it gets notify signal, and it returns true + * indicating that we are ready to flush. + * + * @return boolean + */ + private synchronized boolean canFlush() throws InterruptedException { + // When transactions are added to buffer it notifies, then we check if + // currentBuffer size once and return from this method. + while (currentBuffer.size() == 0) { + wait(Long.MAX_VALUE); + } + return true; + } + + /** + * Prepares the readyBuffer which is used by sync thread to flush + * transactions to OM DB. This method swaps the currentBuffer and readyBuffer. 
+ */ + private synchronized void setReadyBuffer() { + Queue> temp = currentBuffer; + currentBuffer = readyBuffer; + readyBuffer = temp; + } + +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/DoubleBufferEntry.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/DoubleBufferEntry.java new file mode 100644 index 0000000000000..cd4c5ae8b25e5 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/DoubleBufferEntry.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.ratis.helpers; + +import org.apache.hadoop.ozone.om.response.OMClientResponse; + +/** + * Entry in OzoneManagerDouble Buffer. + * @param + */ +public class DoubleBufferEntry { + + private long trxLogIndex; + private Response response; + + public DoubleBufferEntry(long trxLogIndex, Response response) { + this.trxLogIndex = trxLogIndex; + this.response = response; + } + + public long getTrxLogIndex() { + return trxLogIndex; + } + + public Response getResponse() { + return response; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/package-info.java new file mode 100644 index 0000000000000..b12a324d681c0 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/package-info.java @@ -0,0 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +/** + * package which contains helper classes for each OM request response. + */ +package org.apache.hadoop.ozone.om.ratis.helpers; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketCreateResponse.java new file mode 100644 index 0000000000000..7e222edb8ff74 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketCreateResponse.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response; + +import java.io.IOException; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.utils.db.BatchOperation; + +/** + * Response for CreateBucket request. + */ +public final class OMBucketCreateResponse implements OMClientResponse { + + private final OmBucketInfo omBucketInfo; + + public OMBucketCreateResponse(OmBucketInfo omBucketInfo) { + this.omBucketInfo = omBucketInfo; + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String dbBucketKey = + omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), + omBucketInfo.getBucketName()); + omMetadataManager.getBucketTable().putWithBatch(batchOperation, dbBucketKey, + omBucketInfo); + } + + public OmBucketInfo getOmBucketInfo() { + return omBucketInfo; + } +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketDeleteResponse.java new file mode 100644 index 0000000000000..fd3842db7e6ba --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketDeleteResponse.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response; + +import java.io.IOException; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.utils.db.BatchOperation; + +/** + * Response for DeleteBucket request. + */ +public final class OMBucketDeleteResponse implements OMClientResponse { + + private String volumeName; + private String bucketName; + + public OMBucketDeleteResponse( + String volumeName, String bucketName) { + this.volumeName = volumeName; + this.bucketName = bucketName; + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String dbBucketKey = + omMetadataManager.getBucketKey(volumeName, bucketName); + omMetadataManager.getBucketTable().deleteWithBatch(batchOperation, + dbBucketKey); + } + + public String getVolumeName() { + return volumeName; + } + + public String getBucketName() { + return bucketName; + } +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java new file mode 100644 index 0000000000000..2603421a59a7f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response; + +import org.apache.commons.lang.NotImplementedException; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.utils.db.BatchOperation; + +import java.io.IOException; + +/** + * Interface for OM Responses, each OM response should implement this interface. + */ +public interface OMClientResponse { + + /** + * Implement logic to add the response to batch. + * @param omMetadataManager + * @param batchOperation + * @throws IOException + */ + default void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + throw new NotImplementedException("Not implemented, Each OM Response " + + "should implement this method"); + } + +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeCreateResponse.java new file mode 100644 index 0000000000000..857f03aa01abf --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeCreateResponse.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response; + +import java.io.IOException; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.utils.db.BatchOperation; + +/** + * Response for CreateBucket request. + */ +public class OMVolumeCreateResponse implements OMClientResponse { + + private OzoneManagerProtocolProtos.VolumeList volumeList; + private OmVolumeArgs omVolumeArgs; + + public OMVolumeCreateResponse(OmVolumeArgs omVolumeArgs, + OzoneManagerProtocolProtos.VolumeList volumeList) { + this.omVolumeArgs = omVolumeArgs; + this.volumeList = volumeList; + } + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + + String dbVolumeKey = + omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()); + String dbUserKey = + omMetadataManager.getUserKey(omVolumeArgs.getOwnerName()); + + omMetadataManager.getVolumeTable().putWithBatch(batchOperation, dbVolumeKey, + omVolumeArgs); + omMetadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey, + volumeList); + } + + public OzoneManagerProtocolProtos.VolumeList getVolumeList() { + return volumeList; + } + + public OmVolumeArgs getOmVolumeArgs() { + return omVolumeArgs; + } +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeDeleteResponse.java new file mode 100644 index 0000000000000..02663cb8887fc --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeDeleteResponse.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response; + +import java.io.IOException; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.utils.db.BatchOperation; + +/** + * Response for CreateVolume request. + */ +public class OMVolumeDeleteResponse implements OMClientResponse { + private String volume; + private String owner; + private OzoneManagerProtocolProtos.VolumeList updatedVolumeList; + + public OMVolumeDeleteResponse(String volume, String owner, + OzoneManagerProtocolProtos.VolumeList updatedVolumeList) { + this.volume = volume; + this.owner = owner; + this.updatedVolumeList = updatedVolumeList; + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String dbUserKey = omMetadataManager.getUserKey(owner); + OzoneManagerProtocolProtos.VolumeList volumeList = + updatedVolumeList; + if (updatedVolumeList.getVolumeNamesList().size() == 0) { + omMetadataManager.getUserTable().deleteWithBatch(batchOperation, + dbUserKey); + } else { + omMetadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey, + volumeList); + } + omMetadataManager.getVolumeTable().deleteWithBatch(batchOperation, + omMetadataManager.getVolumeKey(volume)); + } +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java new file mode 100644 index 0000000000000..d66cac7c021c2 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response; + + +/** + * This package contains classes for the OM Responses. + */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java new file mode 100644 index 0000000000000..a0162e8aa81eb --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -0,0 +1,130 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.ratis; + +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.utils.db.BatchOperation; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; + +/** + * This class tests OzoneManagerDoubleBuffer implementation with + * dummy response class. + */ +public class TestOzoneManagerDoubleBufferWithDummyResponse { + + private OMMetadataManager omMetadataManager; + private OzoneManagerDoubleBuffer doubleBuffer; + private AtomicLong trxId = new AtomicLong(0); + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + @Before + public void setup() throws IOException { + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.set(OZONE_METADATA_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = + new OmMetadataManagerImpl(configuration); + doubleBuffer = new OzoneManagerDoubleBuffer(omMetadataManager); + } + + @After + public void stop() { + doubleBuffer.stop(); + } + + /** + * This tests add's 100 bucket creation responses to doubleBuffer, and + * check OM DB bucket table has 100 entries or not. In addition checks + * flushed transaction count is matching with expected count or not. + * @throws Exception + */ + @Test(timeout = 300_000) + public void testDoubleBufferWithDummyResponse() throws Exception { + String volumeName = UUID.randomUUID().toString(); + int bucketCount = 100; + for (int i=0; i < bucketCount; i++) { + doubleBuffer.add(createDummyBucketResponse(volumeName, + UUID.randomUUID().toString()), trxId.incrementAndGet()); + } + GenericTestUtils.waitFor(() -> + doubleBuffer.getFlushedTransactionCount() == bucketCount, 100, + 60000); + Assert.assertTrue(omMetadataManager.countRowsInTable( + omMetadataManager.getBucketTable()) == (bucketCount)); + Assert.assertTrue(doubleBuffer.getFlushIterations() > 0); + } + + /** + * Create DummyBucketCreate response. + * @param volumeName + * @param bucketName + * @return OMDummyCreateBucketResponse + */ + private OMDummyCreateBucketResponse createDummyBucketResponse( + String volumeName, String bucketName) { + OmBucketInfo omBucketInfo = + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName).setCreationTime(Time.now()).build(); + return new OMDummyCreateBucketResponse(omBucketInfo); + } + + + /** + * DummyCreatedBucket Response class used in testing. 
+ */ + public static class OMDummyCreateBucketResponse implements OMClientResponse { + private final OmBucketInfo omBucketInfo; + + public OMDummyCreateBucketResponse(OmBucketInfo omBucketInfo) { + this.omBucketInfo = omBucketInfo; + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String dbBucketKey = + omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), + omBucketInfo.getBucketName()); + omMetadataManager.getBucketTable().putWithBatch(batchOperation, + dbBucketKey, omBucketInfo); + } + + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java new file mode 100644 index 0000000000000..1926b65629753 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -0,0 +1,409 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.ratis; + +import java.io.IOException; +import java.util.Queue; +import java.util.UUID; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicLong; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.response.OMVolumeCreateResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList; +import org.apache.hadoop.ozone.om.response.OMBucketCreateResponse; +import org.apache.hadoop.ozone.om.response.OMBucketDeleteResponse; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.Time; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; +import static org.junit.Assert.fail; + +/** + * This class tests OzoneManagerDouble Buffer with actual OMResponse classes. + */ +public class TestOzoneManagerDoubleBufferWithOMResponse { + + private OMMetadataManager omMetadataManager; + private OzoneManagerDoubleBuffer doubleBuffer; + private AtomicLong trxId = new AtomicLong(0); + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + @Before + public void setup() throws IOException { + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.set(OZONE_METADATA_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = + new OmMetadataManagerImpl(configuration); + doubleBuffer = new OzoneManagerDoubleBuffer(omMetadataManager); + } + + @After + public void stop() { + doubleBuffer.stop(); + } + + /** + * This tests OzoneManagerDoubleBuffer implementation. It calls + * testDoubleBuffer with number of iterations to do transactions and + * number of buckets to be created in each iteration. It then + * verifies OM DB entries count is matching with total number of + * transactions or not. + * @throws Exception + */ + @Test(timeout = 300_000) + public void testDoubleBuffer() throws Exception { + // This test checks whether count in tables are correct or not. + testDoubleBuffer(1, 10); + testDoubleBuffer(10, 100); + testDoubleBuffer(100, 100); + testDoubleBuffer(1000, 1000); + } + + /** + * This test first creates a volume, and then does a mix of transactions + * like create/delete buckets and add them to double buffer. Then it + * verifies OM DB entries are matching with actual responses added to + * double buffer or not. + * @throws Exception + */ + @Test + public void testDoubleBufferWithMixOfTransactions() throws Exception { + // This test checks count, data in table is correct or not. 
+ Queue< OMBucketCreateResponse > bucketQueue = + new ConcurrentLinkedQueue<>(); + Queue< OMBucketDeleteResponse > deleteBucketQueue = + new ConcurrentLinkedQueue<>(); + + String volumeName = UUID.randomUUID().toString(); + OMVolumeCreateResponse omVolumeCreateResponse = createVolume(volumeName); + doubleBuffer.add(omVolumeCreateResponse, trxId.incrementAndGet()); + + + int bucketCount = 10; + + doMixTransactions(volumeName, 10, deleteBucketQueue, bucketQueue); + + // As for every 2 transactions of create bucket we add deleted bucket. + final int deleteCount = 5; + + // We are doing +1 for volume transaction. + GenericTestUtils.waitFor(() -> + doubleBuffer.getFlushedTransactionCount() == + (bucketCount + deleteCount + 1), 100, 120000); + + Assert.assertTrue(omMetadataManager.countRowsInTable( + omMetadataManager.getVolumeTable()) == 1); + + Assert.assertTrue(omMetadataManager.countRowsInTable( + omMetadataManager.getBucketTable()) == 5); + + // Now after this in our DB we should have 5 buckets and one volume + + checkVolume(volumeName, omVolumeCreateResponse); + + checkCreateBuckets(bucketQueue); + + checkDeletedBuckets(deleteBucketQueue); + } + + /** + * This test first creates a volume, and then does a mix of transactions + * like create/delete buckets in parallel and add to double buffer. Then it + * verifies OM DB entries are matching with actual responses added to + * double buffer or not. + * @throws Exception + */ + @Test + public void testDoubleBufferWithMixOfTransactionsParallel() throws Exception { + // This test checks count, data in table is correct or not. + + Queue< OMBucketCreateResponse > bucketQueue = + new ConcurrentLinkedQueue<>(); + Queue< OMBucketDeleteResponse > deleteBucketQueue = + new ConcurrentLinkedQueue<>(); + + String volumeName1 = UUID.randomUUID().toString(); + OMVolumeCreateResponse omVolumeCreateResponse1 = + createVolume(volumeName1); + + String volumeName2 = UUID.randomUUID().toString(); + OMVolumeCreateResponse omVolumeCreateResponse2 = + createVolume(volumeName2); + + doubleBuffer.add(omVolumeCreateResponse1, trxId.incrementAndGet()); + + doubleBuffer.add(omVolumeCreateResponse2, trxId.incrementAndGet()); + + Daemon daemon1 = new Daemon(() -> doMixTransactions(volumeName1, 10, + deleteBucketQueue, bucketQueue)); + Daemon daemon2 = new Daemon(() -> doMixTransactions(volumeName2, 10, + deleteBucketQueue, bucketQueue)); + + daemon1.start(); + daemon2.start(); + + int bucketCount = 20; + + // As for every 2 transactions of create bucket we add deleted bucket. + final int deleteCount = 10; + + // We are doing +1 for volume transaction. + GenericTestUtils.waitFor(() -> doubleBuffer.getFlushedTransactionCount() + == (bucketCount + deleteCount + 2), 100, 120000); + + Assert.assertTrue(omMetadataManager.countRowsInTable( + omMetadataManager.getVolumeTable()) == 2); + + Assert.assertTrue(omMetadataManager.countRowsInTable( + omMetadataManager.getBucketTable()) == 10); + + // Now after this in our DB we should have 5 buckets and one volume + + + checkVolume(volumeName1, omVolumeCreateResponse1); + checkVolume(volumeName2, omVolumeCreateResponse2); + + checkCreateBuckets(bucketQueue); + + checkDeletedBuckets(deleteBucketQueue); + } + + /** + * This method add's a mix of createBucket/DeleteBucket responses to double + * buffer. Total number of responses added is specified by bucketCount. 
+ * @param volumeName + * @param bucketCount + * @param deleteBucketQueue + * @param bucketQueue + */ + private void doMixTransactions(String volumeName, int bucketCount, + Queue deleteBucketQueue, + Queue bucketQueue) { + for (int i=0; i < bucketCount; i++) { + String bucketName = UUID.randomUUID().toString(); + OMBucketCreateResponse omBucketCreateResponse = createBucket(volumeName, + bucketName); + doubleBuffer.add(omBucketCreateResponse, trxId.incrementAndGet()); + // For every 2 transactions have a deleted bucket. + if (i % 2 == 0) { + OMBucketDeleteResponse omBucketDeleteResponse = + deleteBucket(volumeName, bucketName); + doubleBuffer.add(omBucketDeleteResponse, trxId.incrementAndGet()); + deleteBucketQueue.add(omBucketDeleteResponse); + } else { + bucketQueue.add(omBucketCreateResponse); + } + } + } + + /** + * Verifies volume table data is matching with actual response added to + * double buffer. + * @param volumeName + * @param omVolumeCreateResponse + * @throws Exception + */ + private void checkVolume(String volumeName, + OMVolumeCreateResponse omVolumeCreateResponse) throws Exception { + OmVolumeArgs tableVolumeArgs = omMetadataManager.getVolumeTable().get( + omMetadataManager.getVolumeKey(volumeName)); + Assert.assertTrue(tableVolumeArgs != null); + + OmVolumeArgs omVolumeArgs = omVolumeCreateResponse.getOmVolumeArgs(); + + Assert.assertEquals(omVolumeArgs.getVolume(), tableVolumeArgs.getVolume()); + Assert.assertEquals(omVolumeArgs.getAdminName(), + tableVolumeArgs.getAdminName()); + Assert.assertEquals(omVolumeArgs.getOwnerName(), + tableVolumeArgs.getOwnerName()); + Assert.assertEquals(omVolumeArgs.getCreationTime(), + tableVolumeArgs.getCreationTime()); + } + + /** + * Verifies bucket table data is matching with actual response added to + * double buffer. + * @param bucketQueue + */ + private void checkCreateBuckets(Queue bucketQueue) { + bucketQueue.forEach((omBucketCreateResponse) -> { + OmBucketInfo omBucketInfo = omBucketCreateResponse.getOmBucketInfo(); + String bucket = omBucketInfo.getBucketName(); + OmBucketInfo tableBucketInfo = null; + try { + tableBucketInfo = + omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), + bucket)); + } catch (IOException ex) { + fail("testDoubleBufferWithMixOfTransactions failed"); + } + Assert.assertNotNull(tableBucketInfo); + + Assert.assertEquals(omBucketInfo.getVolumeName(), + tableBucketInfo.getVolumeName()); + Assert.assertEquals(omBucketInfo.getBucketName(), + tableBucketInfo.getBucketName()); + Assert.assertEquals(omBucketInfo.getCreationTime(), + tableBucketInfo.getCreationTime()); + }); + } + + /** + * Verifies deleted bucket responses added to double buffer are actually + * removed from the OM DB or not. + * @param deleteBucketQueue + */ + private void checkDeletedBuckets(Queue + deleteBucketQueue) { + deleteBucketQueue.forEach((omBucketDeleteResponse -> { + try { + Assert.assertNull(omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey( + omBucketDeleteResponse.getVolumeName(), + omBucketDeleteResponse.getBucketName()))); + } catch (IOException ex) { + fail("testDoubleBufferWithMixOfTransactions failed"); + } + })); + } + + /** + * Create bucketCount number of createBucket responses for each iteration. + * All these iterations are run in parallel. Then verify OM DB has correct + * number of entries or not. 
+ * @param iterations + * @param bucketCount + * @throws Exception + */ + public void testDoubleBuffer(int iterations, int bucketCount) + throws Exception { + try { + // Calling setup and stop here because this method is called from a + // single test multiple times. + setup(); + for (int i = 0; i < iterations; i++) { + Daemon d1 = new Daemon(() -> + doTransactions(UUID.randomUUID().toString(), bucketCount)); + d1.start(); + } + + // We are doing +1 for volume transaction. + GenericTestUtils.waitFor(() -> + doubleBuffer.getFlushedTransactionCount() == + (bucketCount + 1) * iterations, 100, + 120000); + + Assert.assertTrue(omMetadataManager.countRowsInTable( + omMetadataManager.getVolumeTable()) == iterations); + + Assert.assertTrue(omMetadataManager.countRowsInTable( + omMetadataManager.getBucketTable()) == (bucketCount) * iterations); + + Assert.assertTrue(doubleBuffer.getFlushIterations() > 0); + } finally { + stop(); + } + } + + /** + * This method adds bucketCount number of createBucket responses to double + * buffer. + * @param volumeName + * @param bucketCount + */ + public void doTransactions(String volumeName, int bucketCount) { + doubleBuffer.add(createVolume(volumeName), trxId.incrementAndGet()); + for (int i=0; i< bucketCount; i++) { + doubleBuffer.add(createBucket(volumeName, UUID.randomUUID().toString()), + trxId.incrementAndGet()); + // For every 100 buckets creation adding 100ms delay + + if (i % 100 == 0) { + try { + Thread.sleep(100); + } catch (Exception ex) { + + } + } + } + } + + /** + * Create OMVolumeCreateResponse for specified volume. + * @param volumeName + * @return OMVolumeCreateResponse + */ + private OMVolumeCreateResponse createVolume(String volumeName) { + OmVolumeArgs omVolumeArgs = + OmVolumeArgs.newBuilder() + .setAdminName(UUID.randomUUID().toString()) + .setOwnerName(UUID.randomUUID().toString()) + .setVolume(volumeName) + .setCreationTime(Time.now()).build(); + + VolumeList volumeList = VolumeList.newBuilder() + .addVolumeNames(volumeName).build(); + return new OMVolumeCreateResponse(omVolumeArgs, volumeList); + } + + /** + * Create OMBucketCreateResponse for specified volume and bucket. + * @param volumeName + * @param bucketName + * @return OMBucketCreateResponse + */ + private OMBucketCreateResponse createBucket(String volumeName, + String bucketName) { + OmBucketInfo omBucketInfo = + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName).setCreationTime(Time.now()).build(); + return new OMBucketCreateResponse(omBucketInfo); + } + + /** + * Create OMBucketDeleteResponse for specified volume and bucket. + * @param volumeName + * @param bucketName + * @return OMBucketDeleteResponse + */ + private OMBucketDeleteResponse deleteBucket(String volumeName, + String bucketName) { + return new OMBucketDeleteResponse(volumeName, bucketName); + } + + +} + From c31b7b8d525b88cba7079b29d5ed9615286f5f59 Mon Sep 17 00:00:00 2001 From: Konstantin V Shvachko Date: Fri, 24 May 2019 12:34:29 -0700 Subject: [PATCH 0035/1308] HDFS-14502. keepResults option in NNThroughputBenchmark should call saveNamespace(). Contributed by Konstantin V Shvachko. 
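Note on the change below: with the keepResults option the benchmark now saves the generated namespace instead of deleting it, so it survives a NameNode restart. A condensed sketch of cleanUp() after this patch, with the truncated setSafeMode(SAFEMODE_LEAVE) context line assumed from the surrounding code (illustrative, not a drop-in replacement):

    void cleanUp() throws IOException {
      // Leave safe mode so the cleanup operations are allowed.
      clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
      if (!keepResults) {
        // Default behaviour: remove everything the benchmark created.
        clientProto.delete(getBaseDir(), true);
      } else {
        // keepResults: saveNamespace() requires safe mode, so enter it first,
        // then persist the generated namespace so it outlives a restart.
        clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, true);
        clientProto.saveNamespace(0, 0);
      }
    }
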
--- .../hadoop/hdfs/server/namenode/NNThroughputBenchmark.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index e5d9826c3eb70..653b906e54721 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -288,6 +288,11 @@ void cleanUp() throws IOException { false); if(!keepResults) clientProto.delete(getBaseDir(), true); + else { + clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, + true); + clientProto.saveNamespace(0, 0); + } } int getNumOpsExecuted() { From 55e0c134f002f74cb4a0360b6682a1b6796d1598 Mon Sep 17 00:00:00 2001 From: Erik Krogen Date: Fri, 17 May 2019 12:24:51 -0700 Subject: [PATCH 0036/1308] HDFS-14500. NameNode StartupProgress should not allow new steps in an already-completed phase. Contributed by Erik Krogen. --- .../startupprogress/StartupProgress.java | 35 ++++++----- .../startupprogress/TestStartupProgress.java | 59 +++++++++++++------ 2 files changed, 63 insertions(+), 31 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java index 1b529b90c5051..b8244a156a45c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java @@ -84,13 +84,14 @@ public void beginPhase(Phase phase) { } /** - * Begins execution of the specified step within the specified phase. + * Begins execution of the specified step within the specified phase. This is + * a no-op if the phase is already completed. * - * @param phase Phase to begin + * @param phase Phase within which the step should be started * @param step Step to begin */ public void beginStep(Phase phase, Step step) { - if (!isComplete()) { + if (!isComplete(phase)) { lazyInitStep(phase, step).beginTime = monotonicNow(); } } @@ -107,13 +108,14 @@ public void endPhase(Phase phase) { } /** - * Ends execution of the specified step within the specified phase. - * - * @param phase Phase to end + * Ends execution of the specified step within the specified phase. This is + * a no-op if the phase is already completed. 
+ * + * @param phase Phase within which the step should be ended * @param step Step to end */ public void endStep(Phase phase, Step step) { - if (!isComplete()) { + if (!isComplete(phase)) { lazyInitStep(phase, step).endTime = monotonicNow(); } } @@ -149,7 +151,7 @@ public Status getStatus(Phase phase) { * @return Counter associated with phase and step */ public Counter getCounter(Phase phase, Step step) { - if (!isComplete()) { + if (!isComplete(phase)) { final StepTracking tracking = lazyInitStep(phase, step); return new Counter() { @Override @@ -242,12 +244,17 @@ public StartupProgressView createView() { * @return boolean true if the entire startup process has completed */ private boolean isComplete() { - for (Phase phase: EnumSet.allOf(Phase.class)) { - if (getStatus(phase) != Status.COMPLETE) { - return false; - } - } - return true; + return EnumSet.allOf(Phase.class).stream().allMatch(this::isComplete); + } + + /** + * Returns true if the given startup phase has been completed. + * + * @param phase Which phase to check for completion + * @return boolean true if the given startup phase has completed. + */ + private boolean isComplete(Phase phase) { + return getStatus(phase) == Status.COMPLETE; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java index db778a3dafb6e..c01844d63a774 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java @@ -159,6 +159,14 @@ public void testFrozenAfterStartupCompletes() { startupProgress.endStep(LOADING_FSIMAGE, step); startupProgress.endPhase(LOADING_FSIMAGE); + StartupProgressView beforePhaseUpdate = startupProgress.createView(); + + // LOADING_FSIMAGE phase has been completed, but attempt more updates to it + Step fsimageStep2 = new Step(INODES); + startupProgress.beginStep(LOADING_FSIMAGE, fsimageStep2); + incrementCounter(startupProgress, LOADING_FSIMAGE, fsimageStep2, 1000000L); + startupProgress.endStep(LOADING_FSIMAGE, fsimageStep2); + // Force completion of phases, so that entire startup process is completed. for (Phase phase: EnumSet.allOf(Phase.class)) { if (startupProgress.getStatus(phase) != Status.COMPLETE) { @@ -191,26 +199,39 @@ public void testFrozenAfterStartupCompletes() { // Expect that data was frozen after completion of entire startup process, so // second set of updates and counter increments should have had no effect. 
- assertEquals(before.getCount(LOADING_FSIMAGE), - after.getCount(LOADING_FSIMAGE)); - assertEquals(before.getCount(LOADING_FSIMAGE, step), - after.getCount(LOADING_FSIMAGE, step)); + assertViewEquals(before, after, LOADING_FSIMAGE, step, fsimageStep2); assertEquals(before.getElapsedTime(), after.getElapsedTime()); - assertEquals(before.getElapsedTime(LOADING_FSIMAGE), - after.getElapsedTime(LOADING_FSIMAGE)); - assertEquals(before.getElapsedTime(LOADING_FSIMAGE, step), - after.getElapsedTime(LOADING_FSIMAGE, step)); - assertEquals(before.getFile(LOADING_FSIMAGE), - after.getFile(LOADING_FSIMAGE)); - assertEquals(before.getSize(LOADING_FSIMAGE), - after.getSize(LOADING_FSIMAGE)); - assertEquals(before.getTotal(LOADING_FSIMAGE), - after.getTotal(LOADING_FSIMAGE)); - assertEquals(before.getTotal(LOADING_FSIMAGE, step), - after.getTotal(LOADING_FSIMAGE, step)); + + // After the phase was completed but before startup was completed, + // everything should be equal, except for the total elapsed time + assertViewEquals(beforePhaseUpdate, after, LOADING_FSIMAGE, + step, fsimageStep2); + assertFalse(after.getSteps(LOADING_EDITS).iterator().hasNext()); } + private void assertViewEquals(StartupProgressView view1, + StartupProgressView view2, Phase phaseToVerify, Step... stepsToVerify) { + assertEquals(view1.getCount(phaseToVerify), + view2.getCount(phaseToVerify)); + assertEquals(view1.getElapsedTime(phaseToVerify), + view2.getElapsedTime(phaseToVerify)); + assertEquals(view1.getFile(phaseToVerify), + view2.getFile(phaseToVerify)); + assertEquals(view1.getSize(phaseToVerify), + view2.getSize(phaseToVerify)); + assertEquals(view1.getTotal(phaseToVerify), + view2.getTotal(phaseToVerify)); + for (Step step : stepsToVerify) { + assertEquals(view1.getCount(phaseToVerify, step), + view2.getCount(phaseToVerify, step)); + assertEquals(view1.getElapsedTime(phaseToVerify, step), + view2.getElapsedTime(phaseToVerify, step)); + assertEquals(view1.getTotal(phaseToVerify, step), + view2.getTotal(phaseToVerify, step)); + } + } + @Test(timeout=10000) public void testInitialState() { StartupProgressView view = startupProgress.createView(); @@ -375,7 +396,6 @@ public Void call() { startupProgress.setTotal(phase, step, total); incrementCounter(startupProgress, phase, step, 100L); startupProgress.endStep(phase, step); - startupProgress.endPhase(phase); return null; } }); @@ -384,6 +404,11 @@ public Void call() { exec.shutdown(); assertTrue(exec.awaitTermination(10000L, TimeUnit.MILLISECONDS)); } + // Once a phase ends, future modifications to the steps in that phase are + // ignored. Thus do not end the phases until after the other ops are done. + for (Phase phase : phases) { + startupProgress.endPhase(phase); + } StartupProgressView view = startupProgress.createView(); assertNotNull(view); From 37900c5639f8ba8d41b9fedc3d41ee0fbda7d5db Mon Sep 17 00:00:00 2001 From: Rakesh Radhakrishnan Date: Sun, 26 May 2019 14:30:11 +0530 Subject: [PATCH 0037/1308] HDFS-14402. Use FileChannel.transferTo() method for transferring block to SCM cache. Contributed by Feilong He. 
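Note on the change below: instead of mmap-ing the pmem cache file and copying buffers through user space, the block is now copied with FileChannel.transferTo() and the checksum is verified against the cached copy. A minimal, self-contained sketch of that copy step, with illustrative names (TransferToCacheSketch, copyBlockToCache and cachePath are not part of the patch):

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileChannel;

    /** Minimal sketch of the transferTo()-based copy step; names are illustrative. */
    final class TransferToCacheSketch {
      static void copyBlockToCache(FileInputStream blockIn, String cachePath, long length)
          throws IOException {
        FileChannel blockChannel = blockIn.getChannel();
        try (RandomAccessFile cacheFile = new RandomAccessFile(cachePath, "rw")) {
          // One kernel-level copy of the block file into the cache file,
          // replacing the previous map(READ_WRITE) + put() loop.
          blockChannel.transferTo(0, length, cacheFile.getChannel());
          // Rewind so the subsequent checksum pass (verifyChecksum in the
          // patch) reads back the cached bytes rather than the source block.
          cacheFile.getChannel().position(0);
        }
      }
    }
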
--- .../fsdataset/impl/MappableBlockLoader.java | 59 ++++++++++ .../impl/MemoryMappableBlockLoader.java | 59 ---------- .../impl/PmemMappableBlockLoader.java | 110 +++--------------- 3 files changed, 75 insertions(+), 153 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java index 044e5c59273d2..3ec84164c879e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java @@ -18,10 +18,16 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; +import com.google.common.base.Preconditions; +import org.apache.commons.io.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.ExtendedBlockId; +import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; +import org.apache.hadoop.util.DataChecksum; +import java.io.BufferedInputStream; +import java.io.DataInputStream; import java.io.FileInputStream; import java.io.IOException; import java.nio.ByteBuffer; @@ -107,6 +113,59 @@ void shutdown() { // Do nothing. } + /** + * Verifies the block's checksum. This is an I/O intensive operation. + */ + protected void verifyChecksum(long length, FileInputStream metaIn, + FileChannel blockChannel, String blockFileName) + throws IOException { + // Verify the checksum from the block's meta file + // Get the DataChecksum from the meta file header + BlockMetadataHeader header = + BlockMetadataHeader.readHeader(new DataInputStream( + new BufferedInputStream(metaIn, BlockMetadataHeader + .getHeaderSize()))); + FileChannel metaChannel = null; + try { + metaChannel = metaIn.getChannel(); + if (metaChannel == null) { + throw new IOException( + "Block InputStream meta file has no FileChannel."); + } + DataChecksum checksum = header.getChecksum(); + final int bytesPerChecksum = checksum.getBytesPerChecksum(); + final int checksumSize = checksum.getChecksumSize(); + final int numChunks = (8 * 1024 * 1024) / bytesPerChecksum; + ByteBuffer blockBuf = ByteBuffer.allocate(numChunks * bytesPerChecksum); + ByteBuffer checksumBuf = ByteBuffer.allocate(numChunks * checksumSize); + // Verify the checksum + int bytesVerified = 0; + while (bytesVerified < length) { + Preconditions.checkState(bytesVerified % bytesPerChecksum == 0, + "Unexpected partial chunk before EOF"); + assert bytesVerified % bytesPerChecksum == 0; + int bytesRead = fillBuffer(blockChannel, blockBuf); + if (bytesRead == -1) { + throw new IOException("checksum verification failed: premature EOF"); + } + blockBuf.flip(); + // Number of read chunks, including partial chunk at end + int chunks = (bytesRead + bytesPerChecksum - 1) / bytesPerChecksum; + checksumBuf.limit(chunks * checksumSize); + fillBuffer(metaChannel, checksumBuf); + checksumBuf.flip(); + checksum.verifyChunkedSums(blockBuf, checksumBuf, blockFileName, + bytesVerified); + // Success + bytesVerified += bytesRead; + blockBuf.clear(); + checksumBuf.clear(); + } + } finally { + IOUtils.closeQuietly(metaChannel); + } + } + /** * Reads bytes into a buffer until EOF or the buffer's limit is reached. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java index 919835a5ee23a..52d8d931c0490 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java @@ -18,22 +18,16 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.ExtendedBlockId; -import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; import org.apache.hadoop.io.nativeio.NativeIO; -import org.apache.hadoop.util.DataChecksum; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.BufferedInputStream; -import java.io.DataInputStream; import java.io.FileInputStream; import java.io.IOException; -import java.nio.ByteBuffer; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; @@ -98,59 +92,6 @@ MappableBlock load(long length, FileInputStream blockIn, return mappableBlock; } - /** - * Verifies the block's checksum. This is an I/O intensive operation. - */ - private void verifyChecksum(long length, FileInputStream metaIn, - FileChannel blockChannel, String blockFileName) - throws IOException { - // Verify the checksum from the block's meta file - // Get the DataChecksum from the meta file header - BlockMetadataHeader header = - BlockMetadataHeader.readHeader(new DataInputStream( - new BufferedInputStream(metaIn, BlockMetadataHeader - .getHeaderSize()))); - FileChannel metaChannel = null; - try { - metaChannel = metaIn.getChannel(); - if (metaChannel == null) { - throw new IOException( - "Block InputStream meta file has no FileChannel."); - } - DataChecksum checksum = header.getChecksum(); - final int bytesPerChecksum = checksum.getBytesPerChecksum(); - final int checksumSize = checksum.getChecksumSize(); - final int numChunks = (8 * 1024 * 1024) / bytesPerChecksum; - ByteBuffer blockBuf = ByteBuffer.allocate(numChunks * bytesPerChecksum); - ByteBuffer checksumBuf = ByteBuffer.allocate(numChunks * checksumSize); - // Verify the checksum - int bytesVerified = 0; - while (bytesVerified < length) { - Preconditions.checkState(bytesVerified % bytesPerChecksum == 0, - "Unexpected partial chunk before EOF"); - assert bytesVerified % bytesPerChecksum == 0; - int bytesRead = fillBuffer(blockChannel, blockBuf); - if (bytesRead == -1) { - throw new IOException("checksum verification failed: premature EOF"); - } - blockBuf.flip(); - // Number of read chunks, including partial chunk at end - int chunks = (bytesRead + bytesPerChecksum - 1) / bytesPerChecksum; - checksumBuf.limit(chunks * checksumSize); - fillBuffer(metaChannel, checksumBuf); - checksumBuf.flip(); - checksum.verifyChunkedSums(blockBuf, checksumBuf, blockFileName, - bytesVerified); - // Success - bytesVerified += bytesRead; - blockBuf.clear(); - checksumBuf.clear(); - } - } finally { - IOUtils.closeQuietly(metaChannel); - } - } - @Override public long getCacheUsed() { return memCacheStats.getCacheUsed(); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java index 05a9ba717e278..239fff815b1bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java @@ -18,25 +18,17 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.ExtendedBlockId; -import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; import org.apache.hadoop.hdfs.server.datanode.DNConf; -import org.apache.hadoop.io.nativeio.NativeIO; -import org.apache.hadoop.util.DataChecksum; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.BufferedInputStream; -import java.io.DataInputStream; import java.io.FileInputStream; import java.io.IOException; import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; /** @@ -79,112 +71,42 @@ void initialize(FsDatasetCache cacheManager) throws IOException { */ @Override MappableBlock load(long length, FileInputStream blockIn, - FileInputStream metaIn, String blockFileName, - ExtendedBlockId key) + FileInputStream metaIn, String blockFileName, + ExtendedBlockId key) throws IOException { PmemMappedBlock mappableBlock = null; - String filePath = null; + String cachePath = null; FileChannel blockChannel = null; - RandomAccessFile file = null; - MappedByteBuffer out = null; + RandomAccessFile cacheFile = null; try { blockChannel = blockIn.getChannel(); if (blockChannel == null) { throw new IOException("Block InputStream has no FileChannel."); } + cachePath = pmemVolumeManager.getCachePath(key); + cacheFile = new RandomAccessFile(cachePath, "rw"); + blockChannel.transferTo(0, length, cacheFile.getChannel()); + + // Verify checksum for the cached data instead of block file. + // The file channel should be repositioned. + cacheFile.getChannel().position(0); + verifyChecksum(length, metaIn, cacheFile.getChannel(), blockFileName); - filePath = pmemVolumeManager.getCachePath(key); - file = new RandomAccessFile(filePath, "rw"); - out = file.getChannel(). 
- map(FileChannel.MapMode.READ_WRITE, 0, length); - if (out == null) { - throw new IOException("Failed to map the block " + blockFileName + - " to persistent storage."); - } - verifyChecksumAndMapBlock(out, length, metaIn, blockChannel, - blockFileName); mappableBlock = new PmemMappedBlock(length, key); LOG.info("Successfully cached one replica:{} into persistent memory" - + ", [cached path={}, length={}]", key, filePath, length); + + ", [cached path={}, length={}]", key, cachePath, length); } finally { IOUtils.closeQuietly(blockChannel); - if (out != null) { - NativeIO.POSIX.munmap(out); - } - IOUtils.closeQuietly(file); + IOUtils.closeQuietly(cacheFile); if (mappableBlock == null) { - LOG.debug("Delete {} due to unsuccessful mapping.", filePath); - FsDatasetUtil.deleteMappedFile(filePath); + LOG.debug("Delete {} due to unsuccessful mapping.", cachePath); + FsDatasetUtil.deleteMappedFile(cachePath); } } return mappableBlock; } - /** - * Verifies the block's checksum meanwhile maps block to persistent memory. - * This is an I/O intensive operation. - */ - private void verifyChecksumAndMapBlock( - MappedByteBuffer out, long length, FileInputStream metaIn, - FileChannel blockChannel, String blockFileName) - throws IOException { - // Verify the checksum from the block's meta file - // Get the DataChecksum from the meta file header - BlockMetadataHeader header = - BlockMetadataHeader.readHeader(new DataInputStream( - new BufferedInputStream(metaIn, BlockMetadataHeader - .getHeaderSize()))); - FileChannel metaChannel = null; - try { - metaChannel = metaIn.getChannel(); - if (metaChannel == null) { - throw new IOException("Cannot get FileChannel from " + - "Block InputStream meta file."); - } - DataChecksum checksum = header.getChecksum(); - final int bytesPerChecksum = checksum.getBytesPerChecksum(); - final int checksumSize = checksum.getChecksumSize(); - final int numChunks = (8 * 1024 * 1024) / bytesPerChecksum; - ByteBuffer blockBuf = ByteBuffer.allocate(numChunks * bytesPerChecksum); - ByteBuffer checksumBuf = ByteBuffer.allocate(numChunks * checksumSize); - // Verify the checksum - int bytesVerified = 0; - while (bytesVerified < length) { - Preconditions.checkState(bytesVerified % bytesPerChecksum == 0, - "Unexpected partial chunk before EOF"); - assert bytesVerified % bytesPerChecksum == 0; - int bytesRead = fillBuffer(blockChannel, blockBuf); - if (bytesRead == -1) { - throw new IOException( - "Checksum verification failed for the block " + blockFileName + - ": premature EOF"); - } - blockBuf.flip(); - // Number of read chunks, including partial chunk at end - int chunks = (bytesRead + bytesPerChecksum - 1) / bytesPerChecksum; - checksumBuf.limit(chunks * checksumSize); - fillBuffer(metaChannel, checksumBuf); - checksumBuf.flip(); - checksum.verifyChunkedSums(blockBuf, checksumBuf, blockFileName, - bytesVerified); - - // / Copy data to persistent file - out.put(blockBuf); - // positioning the - bytesVerified += bytesRead; - - // Clear buffer - blockBuf.clear(); - checksumBuf.clear(); - } - // Forces to write data to storage device containing the mapped file - out.force(); - } finally { - IOUtils.closeQuietly(metaChannel); - } - } - @Override public long getCacheUsed() { return pmemVolumeManager.getCacheUsed(); From 9f056d905f3d21faf0dc9bd42e14ea61313ee9e8 Mon Sep 17 00:00:00 2001 From: Weiwei Yang Date: Sun, 26 May 2019 09:56:36 -0400 Subject: [PATCH 0038/1308] YARN-9497. Support grouping by diagnostics for query results of scheduler and app activities. Contributed by Tao Yang. 
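Note on the change below: the activities REST endpoints gain an optional groupBy query parameter (currently only DIAGNOSTIC), and ActivitiesUtils collapses per-node activity entries into groups keyed by allocation state and the first line of the diagnostic, reporting a count and the node ids per group. A standalone sketch of that two-level grouping, with illustrative names (GroupByDiagnosticSketch and Node are not part of the patch):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    /** Standalone illustration of the groupBy=DIAGNOSTIC aggregation. */
    final class GroupByDiagnosticSketch {
      static final class Node {
        final String state;
        final String diagnostic;
        final String nodeId;
        Node(String state, String diagnostic, String nodeId) {
          this.state = state;
          this.diagnostic = diagnostic;
          this.nodeId = nodeId;
        }
      }

      public static void main(String[] args) {
        List<Node> nodes = Arrays.asList(
            new Node("SKIPPED", "insufficient resources", "host1:1234"),
            new Node("SKIPPED", "insufficient resources", "host2:1234"),
            new Node("SKIPPED", "node locality skipped", "host3:1234"));

        // State -> short diagnostic -> node ids, mirroring ActivitiesUtils.
        Map<String, Map<String, List<String>>> grouped = nodes.stream().collect(
            Collectors.groupingBy(n -> n.state,
                Collectors.groupingBy(n -> n.diagnostic,
                    Collectors.mapping(n -> n.nodeId, Collectors.toList()))));

        // e.g. {SKIPPED={insufficient resources=[host1:1234, host2:1234],
        //                node locality skipped=[host3:1234]}}
        System.out.println(grouped);
      }
    }

Clients opt in by appending groupBy=DIAGNOSTIC to the existing scheduler activities and app activities queries; without the parameter the per-node output is unchanged.
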
--- .../activities/ActivitiesManager.java | 13 +- .../scheduler/activities/ActivitiesUtils.java | 61 ++++++ .../scheduler/activities/ActivityNode.java | 16 +- .../resourcemanager/webapp/RMWSConsts.java | 9 + .../webapp/RMWebServiceProtocol.java | 9 +- .../resourcemanager/webapp/RMWebServices.java | 39 +++- .../webapp/dao/ActivitiesInfo.java | 6 +- .../webapp/dao/ActivityNodeInfo.java | 50 ++++- .../webapp/dao/AppActivitiesInfo.java | 6 +- .../webapp/dao/AppAllocationInfo.java | 6 +- .../webapp/dao/AppRequestAllocationInfo.java | 15 +- .../webapp/dao/NodeAllocationInfo.java | 6 +- .../activities/TestActivitiesManager.java | 4 +- ...edulerActivitiesWithMultiNodesEnabled.java | 179 ++++++++++++++++++ .../webapp/DefaultRequestInterceptorREST.java | 5 +- .../webapp/FederationInterceptorREST.java | 5 +- .../router/webapp/RouterWebServices.java | 11 +- .../webapp/BaseRouterWebServicesTest.java | 4 +- .../webapp/MockRESTRequestInterceptor.java | 5 +- .../PassThroughRESTRequestInterceptor.java | 9 +- 20 files changed, 403 insertions(+), 55 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesUtils.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java index 7d1dd6954e15f..b8ef263747477 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java @@ -23,6 +23,7 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.commons.collections.CollectionUtils; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,7 +59,7 @@ public class ActivitiesManager extends AbstractService { // An empty node ID, we use this variable as a placeholder // in the activity records when recording multiple nodes assignments. 
public static final NodeId EMPTY_NODE_ID = NodeId.newInstance("", 0); - public static final String DIAGNOSTICS_DETAILS_SEPARATOR = "\n"; + public static final char DIAGNOSTICS_DETAILS_SEPARATOR = '\n'; public static final String EMPTY_DIAGNOSTICS = ""; private ThreadLocal>> recordingNodesAllocation; @@ -119,7 +120,8 @@ private void setupConfForCleanup(Configuration conf) { } public AppActivitiesInfo getAppActivitiesInfo(ApplicationId applicationId, - Set requestPriorities, Set allocationRequestIds) { + Set requestPriorities, Set allocationRequestIds, + RMWSConsts.ActivitiesGroupBy groupBy) { RMApp app = rmContext.getRMApps().get(applicationId); if (app != null && app.getFinalApplicationStatus() == FinalApplicationStatus.UNDEFINED) { @@ -138,7 +140,7 @@ public AppActivitiesInfo getAppActivitiesInfo(ApplicationId applicationId, allocations = new ArrayList(curAllocations); } } - return new AppActivitiesInfo(allocations, applicationId); + return new AppActivitiesInfo(allocations, applicationId, groupBy); } else { return new AppActivitiesInfo( "fail to get application activities after finished", @@ -146,14 +148,15 @@ public AppActivitiesInfo getAppActivitiesInfo(ApplicationId applicationId, } } - public ActivitiesInfo getActivitiesInfo(String nodeId) { + public ActivitiesInfo getActivitiesInfo(String nodeId, + RMWSConsts.ActivitiesGroupBy groupBy) { List allocations; if (nodeId == null) { allocations = lastAvailableNodeActivities; } else { allocations = completedNodeAllocations.get(NodeId.fromString(nodeId)); } - return new ActivitiesInfo(allocations, nodeId); + return new ActivitiesInfo(allocations, nodeId, groupBy); } public void recordNextNodeUpdateActivities(String nodeId) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesUtils.java new file mode 100644 index 0000000000000..4cdaac8e9142e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesUtils.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities; + +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivityNodeInfo; + +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Utilities for activities. 
+ */ +public final class ActivitiesUtils { + + private ActivitiesUtils(){} + + public static List getRequestActivityNodeInfos( + List activityNodes, + RMWSConsts.ActivitiesGroupBy groupBy) { + if (activityNodes == null) { + return null; + } + if (groupBy == RMWSConsts.ActivitiesGroupBy.DIAGNOSTIC) { + Map>> groupingResults = + activityNodes.stream().collect(Collectors + .groupingBy(ActivityNode::getState, Collectors + .groupingBy(ActivityNode::getShortDiagnostic, + Collectors.mapping(e -> e.getNodeId() == null ? + "" : + e.getNodeId().toString(), Collectors.toList())))); + return groupingResults.entrySet().stream().flatMap( + stateMap -> stateMap.getValue().entrySet().stream().map( + diagMap -> new ActivityNodeInfo(stateMap.getKey(), + diagMap.getKey().isEmpty() ? null : diagMap.getKey(), + diagMap.getValue()))) + .collect(Collectors.toList()); + } else { + return activityNodes.stream().map( + e -> new ActivityNodeInfo(e.getName(), e.getState(), + e.getDiagnostic(), e.getNodeId())).collect(Collectors.toList()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivityNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivityNode.java index 3edfddd43a872..e658d2fbefd17 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivityNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivityNode.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.NodeId; import java.util.LinkedList; @@ -108,7 +109,7 @@ public String getAllocationRequestId() { return allocationRequestId; } - public boolean getType() { + public boolean isAppType() { if (appPriority != null) { return true; } else { @@ -116,6 +117,19 @@ public boolean getType() { } } + public boolean isRequestType() { + return requestPriority != null && nodeId == null; + } + + public String getShortDiagnostic() { + if (this.diagnostic == null) { + return ""; + } else { + return StringUtils.split(this.diagnostic, + ActivitiesManager.DIAGNOSTICS_DETAILS_SEPARATOR)[0]; + } + } + public String toString() { StringBuilder sb = new StringBuilder(); sb.append(this.activityNodeName + " ") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java index 41a91ee7843cb..3c36fe82e8be8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java @@ -228,9 +228,18 @@ public final class RMWSConsts { public static final String 
QUEUE_ACL_TYPE = "queue-acl-type"; public static final String REQUEST_PRIORITIES = "requestPriorities"; public static final String ALLOCATION_REQUEST_IDS = "allocationRequestIds"; + public static final String GROUP_BY = "groupBy"; private RMWSConsts() { // not called } + /** + * Defines the groupBy types of activities, currently only support + * DIAGNOSTIC with which user can query aggregated activities + * grouped by allocation state and diagnostic. + */ + public enum ActivitiesGroupBy { + DIAGNOSTIC + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java index c5024d1487eea..7b49ed4ac2289 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java @@ -204,9 +204,12 @@ AppsInfo getApps(HttpServletRequest hsr, String stateQuery, * @param hsr the servlet request * @param nodeId the node we want to retrieve the activities. It is a * QueryParam. + * @param groupBy the groupBy type by which the activities should be + * aggregated. It is a QueryParam. * @return all the activities in the specific node */ - ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId); + ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId, + String groupBy); /** * This method retrieves all the activities for a specific app for a specific @@ -222,11 +225,13 @@ AppsInfo getApps(HttpServletRequest hsr, String stateQuery, * activities. It is a QueryParam. * @param allocationRequestIds the allocation request ids we want to retrieve * the activities. It is a QueryParam. + * @param groupBy the groupBy type by which the activities should be + * aggregated. It is a QueryParam. 
* @return all the activities about a specific app for a specific time */ AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId, String time, Set requestPriorities, - Set allocationRequestIds); + Set allocationRequestIds, String groupBy); /** * This method retrieves all the statistics for a specific app, and it is diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index 7417eb818691f..9b36995eb2640 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -56,6 +56,7 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; +import org.apache.commons.lang3.EnumUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.http.JettyUtils; @@ -632,7 +633,8 @@ public AppsInfo getApps(@Context HttpServletRequest hsr, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) @Override public ActivitiesInfo getActivities(@Context HttpServletRequest hsr, - @QueryParam(RMWSConsts.NODEID) String nodeId) { + @QueryParam(RMWSConsts.NODEID) String nodeId, + @QueryParam(RMWSConsts.GROUP_BY) String groupBy) { initForReadableEndpoints(); YarnScheduler scheduler = rm.getRMContext().getScheduler(); @@ -649,6 +651,13 @@ public ActivitiesInfo getActivities(@Context HttpServletRequest hsr, return new ActivitiesInfo(errMessage, nodeId); } + RMWSConsts.ActivitiesGroupBy activitiesGroupBy; + try { + activitiesGroupBy = parseActivitiesGroupBy(groupBy); + } catch (IllegalArgumentException e) { + return new ActivitiesInfo(e.getMessage(), nodeId); + } + List nodeList = abstractYarnScheduler.getNodeTracker().getAllNodes(); @@ -689,7 +698,7 @@ public ActivitiesInfo getActivities(@Context HttpServletRequest hsr, if (!illegalInput) { activitiesManager.recordNextNodeUpdateActivities(nodeId); - return activitiesManager.getActivitiesInfo(nodeId); + return activitiesManager.getActivitiesInfo(nodeId, activitiesGroupBy); } // Return a activities info with error message @@ -709,7 +718,8 @@ public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr, @QueryParam(RMWSConsts.MAX_TIME) String time, @QueryParam(RMWSConsts.REQUEST_PRIORITIES) Set requestPriorities, @QueryParam(RMWSConsts.ALLOCATION_REQUEST_IDS) - Set allocationRequestIds) { + Set allocationRequestIds, + @QueryParam(RMWSConsts.GROUP_BY) String groupBy) { initForReadableEndpoints(); YarnScheduler scheduler = rm.getRMContext().getScheduler(); @@ -729,6 +739,13 @@ public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr, return new AppActivitiesInfo(errMessage, null); } + RMWSConsts.ActivitiesGroupBy activitiesGroupBy; + try { + activitiesGroupBy = parseActivitiesGroupBy(groupBy); + } catch (IllegalArgumentException e) { + return new AppActivitiesInfo(e.getMessage(), appId); + } + double maxTime = 3.0; if (time != null) { @@ -745,7 +762,7 @@ public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr, 
activitiesManager.turnOnAppActivitiesRecording(applicationId, maxTime); AppActivitiesInfo appActivitiesInfo = activitiesManager.getAppActivitiesInfo(applicationId, - requestPriorities, allocationRequestIds); + requestPriorities, allocationRequestIds, activitiesGroupBy); return appActivitiesInfo; } catch (Exception e) { @@ -758,6 +775,20 @@ public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr, return null; } + private RMWSConsts.ActivitiesGroupBy parseActivitiesGroupBy(String groupBy) { + if (groupBy != null) { + if (!EnumUtils.isValidEnum(RMWSConsts.ActivitiesGroupBy.class, + groupBy.toUpperCase())) { + String errMesasge = + "Got invalid groupBy: " + groupBy + ", valid groupBy types: " + + Arrays.asList(RMWSConsts.ActivitiesGroupBy.values()); + throw new IllegalArgumentException(errMesasge); + } + return RMWSConsts.ActivitiesGroupBy.valueOf(groupBy.toUpperCase()); + } + return null; + } + @GET @Path(RMWSConsts.APP_STATISTICS) @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivitiesInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivitiesInfo.java index a78247f71f9e5..4ea4cd1e16030 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivitiesInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivitiesInfo.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; import com.google.common.base.Strings; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.records.NodeId; @@ -53,7 +54,8 @@ public ActivitiesInfo(String errorMessage, String nodeId) { this.nodeId = nodeId; } - public ActivitiesInfo(List nodeAllocations, String nodeId) { + public ActivitiesInfo(List nodeAllocations, String nodeId, + RMWSConsts.ActivitiesGroupBy groupBy) { this.nodeId = nodeId; this.allocations = new ArrayList<>(); @@ -78,7 +80,7 @@ public ActivitiesInfo(List nodeAllocations, String nodeId) { for (int i = 0; i < nodeAllocations.size(); i++) { NodeAllocation nodeAllocation = nodeAllocations.get(i); NodeAllocationInfo allocationInfo = new NodeAllocationInfo( - nodeAllocation); + nodeAllocation, groupBy); this.allocations.add(allocationInfo); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivityNodeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivityNodeInfo.java index e660fa57276db..b6e0a533b23d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivityNodeInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivityNodeInfo.java 
@@ -20,14 +20,16 @@ import com.google.common.base.Strings; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; -import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; /* * DAO object to display node information in allocation tree. @@ -44,6 +46,10 @@ public class ActivityNodeInfo { private String nodeId; private String allocationRequestId; + // Used for groups of activities + private String count; + private List nodeIds; + protected List children; ActivityNodeInfo() { @@ -57,7 +63,16 @@ public ActivityNodeInfo(String name, ActivityState allocationState, setNodeId(nId); } - ActivityNodeInfo(ActivityNode node) { + public ActivityNodeInfo(ActivityState groupAllocationState, + String groupDiagnostic, List groupNodeIds) { + this.allocationState = groupAllocationState.name(); + this.diagnostic = groupDiagnostic; + this.count = String.valueOf(groupNodeIds.size()); + this.nodeIds = groupNodeIds; + } + + ActivityNodeInfo(ActivityNode node, + RMWSConsts.ActivitiesGroupBy groupBy) { this.name = node.getName(); setPriority(node); setNodeId(node.getNodeId()); @@ -65,11 +80,14 @@ public ActivityNodeInfo(String name, ActivityState allocationState, this.diagnostic = node.getDiagnostic(); this.requestPriority = node.getRequestPriority(); this.allocationRequestId = node.getAllocationRequestId(); - this.children = new ArrayList<>(); - - for (ActivityNode child : node.getChildren()) { - ActivityNodeInfo containerInfo = new ActivityNodeInfo(child); - this.children.add(containerInfo); + // only consider grouping for request type + if (node.isRequestType()) { + this.children = ActivitiesUtils + .getRequestActivityNodeInfos(node.getChildren(), groupBy); + } else { + this.children = node.getChildren().stream() + .map(e -> new ActivityNodeInfo(e, groupBy)) + .collect(Collectors.toList()); } } @@ -80,7 +98,7 @@ public void setNodeId(NodeId nId) { } private void setPriority(ActivityNode node) { - if (node.getType()) { + if (node.isAppType()) { this.appPriority = node.getAppPriority(); } else { this.requestPriority = node.getRequestPriority(); @@ -91,7 +109,23 @@ public String getNodeId() { return nodeId; } + public void setNodeIds(List nodeIds) { + this.nodeIds = nodeIds; + } + public String getAllocationRequestId() { return allocationRequestId; } + + public String getCount() { + return count; + } + + public List getNodeIds() { + return nodeIds; + } + + public List getChildren() { + return children; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppActivitiesInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppActivitiesInfo.java index 1b4cafe8119bd..c2777499acf59 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppActivitiesInfo.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppActivitiesInfo.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -59,7 +60,8 @@ public AppActivitiesInfo(String errorMessage, String applicationId) { } public AppActivitiesInfo(List appAllocations, - ApplicationId applicationId) { + ApplicationId applicationId, + RMWSConsts.ActivitiesGroupBy groupBy) { this.applicationId = applicationId.toString(); this.allocations = new ArrayList<>(); @@ -73,7 +75,7 @@ public AppActivitiesInfo(List appAllocations, for (int i = appAllocations.size() - 1; i > -1; i--) { AppAllocation appAllocation = appAllocations.get(i); AppAllocationInfo appAllocationInfo = new AppAllocationInfo( - appAllocation); + appAllocation, groupBy); this.allocations.add(appAllocationInfo); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java index 48a209427d992..da2be57184ebf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java @@ -20,6 +20,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AppAllocation; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; @@ -47,7 +48,8 @@ public class AppAllocationInfo { AppAllocationInfo() { } - AppAllocationInfo(AppAllocation allocation) { + AppAllocationInfo(AppAllocation allocation, + RMWSConsts.ActivitiesGroupBy groupBy) { this.requestAllocation = new ArrayList<>(); this.nodeId = allocation.getNodeId(); this.queueName = allocation.getQueueName(); @@ -62,7 +64,7 @@ public class AppAllocationInfo { for (List requestActivityNodes : requestToActivityNodes .values()) { AppRequestAllocationInfo requestAllocationInfo = - new AppRequestAllocationInfo(requestActivityNodes); + new AppRequestAllocationInfo(requestActivityNodes, groupBy); this.requestAllocation.add(requestAllocationInfo); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppRequestAllocationInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppRequestAllocationInfo.java index 4b20c9038f5c3..09251283aebf8 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppRequestAllocationInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppRequestAllocationInfo.java @@ -19,12 +19,13 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; import com.google.common.collect.Iterables; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityNode; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; -import java.util.ArrayList; import java.util.List; /** @@ -41,18 +42,14 @@ public class AppRequestAllocationInfo { AppRequestAllocationInfo() { } - AppRequestAllocationInfo(List activityNodes) { - this.allocationAttempt = new ArrayList<>(); + AppRequestAllocationInfo(List activityNodes, + RMWSConsts.ActivitiesGroupBy groupBy) { ActivityNode lastActivityNode = Iterables.getLast(activityNodes); this.requestPriority = lastActivityNode.getRequestPriority(); this.allocationRequestId = lastActivityNode.getAllocationRequestId(); this.allocationState = lastActivityNode.getState().name(); - for (ActivityNode attempt : activityNodes) { - ActivityNodeInfo containerInfo = - new ActivityNodeInfo(attempt.getName(), attempt.getState(), - attempt.getDiagnostic(), attempt.getNodeId()); - this.allocationAttempt.add(containerInfo); - } + this.allocationAttempt = ActivitiesUtils + .getRequestActivityNodeInfos(activityNodes, groupBy); } public String getRequestPriority() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeAllocationInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeAllocationInfo.java index c8f1c6599e672..71c576d74c44b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeAllocationInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeAllocationInfo.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.NodeAllocation; @@ -42,11 +43,12 @@ public class NodeAllocationInfo { NodeAllocationInfo() { } - NodeAllocationInfo(NodeAllocation allocation) { + NodeAllocationInfo(NodeAllocation allocation, + RMWSConsts.ActivitiesGroupBy groupBy) { this.allocatedContainerId = allocation.getContainerId(); this.finalAllocationState = allocation.getFinalAllocationState().name(); - root = new ActivityNodeInfo(allocation.getRoot()); + root = new ActivityNodeInfo(allocation.getRoot(), groupBy); } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/TestActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/TestActivitiesManager.java index c9ce73771a9fc..495c7e248b069 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/TestActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/TestActivitiesManager.java @@ -286,14 +286,14 @@ public void testAppActivitiesTTL() throws Exception { ActivityDiagnosticConstant.SKIPPED_ALL_PRIORITIES); } AppActivitiesInfo appActivitiesInfo = newActivitiesManager - .getAppActivitiesInfo(app.getApplicationId(), null, null); + .getAppActivitiesInfo(app.getApplicationId(), null, null, null); Assert.assertEquals(numActivities, appActivitiesInfo.getAllocations().size()); // sleep until all app activities expired Thread.sleep(cleanupIntervalMs + appActivitiesTTL); // there should be no remaining app activities appActivitiesInfo = newActivitiesManager - .getAppActivitiesInfo(app.getApplicationId(), null, null); + .getAppActivitiesInfo(app.getApplicationId(), null, null, null); Assert.assertEquals(0, appActivitiesInfo.getAllocations().size()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java index 6157d0beab932..525925bb4d221 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java @@ -36,6 +36,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; @@ -46,6 +47,7 @@ import org.apache.hadoop.yarn.webapp.JerseyTestBase; import org.codehaus.jettison.json.JSONArray; import org.codehaus.jettison.json.JSONObject; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -61,6 +63,7 @@ import static 
org.apache.hadoop.yarn.server.resourcemanager.webapp.ActivitiesTestUtils.verifyNumberOfAllocations; import static org.apache.hadoop.yarn.server.resourcemanager.webapp.ActivitiesTestUtils.verifyStateOfAllocations; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; /** @@ -444,4 +447,180 @@ public void testAppInsufficientResourceDiagnostic() throws Exception { rm.stop(); } } + + @Test (timeout=30000) + public void testGroupByDiagnostics() throws Exception { + rm.start(); + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + + MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * 1024); + MockNM nm2 = rm.registerNode("127.0.0.2:1234", 2 * 1024); + MockNM nm3 = rm.registerNode("127.0.0.3:1234", 2 * 1024); + MockNM nm4 = rm.registerNode("127.0.0.4:1234", 2 * 1024); + + try { + RMApp app1 = rm.submitApp(3072, "app1", "user1", null, "b"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); + + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(RMWSConsts.SCHEDULER_ACTIVITIES); + MultivaluedMapImpl params = new MultivaluedMapImpl(); + + /* + * test non-exist groupBy + */ + params.add(RMWSConsts.GROUP_BY, "NON-EXIST-GROUP-BY"); + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); + Assert.assertTrue(json.getString("diagnostic") + .startsWith("Got invalid groupBy:")); + params.remove(RMWSConsts.GROUP_BY); + + /* + * test groupBy: DIAGNOSTIC + */ + params.add(RMWSConsts.GROUP_BY, RMWSConsts.ActivitiesGroupBy. + DIAGNOSTIC.name().toLowerCase()); + json = ActivitiesTestUtils.requestWebResource(r, params); + assertEquals("waiting for next allocation", json.getString("diagnostic")); + + //Request a container for am2, will reserve a container on nm1 + am1.allocate("*", 4096, 1, new ArrayList<>()); + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + + json = ActivitiesTestUtils.requestWebResource(r, params); + + //Check activities + verifyNumberOfAllocations(json, 1); + JSONObject allocationObj = json.getJSONObject("allocations"); + //Check diagnostic for request of app1 + Predicate findReqPred = + (obj) -> obj.optString("name").equals("request_1_-1"); + List reqObjs = + findInAllocations(allocationObj, findReqPred); + assertEquals(1, reqObjs.size()); + JSONArray reqChildren = reqObjs.get(0).getJSONArray("children"); + assertEquals(2, reqChildren.length()); + for (int i = 0; i < reqChildren.length(); i++) { + JSONObject reqChild = reqChildren.getJSONObject(i); + if (reqChild.getString("allocationState") + .equals(AllocationState.SKIPPED.name())) { + assertEquals("3", reqChild.getString("count")); + assertEquals(3, reqChild.getJSONArray("nodeIds").length()); + assertTrue(reqChild.optString("diagnostic") + .contains(INSUFFICIENT_RESOURCE_DIAGNOSTIC_PREFIX)); + } else if (reqChild.getString("allocationState") + .equals(AllocationState.RESERVED.name())) { + assertEquals("1", reqChild.getString("count")); + assertNotNull(reqChild.getString("nodeIds")); + } else { + Assert.fail("Allocation state should be " + + AllocationState.SKIPPED.name() + " or " + + AllocationState.RESERVED.name() + "!"); + } + } + } finally { + rm.stop(); + } + } + + @Test (timeout=30000) + public void testAppGroupByDiagnostics() throws Exception { + rm.start(); + CapacityScheduler cs = (CapacityScheduler)rm.getResourceScheduler(); + + MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * 1024); + MockNM nm2 = rm.registerNode("127.0.0.2:1234", 2 * 
1024); + MockNM nm3 = rm.registerNode("127.0.0.3:1234", 2 * 1024); + MockNM nm4 = rm.registerNode("127.0.0.4:1234", 2 * 1024); + + try { + RMApp app1 = rm.submitApp(3072, "app1", "user1", null, "b"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); + + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(RMWSConsts.SCHEDULER_APP_ACTIVITIES); + MultivaluedMapImpl params = new MultivaluedMapImpl(); + params.add(RMWSConsts.APP_ID, app1.getApplicationId().toString()); + + /* + * test non-exist groupBy + */ + params.add(RMWSConsts.GROUP_BY, "NON-EXIST-GROUP-BY"); + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); + Assert.assertTrue(json.getString("diagnostic") + .startsWith("Got invalid groupBy:")); + params.remove(RMWSConsts.GROUP_BY); + + /* + * test groupBy: DIAGNOSTIC + */ + params.add(RMWSConsts.GROUP_BY, RMWSConsts.ActivitiesGroupBy. + DIAGNOSTIC.name().toLowerCase()); + json = ActivitiesTestUtils.requestWebResource(r, params); + assertEquals("waiting for display", json.getString("diagnostic")); + + //Request two containers with different priority for am1 + am1.allocate(Arrays.asList(ResourceRequest + .newInstance(Priority.newInstance(0), "*", + Resources.createResource(1024), 1), ResourceRequest + .newInstance(Priority.newInstance(1), "*", + Resources.createResource(4096), 1)), null); + + //Trigger scheduling, will allocate a container with priority 0 + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + + //Trigger scheduling, will reserve a container with priority 1 on nm1 + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + + json = ActivitiesTestUtils.requestWebResource(r, params); + + //Check app activities + verifyNumberOfAllocations(json, 2); + JSONArray allocationArray = json.getJSONArray("allocations"); + //Check first activity is for second allocation with RESERVED state + JSONObject allocationObj = allocationArray.getJSONObject(0); + verifyStateOfAllocations(allocationObj, "allocationState", "RESERVED"); + JSONObject requestAllocationObj = + allocationObj.getJSONObject("requestAllocation"); + verifyNumberOfAllocationAttempts(requestAllocationObj, 2); + JSONArray allocationAttemptArray = + requestAllocationObj.getJSONArray("allocationAttempt"); + for (int i=0; i requestPriorities, - Set allocationRequestIds) { + Set allocationRequestIds, String groupBy) { // time and appId are specified inside hsr return RouterWebServiceUtil.genericForward(webAppAddress, hsr, AppActivitiesInfo.class, HTTPMethods.GET, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java index b6f92f24cd503..ec4cb8b21e0be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java @@ -1138,14 +1138,15 @@ public String dumpSchedulerLogs(String time, HttpServletRequest hsr) } @Override - public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId) { + public ActivitiesInfo 
getActivities(HttpServletRequest hsr, String nodeId, + String groupBy) { throw new NotImplementedException("Code is not implemented"); } @Override public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId, String time, Set requestPriorities, - Set allocationRequestIds) { + Set allocationRequestIds, String groupBy) { throw new NotImplementedException("Code is not implemented"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java index 7654d2d22928c..ce45f21560cf6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java @@ -446,10 +446,12 @@ public AppsInfo getApps(@Context HttpServletRequest hsr, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) @Override public ActivitiesInfo getActivities(@Context HttpServletRequest hsr, - @QueryParam(RMWSConsts.NODEID) String nodeId) { + @QueryParam(RMWSConsts.NODEID) String nodeId, + @QueryParam(RMWSConsts.GROUP_BY) String groupBy) { init(); RequestInterceptorChainWrapper pipeline = getInterceptorChain(hsr); - return pipeline.getRootInterceptor().getActivities(hsr, nodeId); + return pipeline.getRootInterceptor() + .getActivities(hsr, nodeId, groupBy); } @GET @@ -462,11 +464,12 @@ public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr, @QueryParam(RMWSConsts.MAX_TIME) String time, @QueryParam(RMWSConsts.REQUEST_PRIORITIES) Set requestPriorities, @QueryParam(RMWSConsts.ALLOCATION_REQUEST_IDS) - Set allocationRequestIds) { + Set allocationRequestIds, + @QueryParam(RMWSConsts.GROUP_BY) String groupBy) { init(); RequestInterceptorChainWrapper pipeline = getInterceptorChain(hsr); return pipeline.getRootInterceptor().getAppActivities(hsr, appId, time, - requestPriorities, allocationRequestIds); + requestPriorities, allocationRequestIds, groupBy); } @GET diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/BaseRouterWebServicesTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/BaseRouterWebServicesTest.java index c0d7a63199731..535c579a85d99 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/BaseRouterWebServicesTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/BaseRouterWebServicesTest.java @@ -175,13 +175,13 @@ protected AppsInfo getApps(String user) protected ActivitiesInfo getActivities(String user) throws IOException, InterruptedException { return routerWebService.getActivities( - createHttpServletRequest(user), null); + createHttpServletRequest(user), null, null); } protected AppActivitiesInfo getAppActivities(String user) throws IOException, InterruptedException { return routerWebService.getAppActivities( - createHttpServletRequest(user), null, null, null, null); + 
createHttpServletRequest(user), null, null, null, null, null); } protected ApplicationStatisticsInfo getAppStatistics(String user) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java index a351928e1e77b..b3e18a9206167 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java @@ -133,14 +133,15 @@ public AppsInfo getApps(HttpServletRequest hsr, String stateQuery, } @Override - public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId) { + public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId, + String groupBy) { return new ActivitiesInfo(); } @Override public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId, String time, Set requestPriorities, - Set allocationRequestIds) { + Set allocationRequestIds, String groupBy) { return new AppActivitiesInfo(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java index 9dce277de5929..400bf714a86cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java @@ -161,16 +161,17 @@ public AppsInfo getApps(HttpServletRequest hsr, String stateQuery, } @Override - public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId) { - return getNextInterceptor().getActivities(hsr, nodeId); + public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId, + String groupBy) { + return getNextInterceptor().getActivities(hsr, nodeId, groupBy); } @Override public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId, String time, Set requestPriorities, - Set allocationRequestIds) { + Set allocationRequestIds, String groupBy) { return getNextInterceptor().getAppActivities(hsr, appId, time, - requestPriorities, allocationRequestIds); + requestPriorities, allocationRequestIds, groupBy); } @Override From 9f933e6446c0c61c72fd05338a1e36e7ba37af8b Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 27 May 2019 15:24:59 +0900 Subject: [PATCH 0039/1308] HADOOP-16323. https everywhere in Maven settings. 
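
The bulk of the change is mechanical: in each pom.xml, the Apache license link
in the header comment moves to https://www.apache.org/licenses/LICENSE-2.0 and
the location of the Maven 4.0.0 model schema moves to
https://maven.apache.org/xsd/maven-4.0.0.xsd. The POM namespace URI is an
identifier rather than a fetched URL and stays on http, so a typical project
element reads as follows after the change (an illustrative sketch reconstructed
from the hunks below, not a verbatim hunk):

    <project xmlns="http://maven.apache.org/POM/4.0.0"
             xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
             xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
                                 https://maven.apache.org/xsd/maven-4.0.0.xsd">

A few poms also gain a missing trailing newline, which accounts for the
slightly larger per-file counts in the stat below.
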
--- hadoop-assemblies/pom.xml | 4 ++-- hadoop-build-tools/pom.xml | 4 ++-- hadoop-client-modules/hadoop-client-api/pom.xml | 4 ++-- .../hadoop-client-check-invariants/pom.xml | 4 ++-- .../hadoop-client-check-test-invariants/pom.xml | 4 ++-- .../hadoop-client-integration-tests/pom.xml | 4 ++-- .../hadoop-client-minicluster/pom.xml | 4 ++-- hadoop-client-modules/hadoop-client-runtime/pom.xml | 4 ++-- hadoop-client-modules/hadoop-client/pom.xml | 4 ++-- hadoop-client-modules/pom.xml | 4 ++-- .../hadoop-cloud-storage/pom.xml | 4 ++-- hadoop-cloud-storage-project/pom.xml | 4 ++-- hadoop-common-project/hadoop-annotations/pom.xml | 4 ++-- hadoop-common-project/hadoop-auth-examples/pom.xml | 4 ++-- hadoop-common-project/hadoop-auth/pom.xml | 4 ++-- hadoop-common-project/hadoop-common/pom.xml | 4 ++-- hadoop-common-project/hadoop-kms/pom.xml | 4 ++-- hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++-- hadoop-common-project/hadoop-nfs/pom.xml | 4 ++-- hadoop-common-project/hadoop-registry/pom.xml | 4 ++-- hadoop-common-project/pom.xml | 4 ++-- hadoop-dist/pom.xml | 4 ++-- hadoop-hdds/client/pom.xml | 6 +++--- hadoop-hdds/common/pom.xml | 4 ++-- hadoop-hdds/config/pom.xml | 4 ++-- hadoop-hdds/container-service/pom.xml | 4 ++-- hadoop-hdds/docs/pom.xml | 4 ++-- hadoop-hdds/framework/pom.xml | 4 ++-- hadoop-hdds/pom.xml | 4 ++-- hadoop-hdds/server-scm/pom.xml | 4 ++-- hadoop-hdds/tools/pom.xml | 4 ++-- hadoop-hdfs-project/hadoop-hdfs-client/pom.xml | 4 ++-- hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 4 ++-- hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml | 4 ++-- hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml | 4 ++-- hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 6 +++--- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 6 +++--- hadoop-hdfs-project/pom.xml | 4 ++-- .../hadoop-mapreduce-client-app/pom.xml | 4 ++-- .../hadoop-mapreduce-client-common/pom.xml | 4 ++-- .../hadoop-mapreduce-client-core/pom.xml | 4 ++-- .../hadoop-mapreduce-client-hs-plugins/pom.xml | 4 ++-- .../hadoop-mapreduce-client-hs/pom.xml | 4 ++-- .../hadoop-mapreduce-client-jobclient/pom.xml | 4 ++-- .../hadoop-mapreduce-client-nativetask/pom.xml | 4 ++-- .../hadoop-mapreduce-client-shuffle/pom.xml | 4 ++-- .../hadoop-mapreduce-client-uploader/pom.xml | 6 +++--- .../hadoop-mapreduce-client/pom.xml | 6 +++--- .../hadoop-mapreduce-examples/pom.xml | 4 ++-- hadoop-mapreduce-project/pom.xml | 6 +++--- hadoop-maven-plugins/pom.xml | 4 ++-- hadoop-minicluster/pom.xml | 4 ++-- hadoop-ozone/client/pom.xml | 6 +++--- hadoop-ozone/common/pom.xml | 4 ++-- hadoop-ozone/datanode/pom.xml | 4 ++-- hadoop-ozone/dist/pom.xml | 4 ++-- hadoop-ozone/integration-test/pom.xml | 4 ++-- hadoop-ozone/objectstore-service/pom.xml | 4 ++-- hadoop-ozone/ozone-manager/pom.xml | 4 ++-- hadoop-ozone/ozone-recon-codegen/pom.xml | 6 +++--- hadoop-ozone/ozone-recon/pom.xml | 6 +++--- hadoop-ozone/ozonefs-lib-current/pom.xml | 4 ++-- hadoop-ozone/ozonefs-lib-legacy/pom.xml | 4 ++-- hadoop-ozone/ozonefs/pom.xml | 4 ++-- hadoop-ozone/pom.xml | 4 ++-- hadoop-ozone/s3gateway/pom.xml | 4 ++-- hadoop-ozone/tools/pom.xml | 4 ++-- hadoop-ozone/upgrade/pom.xml | 4 ++-- hadoop-project-dist/pom.xml | 6 +++--- hadoop-project/pom.xml | 4 ++-- hadoop-submarine/hadoop-submarine-all/pom.xml | 4 ++-- hadoop-submarine/hadoop-submarine-core/pom.xml | 4 ++-- hadoop-submarine/hadoop-submarine-dist/pom.xml | 4 ++-- hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml | 4 ++-- .../hadoop-submarine-yarnservice-runtime/pom.xml | 4 ++-- hadoop-submarine/pom.xml | 4 ++-- 
hadoop-tools/hadoop-aliyun/pom.xml | 4 ++-- hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++-- hadoop-tools/hadoop-archives/pom.xml | 4 ++-- hadoop-tools/hadoop-aws/pom.xml | 4 ++-- hadoop-tools/hadoop-azure-datalake/pom.xml | 6 +++--- hadoop-tools/hadoop-azure/pom.xml | 6 +++--- hadoop-tools/hadoop-datajoin/pom.xml | 4 ++-- hadoop-tools/hadoop-distcp/pom.xml | 4 ++-- hadoop-tools/hadoop-extras/pom.xml | 4 ++-- hadoop-tools/hadoop-fs2img/pom.xml | 2 +- hadoop-tools/hadoop-gridmix/pom.xml | 4 ++-- hadoop-tools/hadoop-kafka/pom.xml | 4 ++-- hadoop-tools/hadoop-openstack/pom.xml | 4 ++-- hadoop-tools/hadoop-pipes/pom.xml | 4 ++-- hadoop-tools/hadoop-resourceestimator/pom.xml | 4 ++-- hadoop-tools/hadoop-rumen/pom.xml | 4 ++-- hadoop-tools/hadoop-sls/pom.xml | 4 ++-- hadoop-tools/hadoop-streaming/pom.xml | 4 ++-- hadoop-tools/hadoop-tools-dist/pom.xml | 4 ++-- hadoop-tools/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-api/pom.xml | 4 ++-- .../hadoop-yarn-applications-catalog-docker/pom.xml | 6 +++--- .../hadoop-yarn-applications-catalog-webapp/pom.xml | 6 +++--- .../hadoop-yarn-applications-catalog/pom.xml | 6 +++--- .../hadoop-yarn-applications-distributedshell/pom.xml | 4 ++-- .../hadoop-yarn-applications-mawo-core/pom.xml | 6 +++--- .../hadoop-yarn-applications-mawo/pom.xml | 6 +++--- .../pom.xml | 4 ++-- .../hadoop-yarn-services-api/pom.xml | 4 ++-- .../hadoop-yarn-services-core/pom.xml | 4 ++-- .../hadoop-yarn-services/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-applications/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-client/pom.xml | 2 +- .../hadoop-yarn/hadoop-yarn-common/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-csi/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-registry/pom.xml | 4 ++-- .../pom.xml | 4 ++-- .../hadoop-yarn-server-common/pom.xml | 4 ++-- .../hadoop-yarn-server-nodemanager/pom.xml | 4 ++-- .../hadoop-yarn-server-resourcemanager/pom.xml | 4 ++-- .../hadoop-yarn-server-router/pom.xml | 4 ++-- .../hadoop-yarn-server-sharedcachemanager/pom.xml | 2 +- .../hadoop-yarn-server-tests/pom.xml | 4 ++-- .../hadoop-yarn-server-timeline-pluginstorage/pom.xml | 4 ++-- .../pom.xml | 4 ++-- .../pom.xml | 4 ++-- .../pom.xml | 4 ++-- .../pom.xml | 6 +++--- .../pom.xml | 6 +++--- .../pom.xml | 6 +++--- .../pom.xml | 6 +++--- .../hadoop-yarn-server-timelineservice-hbase/pom.xml | 6 +++--- .../hadoop-yarn-server-timelineservice/pom.xml | 4 ++-- .../hadoop-yarn-server-web-proxy/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-server/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-site/pom.xml | 4 ++-- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml | 4 ++-- hadoop-yarn-project/hadoop-yarn/pom.xml | 6 +++--- hadoop-yarn-project/pom.xml | 6 +++--- pom.xml | 10 +++++----- 136 files changed, 296 insertions(+), 296 deletions(-) diff --git a/hadoop-assemblies/pom.xml b/hadoop-assemblies/pom.xml index aabef34579609..2421ec12b3d76 100644 --- a/hadoop-assemblies/pom.xml +++ b/hadoop-assemblies/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-build-tools/pom.xml b/hadoop-build-tools/pom.xml index b94dd32038953..39abbc9412fdb 100644 --- a/hadoop-build-tools/pom.xml +++ b/hadoop-build-tools/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +14,7 @@ --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-main org.apache.hadoop diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml index 9e4406d39d1b1..7aee190b00648 100644 --- a/hadoop-client-modules/hadoop-client-api/pom.xml +++ b/hadoop-client-modules/hadoop-client-api/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +13,7 @@ limitations under the License. See accompanying LICENSE file. --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml index f0629867d5120..f2504d84067ce 100644 --- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +13,7 @@ limitations under the License. See accompanying LICENSE file. --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml index c02ce5658dcc4..a4c56f4d41a87 100644 --- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +13,7 @@ limitations under the License. See accompanying LICENSE file. 
--> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml index 558372d5ae42b..1a72df470c914 100644 --- a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml +++ b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +13,7 @@ limitations under the License. See accompanying LICENSE file. --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index 9007462771486..4589c2d55977b 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +13,7 @@ limitations under the License. See accompanying LICENSE file. --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml index 1e98a61a94c74..fe7fe40ac2fc3 100644 --- a/hadoop-client-modules/hadoop-client-runtime/pom.xml +++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +13,7 @@ limitations under the License. See accompanying LICENSE file. --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml index 86b23fe82f4b8..6d2daa81ebc4c 100644 --- a/hadoop-client-modules/hadoop-client/pom.xml +++ b/hadoop-client-modules/hadoop-client/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +13,7 @@ limitations under the License. See accompanying LICENSE file. 
--> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-client-modules/pom.xml b/hadoop-client-modules/pom.xml index 3273240a730f2..912fde91cc42a 100644 --- a/hadoop-client-modules/pom.xml +++ b/hadoop-client-modules/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +13,7 @@ limitations under the License. See accompanying LICENSE file. --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml index 16e9582434eac..dc4161ec1d31e 100644 --- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +13,7 @@ limitations under the License. See accompanying LICENSE file. --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-cloud-storage-project/pom.xml b/hadoop-cloud-storage-project/pom.xml index 50dd70ef6169a..ea899cf01f43a 100644 --- a/hadoop-cloud-storage-project/pom.xml +++ b/hadoop-cloud-storage-project/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml index d8373c801f5c6..5d64e83fad643 100644 --- a/hadoop-common-project/hadoop-annotations/pom.xml +++ b/hadoop-common-project/hadoop-annotations/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml index a105885654a1a..7bb0bcf2e5483 100644 --- a/hadoop-common-project/hadoop-auth-examples/pom.xml +++ b/hadoop-common-project/hadoop-auth-examples/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index d9ea33dfb2189..eba5c650998f3 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 54efeeb0602b6..fc089eb012a7c 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index 0eed09a256fa4..31062c5c0aaa3 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index f1566f1f1c69b..8323ae440aa33 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +14,7 @@ --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-project diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml index fb3fe68ba9d42..62fa0f7594d32 100644 --- a/hadoop-common-project/hadoop-nfs/pom.xml +++ b/hadoop-common-project/hadoop-nfs/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-common-project/hadoop-registry/pom.xml b/hadoop-common-project/hadoop-registry/pom.xml index 7ca1c9e7a8a8c..d0c17cfc96b8d 100644 --- a/hadoop-common-project/hadoop-registry/pom.xml +++ b/hadoop-common-project/hadoop-registry/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-project org.apache.hadoop diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml index e96d1ba9e1608..03c051aae6226 100644 --- a/hadoop-common-project/pom.xml +++ b/hadoop-common-project/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index 1d3db65930845..e2a9b678989bd 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index 040332e504ddf..d996a9fc90834 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ +https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop @@ -41,4 +41,4 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> - \ No newline at end of file + diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 51560ca3af92d..8f9432c58fb0a 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ +https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index 075f587a35046..7b79e50e6f812 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ +https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index c74d68690e360..f402dfd7feddd 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ +https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index edb4665091ea8..e8ef9fc351998 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ +https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index c107eebcd6b19..05c92606b3b8d 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ +https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index c063cbf5db9ea..6a7cb611deea3 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
[The remainder of this excerpt is a long run of mechanical hunks that apply the same
http -> https cleanup to Maven POM files across the tree; the diff text here was flattened
in extraction, so only the substance of the hunks is preserved. In each file the hunks make
substitutions of the following kinds:

-    http://www.apache.org/licenses/LICENSE-2.0
+    https://www.apache.org/licenses/LICENSE-2.0

 * the XSD reference in xsi:schemaLocation becomes https://maven.apache.org/xsd/maven-4.0.0.xsd
   (https://maven.apache.org/maven-v4_0_0.xsd for the few POMs still on the old XSD name), while
   the http://maven.apache.org/POM/4.0.0 namespace URI itself is left unchanged,
 * <url> elements such as http://hadoop.apache.org/ and http://maven.apache.org become https,
 * the jdiff doclet link becomes https://hadoop.apache.org/docs/r${jdiff.stable.api}/api/,
 * a missing trailing newline is added in the hadoop-hdfs-rbf, hadoop-mapreduce-client-uploader,
   hadoop-ozone/client, hadoop-ozone/ozone-recon-codegen and hadoop-ozone/ozone-recon POMs,
 * plus an occasional trivial whitespace cleanup in the license comment.

POMs touched in this excerpt (the first hunk continues a file introduced before the excerpt,
and the last file's hunk is cut off):

 * hadoop-hdds: server-scm, tools
 * hadoop-hdfs-project: hadoop-hdfs-client, hadoop-hdfs-httpfs, hadoop-hdfs-native-client,
   hadoop-hdfs-nfs, hadoop-hdfs-rbf, hadoop-hdfs, and hadoop-hdfs-project/pom.xml itself
 * hadoop-mapreduce-project: hadoop-mapreduce-client-{app,common,core,hs-plugins,hs,jobclient,
   nativetask,shuffle,uploader}, hadoop-mapreduce-client, hadoop-mapreduce-examples, and
   hadoop-mapreduce-project/pom.xml itself
 * hadoop-maven-plugins, hadoop-minicluster
 * hadoop-ozone: client, common, datanode, dist, integration-test, objectstore-service,
   ozone-manager, ozone-recon-codegen, ozone-recon, ozonefs-lib-current, ozonefs-lib-legacy,
   ozonefs, s3gateway, tools, upgrade, and hadoop-ozone/pom.xml itself
 * hadoop-project-dist, hadoop-project
 * hadoop-submarine: hadoop-submarine-{all,core,dist,tony-runtime,yarnservice-runtime}, and
   hadoop-submarine/pom.xml itself
 * hadoop-tools: hadoop-{aliyun,archive-logs,archives,aws,azure-datalake,azure,datajoin,distcp,
   extras,fs2img,gridmix,kafka,openstack,pipes,resourceestimator,rumen,sls,streaming,tools-dist},
   and hadoop-tools/pom.xml itself
 * hadoop-yarn-project/hadoop-yarn: hadoop-yarn-api; hadoop-yarn-applications (catalog-docker,
   catalog-webapp, catalog, distributedshell, mawo-core, mawo, unmanaged-am-launcher,
   services-api, services-core, services, and the applications pom.xml); hadoop-yarn-client;
   hadoop-yarn-common; hadoop-yarn-csi; hadoop-yarn-registry; hadoop-yarn-server
   (applicationhistoryservice, common, nodemanager, resourcemanager, router,
   sharedcachemanager, tests)]
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +14,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 hadoop-yarn-server diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml index 105d59c54dd59..1e41c3b5b0069 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn-server org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml index be668e2312f18..bed6aa0e7fba1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn-server org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml index 43021d38ad562..a93a007a0847f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn-server org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml index 4225519a55e98..3a2c6f856efee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn-server-timelineservice-hbase org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml index 81a8f4efce10d..7d42823c6267e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn-server-timelineservice-hbase org.apache.hadoop @@ -129,4 +129,4 @@ - \ No newline at end of file + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml index 771580b186f6e..4833d95e2744c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn-server-timelineservice-hbase-server org.apache.hadoop @@ -162,4 +162,4 @@ - \ No newline at end of file + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml index 984cac911c8e4..0dde629196fe6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn-server-timelineservice-hbase-server org.apache.hadoop @@ -181,4 +181,4 @@ - \ No newline at end of file + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml index 816bf37e77f47..1763c14b2d104 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn-server-timelineservice-hbase org.apache.hadoop @@ -82,4 +82,4 @@ - \ No newline at end of file + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml index b3f974a6939fd..ed67661fc55c3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn-server org.apache.hadoop @@ -38,4 +38,4 @@ - \ No newline at end of file + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml index 7293a0699d234..c0d5be2d70d7a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn-server org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml index 17715a55748c2..be8e70e2a080d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn-server org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml index 08551058afff8..3eec5628b228a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml index 6a7523523cfec..32d70703b3dec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ + https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-yarn org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml index b76132d68ce8e..dabbd34c15040 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml @@ -6,7 +6,7 @@ (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +16,7 @@ --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/maven-v4_0_0.xsd"> hadoop-yarn org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index a7b699720ef52..d7b28e5f9bc62 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -4,14 +4,14 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. ---> +--> 4.0.0 org.apache.hadoop @@ -214,7 +214,7 @@ + value="https://hadoop.apache.org/docs/r${jdiff.stable.api}/api/"/> diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml index 0882027565917..ecbd65b2da3ef 100644 --- a/hadoop-yarn-project/pom.xml +++ b/hadoop-yarn-project/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +13,7 @@ limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.hadoop @@ -25,7 +25,7 @@ 3.3.0-SNAPSHOT pom Apache Hadoop YARN Project - http://hadoop.apache.org/yarn/ + https://hadoop.apache.org/yarn/ true diff --git a/pom.xml b/pom.xml index d5b85956a799f..0ad0cefb9be06 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +14,7 @@ --> +xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.hadoop hadoop-main @@ -59,7 +59,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs repository.jboss.org - http://repository.jboss.org/nexus/content/groups/public/ + https://repository.jboss.org/nexus/content/groups/public/ false @@ -69,13 +69,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt + https://www.apache.org/licenses/LICENSE-2.0.txt Apache Software Foundation - http://www.apache.org + https://www.apache.org From a3745c59a380bc95dc853a55d1d5a61593cdaf1d Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Mon, 27 May 2019 10:17:47 +0200 Subject: [PATCH 0040/1308] HDDS-1588. Update Ozone dist image to use jdk11. Contributed by Eric Yang. 
--- hadoop-ozone/dist/src/main/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/dist/src/main/Dockerfile b/hadoop-ozone/dist/src/main/Dockerfile index 9c14f498bef5e..5c65e433a263a 100644 --- a/hadoop-ozone/dist/src/main/Dockerfile +++ b/hadoop-ozone/dist/src/main/Dockerfile @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM apache/hadoop-runner:latest +FROM apache/hadoop-runner:jdk11 ADD --chown=hadoop . /opt/hadoop From f0e44b3a3fa20b0be5b1e1c2bae7b5a8b73f4828 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Mon, 27 May 2019 16:26:21 +0530 Subject: [PATCH 0041/1308] HDDS-1584. Fix TestFailureHandlingByClient tests. Contributed by Shashikant Banerjee (#845). --- .../rpc/TestFailureHandlingByClient.java | 67 ++++++++++--------- 1 file changed, 37 insertions(+), 30 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index 4bd4506b55f9d..d95807689b7f8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.ContainerTestHelper; @@ -38,7 +39,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.junit.Assert; -import org.junit.Ignore; import org.junit.Test; import java.io.IOException; @@ -52,9 +52,7 @@ /** * Tests Close Container Exception handling by Ozone Client. 
- * XXX Disabled [HDDS-1323] */ -@Ignore public class TestFailureHandlingByClient { private MiniOzoneCluster cluster; @@ -214,12 +212,17 @@ public void testMultiBlockWritesWithIntermittentDnFailures() // get the name of a valid container Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream groupOutputStream = + KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertTrue(locationInfoList.size() == 6); - long containerId = locationInfoList.get(1).getContainerID(); + List streamEntryList = + keyOutputStream.getStreamEntries(); + + // Assert that 6 block will be preallocated + Assert.assertEquals(6, streamEntryList.size()); + key.write(data.getBytes()); + key.flush(); + long containerId = streamEntryList.get(0).getBlockID().getContainerID(); + BlockID blockId = streamEntryList.get(0).getBlockID(); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() .getContainer(ContainerID.valueof(containerId)); @@ -243,8 +246,9 @@ public void testMultiBlockWritesWithIntermittentDnFailures() .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(3 * data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.concat(data).concat(data).getBytes()); + Assert.assertEquals(4 * data.getBytes().length, keyInfo.getDataSize()); + validateData(keyName, + data.concat(data).concat(data).concat(data).getBytes()); shutdown(); } @@ -309,15 +313,15 @@ public void testContainerExclusionWithClosedContainerException() Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - keyOutputStream.getLocationInfoList(); + List streamEntryList = + keyOutputStream.getStreamEntries(); // Assert that 1 block will be preallocated - Assert.assertEquals(1, locationInfoList.size()); + Assert.assertEquals(1, streamEntryList.size()); key.write(data.getBytes()); key.flush(); - long containerId = locationInfoList.get(0).getContainerID(); - BlockID blockId = locationInfoList.get(0).getBlockID(); + long containerId = streamEntryList.get(0).getBlockID().getContainerID(); + BlockID blockId = streamEntryList.get(0).getBlockID(); List containerIdList = new ArrayList<>(); containerIdList.add(containerId); @@ -368,15 +372,15 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception { Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - keyOutputStream.getLocationInfoList(); + List streamEntryList = + keyOutputStream.getStreamEntries(); // Assert that 1 block will be preallocated - Assert.assertEquals(1, locationInfoList.size()); + Assert.assertEquals(1, streamEntryList.size()); key.write(data.getBytes()); key.flush(); - long containerId = locationInfoList.get(0).getContainerID(); - BlockID blockId = locationInfoList.get(0).getBlockID(); + long containerId = streamEntryList.get(0).getBlockID().getContainerID(); + BlockID blockId = streamEntryList.get(0).getBlockID(); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() .getContainer(ContainerID.valueof(containerId)); @@ -391,14 +395,16 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception { key.write(data.getBytes()); key.write(data.getBytes()); - // The 
close will just write to the buffer - key.close(); + key.flush(); + Assert.assertTrue(keyOutputStream.getExcludeList().getDatanodes() .contains(datanodes.get(0))); Assert.assertTrue( keyOutputStream.getExcludeList().getContainerIds().isEmpty()); Assert.assertTrue( keyOutputStream.getExcludeList().getPipelineIds().isEmpty()); + // The close will just write to the buffer + key.close(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) @@ -430,15 +436,15 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - keyOutputStream.getLocationInfoList(); + List streamEntryList = + keyOutputStream.getStreamEntries(); // Assert that 1 block will be preallocated - Assert.assertEquals(1, locationInfoList.size()); + Assert.assertEquals(1, streamEntryList.size()); key.write(data.getBytes()); key.flush(); - long containerId = locationInfoList.get(0).getContainerID(); - BlockID blockId = locationInfoList.get(0).getBlockID(); + long containerId = streamEntryList.get(0).getBlockID().getContainerID(); + BlockID blockId = streamEntryList.get(0).getBlockID(); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() .getContainer(ContainerID.valueof(containerId)); @@ -447,21 +453,22 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { .getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); - // Two nodes, next write will hit AlraedyClosedException , the pipeline + // Two nodes, next write will hit AlreadyClosedException , the pipeline // will be added in the exclude list cluster.shutdownHddsDatanode(datanodes.get(0)); cluster.shutdownHddsDatanode(datanodes.get(1)); key.write(data.getBytes()); key.write(data.getBytes()); - // The close will just write to the buffer - key.close(); + key.flush(); Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds() .contains(pipeline.getId())); Assert.assertTrue( keyOutputStream.getExcludeList().getContainerIds().isEmpty()); Assert.assertTrue( keyOutputStream.getExcludeList().getDatanodes().isEmpty()); + // The close will just write to the buffer + key.close(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) From 83549dbbea4f79a51b1289590f10f43794b09c17 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Mon, 27 May 2019 16:31:44 +0530 Subject: [PATCH 0042/1308] HDDS-1509. TestBlockOutputStreamWithFailures#test2DatanodesFailure fails intermittently. Contributed by Shashikant Banerjee (#805). 
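For context: the change below boils down to classifying the write failure before updating the client's ExcludeList — a closed-container failure excludes only that container, while a retry failure, timeout, group mismatch, or the newly handled NotReplicatedException excludes the whole pipeline. A condensed, illustrative sketch of that decision follows; it is not the exact patched handleException() method (the parameter and flag names are simplified), and it assumes the ExcludeList/ContainerID/PipelineID APIs and Ratis exception imports used in the patched KeyOutputStream.java, with TimeoutException taken to be java.util.concurrent.TimeoutException.

    import java.util.concurrent.TimeoutException;
    import org.apache.hadoop.hdds.scm.container.ContainerID;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
    import org.apache.ratis.protocol.GroupMismatchException;
    import org.apache.ratis.protocol.NotReplicatedException;

    // Decide what to exclude for the next block allocation after a failure.
    static void updateExcludeList(boolean closedContainer, boolean retryFailure,
        Throwable cause, long containerId, PipelineID pipelineId,
        ExcludeList excludeList) {
      if (closedContainer) {
        // Only this container is unusable; the pipeline itself is still fine.
        excludeList.addConatinerId(ContainerID.valueof(containerId));
      } else if (retryFailure || cause instanceof TimeoutException
          || cause instanceof GroupMismatchException
          || cause instanceof NotReplicatedException) {
        // The pipeline can no longer serve writes; exclude it entirely.
        excludeList.addPipeline(pipelineId);
      }
    }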
--- .../ozone/client/io/KeyOutputStream.java | 25 +++++++---- .../TestBlockOutputStreamWithFailures.java | 42 +++++++++++++------ 2 files changed, 48 insertions(+), 19 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java index 00c4d0271597c..3a92cf475a174 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; @@ -37,6 +38,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.ratis.protocol.AlreadyClosedException; import org.apache.ratis.protocol.GroupMismatchException; +import org.apache.ratis.protocol.NotReplicatedException; import org.apache.ratis.protocol.RaftRetryFailureException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -259,15 +261,24 @@ private void handleException(BlockOutputStreamEntry streamEntry, if (!retryFailure) { closedContainerException = checkIfContainerIsClosed(t); } - PipelineID pipelineId = null; + Pipeline pipeline = streamEntry.getPipeline(); + PipelineID pipelineId = pipeline.getId(); long totalSuccessfulFlushedData = streamEntry.getTotalAckDataLength(); //set the correct length for the current stream streamEntry.setCurrentPosition(totalSuccessfulFlushedData); long bufferedDataLen = blockOutputStreamEntryPool.computeBufferData(); - LOG.debug( - "Encountered exception {}. The last committed block length is {}, " - + "uncommitted data length is {} retry count {}", exception, - totalSuccessfulFlushedData, bufferedDataLen, retryCount); + if (closedContainerException) { + LOG.debug( + "Encountered exception {}. The last committed block length is {}, " + + "uncommitted data length is {} retry count {}", exception, + totalSuccessfulFlushedData, bufferedDataLen, retryCount); + } else { + LOG.warn( + "Encountered exception {} on the pipeline {}. " + + "The last committed block length is {}, " + + "uncommitted data length is {} retry count {}", exception, + pipeline, totalSuccessfulFlushedData, bufferedDataLen, retryCount); + } Preconditions.checkArgument( bufferedDataLen <= blockOutputStreamEntryPool.getStreamBufferMaxSize()); Preconditions.checkArgument( @@ -282,8 +293,8 @@ private void handleException(BlockOutputStreamEntry streamEntry, if (closedContainerException) { excludeList.addConatinerId(ContainerID.valueof(containerId)); } else if (retryFailure || t instanceof TimeoutException - || t instanceof GroupMismatchException) { - pipelineId = streamEntry.getPipeline().getId(); + || t instanceof GroupMismatchException + || t instanceof NotReplicatedException) { excludeList.addPipeline(pipelineId); } // just clean up the current stream. 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index dfccb98ea0215..7a69e273821f6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -36,6 +36,7 @@ import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.ContainerTestHelper; +import org.apache.ratis.protocol.GroupMismatchException; import org.apache.ratis.protocol.RaftRetryFailureException; import org.junit.After; import org.junit.Assert; @@ -75,7 +76,8 @@ public class TestBlockOutputStreamWithFailures { * * @throws IOException */ - @Before public void init() throws Exception { + @Before + public void init() throws Exception { chunkSize = 100; flushSize = 2 * chunkSize; maxFlushSize = 2 * flushSize; @@ -110,13 +112,15 @@ private String getKeyName() { /** * Shutdown MiniDFSCluster. */ - @After public void shutdown() { + @After + public void shutdown() { if (cluster != null) { cluster.shutdown(); } } - @Test public void testWatchForCommitWithCloseContainerException() + @Test + public void testWatchForCommitWithCloseContainerException() throws Exception { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); @@ -256,7 +260,8 @@ private String getKeyName() { validateData(keyName, dataString.concat(dataString).getBytes()); } - @Test public void testWatchForCommitDatanodeFailure() throws Exception { + @Test + public void testWatchForCommitDatanodeFailure() throws Exception { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); long writeChunkCount = @@ -388,7 +393,8 @@ private String getKeyName() { validateData(keyName, dataString.concat(dataString).getBytes()); } - @Test public void test2DatanodesFailure() throws Exception { + @Test + public void test2DatanodesFailure() throws Exception { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); long writeChunkCount = @@ -494,8 +500,15 @@ private String getKeyName() { // rewritten plus one partial chunk plus two putBlocks for flushSize // and one flush for partial chunk key.flush(); - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof RaftRetryFailureException); + + // Since, 2 datanodes went down, if the pipeline gets destroyed quickly, + // it will hit GroupMismatchException else, it will fail with + // RaftRetryFailureException + Assert.assertTrue((HddsClientUtils. 
+ checkForException(blockOutputStream + .getIoException()) instanceof RaftRetryFailureException) + || HddsClientUtils.checkForException( + blockOutputStream.getIoException()) instanceof GroupMismatchException); // Make sure the retryCount is reset after the exception is handled Assert.assertTrue(keyOutputStream.getRetryCount() == 0); // now close the stream, It will update the ack length after watchForCommit @@ -524,7 +537,8 @@ private String getKeyName() { validateData(keyName, data1); } - @Test public void testFailureWithPrimeSizedData() throws Exception { + @Test + public void testFailureWithPrimeSizedData() throws Exception { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); long writeChunkCount = @@ -644,7 +658,8 @@ private String getKeyName() { validateData(keyName, dataString.concat(dataString).getBytes()); } - @Test public void testExceptionDuringClose() throws Exception { + @Test + public void testExceptionDuringClose() throws Exception { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); long writeChunkCount = @@ -758,7 +773,8 @@ private String getKeyName() { validateData(keyName, dataString.concat(dataString).getBytes()); } - @Test public void testWatchForCommitWithSingleNodeRatis() throws Exception { + @Test + public void testWatchForCommitWithSingleNodeRatis() throws Exception { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); long writeChunkCount = @@ -898,7 +914,8 @@ private String getKeyName() { validateData(keyName, dataString.concat(dataString).getBytes()); } - @Test public void testDatanodeFailureWithSingleNodeRatis() throws Exception { + @Test + public void testDatanodeFailureWithSingleNodeRatis() throws Exception { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); long writeChunkCount = @@ -1037,7 +1054,8 @@ private String getKeyName() { validateData(keyName, dataString.concat(dataString).getBytes()); } - @Test public void testDatanodeFailureWithPreAllocation() throws Exception { + @Test + public void testDatanodeFailureWithPreAllocation() throws Exception { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); long writeChunkCount = From ec92ca6575e0074ed4983fa8b34324bdbeb23499 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Mon, 27 May 2019 23:40:51 +0200 Subject: [PATCH 0043/1308] HDDS-1598. Fix Ozone checkstyle issues on trunk. Contributed by Elek, Marton. 
(#854) --- .../placement/algorithms/SCMContainerPlacementRackAware.java | 4 ++-- .../algorithms/TestSCMContainerPlacementRackAware.java | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index 3758b858886f9..ffebb84c4a7a0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -51,8 +51,8 @@ public final class SCMContainerPlacementRackAware extends SCMCommonPolicy { LoggerFactory.getLogger(SCMContainerPlacementRackAware.class); private final NetworkTopology networkTopology; private boolean fallback; - private int RACK_LEVEL = 1; - private int MAX_RETRY= 3; + private static final int RACK_LEVEL = 1; + private static final int MAX_RETRY= 3; /** * Constructs a Container Placement with rack awareness. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index d80c7e53162ee..732178e499c07 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -53,7 +53,7 @@ public class TestSCMContainerPlacementRackAware { // policy prohibit fallback private SCMContainerPlacementRackAware policyNoFallback; // node storage capacity - private final long STORAGE_CAPACITY = 100L; + private static final long STORAGE_CAPACITY = 100L; @Before public void setup() { From b70d1be685c5f9d08ab39f9ea73fc0561e037c74 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Mon, 27 May 2019 17:52:09 -0700 Subject: [PATCH 0044/1308] HDDS-1559. Fix TestReplicationManager. Contributed by Bharat Viswanadham. 
(#856) --- .../hadoop/hdds/scm/container/TestReplicationManager.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java index bc921e3ce55eb..35fd1088124c2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java @@ -96,10 +96,10 @@ public void setup() throws IOException, InterruptedException { containerPlacementPolicy = Mockito.mock(ContainerPlacementPolicy.class); Mockito.when(containerPlacementPolicy.chooseDatanodes( - Mockito.anyListOf(DatanodeDetails.class), null, - Mockito.anyInt(), Mockito.anyLong())) - .thenAnswer(invocation -> { - int count = (int) invocation.getArguments()[1]; + Mockito.anyListOf(DatanodeDetails.class), + Mockito.anyListOf(DatanodeDetails.class), Mockito.anyInt(), + Mockito.anyLong())).thenAnswer(invocation -> { + int count = (int) invocation.getArguments()[2]; return IntStream.range(0, count) .mapToObj(i -> randomDatanodeDetails()) .collect(Collectors.toList()); From 72dd79015a00d29015bec30f1bfc7ededab6a2b1 Mon Sep 17 00:00:00 2001 From: Mukul Kumar Singh Date: Tue, 28 May 2019 07:52:56 +0530 Subject: [PATCH 0045/1308] HDDS-1534. freon should return non-zero exit code on failure. Contributed by Nilotpal Nandi. --- .../hadoop/ozone/freon/RandomKeyGenerator.java | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java index a255342679e98..b0461cb1084d0 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java @@ -107,7 +107,7 @@ enum FreonOps { LoggerFactory.getLogger(RandomKeyGenerator.class); private boolean completed = false; - private boolean exception = false; + private Exception exception = null; @Option(names = "--numOfThreads", description = "number of threads to be launched for the run", @@ -278,7 +278,7 @@ public Void call() throws Exception { processor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS); completed = true; - if (exception) { + if (exception != null) { progressbar.terminate(); } else { progressbar.shutdown(); @@ -288,6 +288,9 @@ public Void call() throws Exception { validator.join(); } ozoneClient.close(); + if (exception != null) { + throw exception; + } return null; } @@ -337,7 +340,7 @@ private void printStats(PrintStream out) { out.println(); out.println("***************************************************"); - out.println("Status: " + (exception ? "Failed" : "Success")); + out.println("Status: " + (exception != null ? 
"Failed" : "Success")); out.println("Git Base Revision: " + VersionInfo.getRevision()); out.println("Number of Volumes created: " + numberOfVolumesCreated); out.println("Number of Buckets created: " + numberOfBucketsCreated); @@ -577,7 +580,7 @@ public void run() { numberOfVolumesCreated.getAndIncrement(); volume = objectStore.getVolume(volumeName); } catch (IOException e) { - exception = true; + exception = e; LOG.error("Could not create volume", e); return; } @@ -644,13 +647,13 @@ public void run() { } } } catch (Exception e) { - exception = true; + exception = e; LOG.error("Exception while adding key: {} in bucket: {}" + " of volume: {}.", key, bucket, volume, e); } } } catch (Exception e) { - exception = true; + exception = e; LOG.error("Exception while creating bucket: {}" + " in volume: {}.", bucketName, volume, e); } @@ -696,7 +699,7 @@ private final class FreonJobInfo { private String[] tenQuantileKeyWriteTime; private FreonJobInfo() { - this.status = exception ? "Failed" : "Success"; + this.status = exception != null ? "Failed" : "Success"; this.numOfVolumes = RandomKeyGenerator.this.numOfVolumes; this.numOfBuckets = RandomKeyGenerator.this.numOfBuckets; this.numOfKeys = RandomKeyGenerator.this.numOfKeys; From 9078e28a2482f79fb90a16fb46a127e92847c5e7 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Tue, 28 May 2019 15:52:39 +0900 Subject: [PATCH 0046/1308] YARN-9503. Fix JavaDoc error in TestSchedulerOvercommit. Contributed by Wanqiang Ji. --- .../resourcemanager/scheduler/TestSchedulerOvercommit.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerOvercommit.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerOvercommit.java index 758fd33e8a1f0..36b5198506058 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerOvercommit.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerOvercommit.java @@ -70,7 +70,7 @@ /** * Generic tests for overcommitting resources. This needs to be instantiated - * with a scheduler ({@link YarnConfiguration.RM_SCHEDULER}). + * with a scheduler ({@link YarnConfiguration#RM_SCHEDULER}). * * If reducing the amount of resources leads to overcommitting (negative * available resources), the scheduler will select containers to make room. @@ -142,7 +142,7 @@ public void setup() throws Exception { * Get the configuration for the scheduler. This is used when setting up the * Resource Manager and should setup the scheduler (e.g., Capacity Scheduler * or Fair Scheduler). It needs to set the configuration with - * {@link YarnConfiguration.RM_SCHEDULER}. + * {@link YarnConfiguration#RM_SCHEDULER}. * @return Configuration for the scheduler. */ protected Configuration getConfiguration() { @@ -531,7 +531,7 @@ public void testEndToEnd() throws Exception { /** * Create a container with a particular size and make sure it succeeds. - * @param am Application Master to add the container to. + * @param app Application Master to add the container to. * @param memory Memory of the container. 
* @return Newly created container. * @throws Exception If there are issues creating the container. From 4a692bc3be0800891ea58c13ba7d89efd358f6f6 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Tue, 28 May 2019 16:53:43 +0900 Subject: [PATCH 0047/1308] YARN-9500. Fix typos in ResourceModel.md. Contributed by leiqiang. --- .../hadoop-yarn-site/src/site/markdown/ResourceModel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md index ac16d5316671e..8a449a801deb5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md @@ -234,7 +234,7 @@ with a single profile and a means for administrators to regulate how resources are consumed. To configure resource types, the administrator must set -`yarn.resourcemanager.resource-profiles.enabled` ot `true` in the resource +`yarn.resourcemanager.resource-profiles.enabled` to `true` in the resource manager's `yarn-site.xml` file. This file defines the supported profiles. For example: From 9f0d3412f88c61c83a20cc46adce866ccf4fdc7c Mon Sep 17 00:00:00 2001 From: Mukul Kumar Singh Date: Tue, 28 May 2019 14:18:25 +0200 Subject: [PATCH 0048/1308] HDDS-1533. JVM exit on TestHddsDatanodeService Closes #859 --- .../java/org/apache/hadoop/ozone/TestHddsDatanodeService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java index f54fa75acf0b9..af56d0643d5b4 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java @@ -65,13 +65,13 @@ public void tearDown() { public void testStartup() throws IOException { service = HddsDatanodeService.createHddsDatanodeService(args); service.start(conf); - service.join(); assertNotNull(service.getDatanodeDetails()); assertNotNull(service.getDatanodeDetails().getHostName()); assertFalse(service.getDatanodeStateMachine().isDaemonStopped()); service.stop(); + service.join(); service.close(); } From 06fa4d276442f302805472802adf53145f6ea930 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Tue, 21 May 2019 17:57:24 +0200 Subject: [PATCH 0049/1308] HDDS-1565. Rename k8s-dev and k8s-dev-push profiles to docker and docker-push. Closes: #840 --- hadoop-ozone/dist/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 51e512f461d48..9e2966b1a0c4e 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -286,7 +286,7 @@ - k8s-dev + docker-build ${user.name}/ozone:${project.version} @@ -321,7 +321,7 @@ - k8s-dev-push + docker-push From d1ec1c52e5a8567526ac1ab6d8ab95010c49fddb Mon Sep 17 00:00:00 2001 From: Shweta Yakkali Date: Tue, 28 May 2019 11:14:24 -0700 Subject: [PATCH 0050/1308] HDDS-1580. 
Obtain Handler reference in ContainerScrubber (#842) --- .../ozoneimpl/ContainerController.java | 7 ++++++- .../container/ozoneimpl/ContainerScrubber.java | 17 ++++++++--------- .../container/ozoneimpl/OzoneContainer.java | 2 +- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java index 3c0f25164945b..11cb8eeadd111 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java @@ -30,6 +30,7 @@ import java.io.FileInputStream; import java.io.IOException; +import java.util.Iterator; import java.util.Map; /** @@ -132,7 +133,11 @@ public void deleteContainer(final long containerId, boolean force) * @param container Container * @return handler of the container */ - private Handler getHandler(final Container container) { + Handler getHandler(final Container container) { return handlers.get(container.getContainerType()); } + + Iterator getContainerSetIterator() { + return containerSet.getContainerIterator(); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubber.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubber.java index dea7323d757fd..d6f8b273d1b9d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubber.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubber.java @@ -18,12 +18,11 @@ package org.apache.hadoop.ozone.container.ozoneimpl; -import com.google.common.base.Preconditions; import org.apache.commons.net.ntp.TimeStamp; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; +import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,20 +35,19 @@ public class ContainerScrubber implements Runnable { private static final Logger LOG = LoggerFactory.getLogger(ContainerScrubber.class); - private final ContainerSet containerSet; private final OzoneConfiguration config; private final long timePerContainer = 10000; // 10 sec in millis private boolean halt; private Thread scrubThread; + private ContainerController controller; - public ContainerScrubber(ContainerSet cSet, OzoneConfiguration conf) { - Preconditions.checkNotNull(cSet, - "ContainerScrubber received a null ContainerSet"); - Preconditions.checkNotNull(conf); - this.containerSet = cSet; + + public ContainerScrubber(OzoneConfiguration conf, + ContainerController controller) { this.config = conf; this.halt = false; this.scrubThread = null; + this.controller = controller; } @Override public void run() { @@ -130,12 +128,13 @@ private void throttleScrubber(TimeStamp startTime) { private void scrub() { - Iterator containerIt = containerSet.getContainerIterator(); + Iterator containerIt = controller.getContainerSetIterator(); long count = 0; while (containerIt.hasNext()) { TimeStamp 
startTime = new TimeStamp(System.currentTimeMillis());
       Container container = containerIt.next();
+      Handler containerHandler = controller.getHandler(container);
 
       if (this.halt) {
         break; // stop if requested
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index f34334d1034e4..2254ec052d79a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -161,7 +161,7 @@ private void startContainerScrub() {
           HddsConfigKeys.HDDS_CONTAINERSCRUB_ENABLED);
     } else {
       if (this.scrubber == null) {
-        this.scrubber = new ContainerScrubber(containerSet, config);
+        this.scrubber = new ContainerScrubber(config, controller);
       }
       scrubber.up();
     }

From d8b18e82f05a101bd3cafa3df8705e427092be17 Mon Sep 17 00:00:00 2001
From: Bharat Viswanadham
Date: Tue, 28 May 2019 11:27:16 -0700
Subject: [PATCH 0051/1308] HDDS-1602. Fix TestContainerPersistence#testDeleteBlockTwice. (#858)

---
 .../ozone/container/keyvalue/impl/BlockManagerImpl.java | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index f62a013f4cce7..8fe0b810a3cd0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -229,12 +229,13 @@ public void deleteBlock(Container container, BlockID blockID) throws
     // to delete a Block which might have just gotten inserted after
     // the get check.
     byte[] kKey = Longs.toByteArray(blockID.getLocalID());
-    try {
-      db.getStore().delete(kKey);
-    } catch (IOException e) {
+
+    byte[] kData = db.getStore().get(kKey);
+    if (kData == null) {
       throw new StorageContainerException("Unable to find the block.",
           NO_SUCH_BLOCK);
     }
+    db.getStore().delete(kKey);
     // Decrement blockcount here
     container.getContainerData().decrKeyCount();
   }

From fb0b39f4bfa6d97ca758e617cb9566242813c1a3 Mon Sep 17 00:00:00 2001
From: Xiaoyu Yao
Date: Tue, 28 May 2019 14:02:36 -0700
Subject: [PATCH 0052/1308] HDDS-1536. testSCMSafeModeRestrictedOp is failing consistently. Contributed by Xiaoyu Yao.
(#865) --- .../test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 59331c9cf5a47..4b6118715e7eb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -223,6 +223,7 @@ public void testSCMSafeMode() throws Exception { assertTrue(cluster.getStorageContainerManager().isInSafeMode()); cluster.startHddsDatanodes(); cluster.waitForClusterToBeReady(); + cluster.waitTobeOutOfSafeMode(); assertFalse(cluster.getStorageContainerManager().isInSafeMode()); // Test2: Test safe mode when containers are there in system. @@ -301,6 +302,7 @@ public void testSCMSafeModeRestrictedOp() throws Exception { cluster.startHddsDatanodes(); cluster.waitForClusterToBeReady(); + cluster.waitTobeOutOfSafeMode(); assertFalse(scm.isInSafeMode()); TestStorageContainerManagerHelper helper = From d78854b928bb877f26b11b5b212a100a79941f35 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Tue, 28 May 2019 17:31:35 -0400 Subject: [PATCH 0053/1308] HDFS-14434. Ignore user.name query parameter in secure WebHDFS. Contributed by KWON BYUNGCHANG --- .../hadoop/hdfs/web/WebHdfsFileSystem.java | 16 +- .../hadoop/hdfs/server/common/JspHelper.java | 8 +- .../hdfs/server/common/TestJspHelper.java | 88 +++---- .../hadoop/hdfs/web/TestWebHdfsTokens.java | 217 +++++++++++------- .../hadoop/hdfs/web/TestWebHdfsUrl.java | 47 +++- 5 files changed, 236 insertions(+), 140 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index fe30a9ac88fa2..e8049e9b9f082 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -170,6 +170,7 @@ public class WebHdfsFileSystem extends FileSystem private InetSocketAddress nnAddrs[]; private int currentNNAddrIndex; private boolean disallowFallbackToInsecureCluster; + private boolean isInsecureCluster; private String restCsrfCustomHeader; private Set restCsrfMethodsToIgnore; @@ -282,6 +283,7 @@ public synchronized void initialize(URI uri, Configuration conf this.workingDir = makeQualified(new Path(getHomeDirectoryString(ugi))); this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled(); + this.isInsecureCluster = !this.canRefreshDelegationToken; this.disallowFallbackToInsecureCluster = !conf.getBoolean( CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT); @@ -367,6 +369,7 @@ protected synchronized Token getDelegationToken() throws IOException { LOG.debug("Fetched new token: {}", token); } else { // security is disabled canRefreshDelegationToken = false; + isInsecureCluster = true; } } } @@ -413,8 +416,7 @@ public Path getHomeDirectory() { if (cachedHomeDirectory == null) { final HttpOpParam.Op op = GetOpParam.Op.GETHOMEDIRECTORY; try { - String pathFromDelegatedFS = new FsPathResponseRunner(op, null, - new UserParam(ugi)) { + String pathFromDelegatedFS = new 
FsPathResponseRunner(op, null){ @Override String decodeResponse(Map json) throws IOException { return JsonUtilClient.getPath(json); @@ -576,7 +578,8 @@ private URL getNamenodeURL(String path, String query) throws IOException { return url; } - Param[] getAuthParameters(final HttpOpParam.Op op) throws IOException { + private synchronized Param[] getAuthParameters(final HttpOpParam.Op op) + throws IOException { List> authParams = Lists.newArrayList(); // Skip adding delegation token for token operations because these // operations require authentication. @@ -593,7 +596,12 @@ Param[] getAuthParameters(final HttpOpParam.Op op) throws IOException { authParams.add(new DoAsParam(userUgi.getShortUserName())); userUgi = realUgi; } - authParams.add(new UserParam(userUgi.getShortUserName())); + UserParam userParam = new UserParam((userUgi.getShortUserName())); + + //in insecure, use user.name parameter, in secure, use spnego auth + if(isInsecureCluster) { + authParams.add(userParam); + } } return authParams.toArray(new Param[0]); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java index eb488e81e29cd..2c65c3fe2cecf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -118,12 +118,9 @@ public static UserGroupInformation getUGI(ServletContext context, remoteUser = request.getRemoteUser(); final String tokenString = request.getParameter(DELEGATION_PARAMETER_NAME); if (tokenString != null) { - // Token-based connections need only verify the effective user, and - // disallow proxying to different user. Proxy authorization checks - // are not required since the checks apply to issuing a token. 
+ + // user.name, doas param is ignored in the token-based auth ugi = getTokenUGI(context, request, tokenString, conf); - checkUsername(ugi.getShortUserName(), usernameFromQuery); - checkUsername(ugi.getShortUserName(), doAsUserFromQuery); } else if (remoteUser == null) { throw new IOException( "Security enabled but user not authenticated by filter"); @@ -137,7 +134,6 @@ public static UserGroupInformation getUGI(ServletContext context, if (ugi == null) { // security is off, or there's no token ugi = UserGroupInformation.createRemoteUser(remoteUser); - checkUsername(ugi.getShortUserName(), usernameFromQuery); if (UserGroupInformation.isSecurityEnabled()) { // This is not necessarily true, could have been auth'ed by user-facing // filter diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java index 119db8c1ddc7b..5a1661c987a5e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java @@ -158,7 +158,7 @@ public void testGetUgiFromToken() throws IOException { Token token = new Token( dtId, new DummySecretManager(0, 0, 0, 0)); String tokenString = token.encodeToUrlString(); - + // token with no auth-ed user request = getMockRequest(null, null, null); when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn( @@ -168,7 +168,7 @@ public void testGetUgiFromToken() throws IOException { Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser); Assert.assertEquals(ugi.getShortUserName(), user); checkUgiFromToken(ugi); - + // token with auth-ed user request = getMockRequest(realUser, null, null); when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn( @@ -198,32 +198,40 @@ public void testGetUgiFromToken() throws IOException { Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser); Assert.assertEquals(ugi.getShortUserName(), user); checkUgiFromToken(ugi); - - // can't proxy with a token! + + // if present token, ignore doas parameter request = getMockRequest(null, null, "rogue"); when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn( tokenString); - try { - JspHelper.getUGI(context, request, conf); - Assert.fail("bad request allowed"); - } catch (IOException ioe) { - Assert.assertEquals( - "Usernames not matched: name=rogue != expected="+user, - ioe.getMessage()); - } - - // can't proxy with a token! 
+ + ugi = JspHelper.getUGI(context, request, conf); + Assert.assertNotNull(ugi.getRealUser()); + Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser); + Assert.assertEquals(ugi.getShortUserName(), user); + checkUgiFromToken(ugi); + + // if present token, ignore user.name parameter + request = getMockRequest(null, "rogue", null); + when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn( + tokenString); + + ugi = JspHelper.getUGI(context, request, conf); + Assert.assertNotNull(ugi.getRealUser()); + Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser); + Assert.assertEquals(ugi.getShortUserName(), user); + checkUgiFromToken(ugi); + + // if present token, ignore user.name and doas parameter request = getMockRequest(null, user, "rogue"); when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn( tokenString); - try { - JspHelper.getUGI(context, request, conf); - Assert.fail("bad request allowed"); - } catch (IOException ioe) { - Assert.assertEquals( - "Usernames not matched: name=rogue != expected="+user, - ioe.getMessage()); - } + + ugi = JspHelper.getUGI(context, request, conf); + Assert.assertNotNull(ugi.getRealUser()); + Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser); + Assert.assertEquals(ugi.getShortUserName(), user); + checkUgiFromToken(ugi); + } @Test @@ -271,16 +279,12 @@ public void testGetNonProxyUgi() throws IOException { Assert.assertEquals(ugi.getShortUserName(), realUser); checkUgiFromAuth(ugi); - // ugi for remote user != real user + // if there is remote user via SPNEGO, ignore user.name param request = getMockRequest(realUser, user, null); - try { - JspHelper.getUGI(context, request, conf); - Assert.fail("bad request allowed"); - } catch (IOException ioe) { - Assert.assertEquals( - "Usernames not matched: name="+user+" != expected="+realUser, - ioe.getMessage()); - } + ugi = JspHelper.getUGI(context, request, conf); + Assert.assertNull(ugi.getRealUser()); + Assert.assertEquals(ugi.getShortUserName(), realUser); + checkUgiFromAuth(ugi); } @Test @@ -335,17 +339,16 @@ public void testGetProxyUgi() throws IOException { Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser); Assert.assertEquals(ugi.getShortUserName(), user); checkUgiFromAuth(ugi); - - // proxy ugi for user via remote user != real user + + // if there is remote user via SPNEGO, ignore user.name, doas param request = getMockRequest(realUser, user, user); - try { - JspHelper.getUGI(context, request, conf); - Assert.fail("bad request allowed"); - } catch (IOException ioe) { - Assert.assertEquals( - "Usernames not matched: name="+user+" != expected="+realUser, - ioe.getMessage()); - } + ugi = JspHelper.getUGI(context, request, conf); + Assert.assertNotNull(ugi.getRealUser()); + Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser); + Assert.assertEquals(ugi.getShortUserName(), user); + checkUgiFromAuth(ugi); + + // try to get get a proxy user with unauthorized user try { @@ -368,6 +371,9 @@ public void testGetProxyUgi() throws IOException { } } + + + private HttpServletRequest getMockRequest(String remoteUser, String user, String doAs) { HttpServletRequest request = mock(HttpServletRequest.class); when(request.getParameter(UserParam.NAME)).thenReturn(user); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java index 0b1dfa5b9a545..b5d44109f8bdc 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java @@ -20,20 +20,37 @@ import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.SIMPLE; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.*; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; -import java.net.InetSocketAddress; import java.net.URI; import java.net.URL; import java.net.URLConnection; import java.security.PrivilegedExceptionAction; import java.util.Map; +import java.util.Properties; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -46,17 +63,18 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.web.resources.*; import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Text; -import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; +import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.Whitebox; import org.apache.hadoop.security.token.Token; +import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; @@ -65,16 +83,95 @@ public class TestWebHdfsTokens { private static Configuration conf; URI uri = null; + //secure cluster + private static MiniKdc kdc = null; + private static File baseDir; + private 
static File keytabFile; + private static String username = "webhdfs-tokens-test"; + private static String principal; + private static String keystoresDir; + private static String sslConfDir; + @BeforeClass public static void setUp() { conf = new Configuration(); + } + + @AfterClass + public static void destroy() throws Exception { + if (kdc != null) { + kdc.stop(); + FileUtil.fullyDelete(baseDir); + KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); + } + } + + private static void initEnv(){ SecurityUtil.setAuthenticationMethod(KERBEROS, conf); - UserGroupInformation.setConfiguration(conf); + UserGroupInformation.setConfiguration(conf); UserGroupInformation.setLoginUser( UserGroupInformation.createUserForTesting( "LoginUser", new String[]{"supergroup"})); } + private static void initSecureConf(Configuration secureConf) + throws Exception { + + baseDir = GenericTestUtils.getTestDir( + TestWebHdfsTokens.class.getSimpleName()); + FileUtil.fullyDelete(baseDir); + assertTrue(baseDir.mkdirs()); + + Properties kdcConf = MiniKdc.createConf(); + kdc = new MiniKdc(kdcConf, baseDir); + kdc.start(); + + SecurityUtil.setAuthenticationMethod( + UserGroupInformation.AuthenticationMethod.KERBEROS, secureConf); + UserGroupInformation.setConfiguration(secureConf); + KerberosName.resetDefaultRealm(); + assertTrue("Expected secureConfiguration to enable security", + UserGroupInformation.isSecurityEnabled()); + + keytabFile = new File(baseDir, username + ".keytab"); + String keytab = keytabFile.getAbsolutePath(); + // Windows will not reverse name lookup "127.0.0.1" to "localhost". + String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost"; + principal = username + "/" + krbInstance + "@" + kdc.getRealm(); + String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm(); + kdc.createPrincipal(keytabFile, username, username + "/" + krbInstance, + "HTTP/" + krbInstance); + + secureConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, principal); + secureConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab); + secureConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, principal); + secureConf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab); + secureConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, + spnegoPrincipal); + secureConf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); + secureConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication"); + secureConf.set(DFS_HTTP_POLICY_KEY, + HttpConfig.Policy.HTTP_AND_HTTPS.name()); + secureConf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); + secureConf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:0"); + secureConf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0"); + secureConf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "localhost:0"); + secureConf.setBoolean(DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); + secureConf.setBoolean(IGNORE_SECURE_PORTS_FOR_TESTING_KEY, true); + + keystoresDir = baseDir.getAbsolutePath(); + sslConfDir = KeyStoreTestUtil.getClasspathDir(TestWebHdfsTokens.class); + KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, + secureConf, false); + + secureConf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY, + KeyStoreTestUtil.getClientSSLConfigFileName()); + secureConf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, + KeyStoreTestUtil.getServerSSLConfigFileName()); + } + + + private WebHdfsFileSystem spyWebhdfsInSecureSetup() throws IOException { WebHdfsFileSystem fsOrig = new WebHdfsFileSystem(); fsOrig.initialize(URI.create("webhdfs://127.0.0.1:0"), conf); @@ -84,6 +181,7 @@ private WebHdfsFileSystem spyWebhdfsInSecureSetup() throws 
IOException { @Test(timeout = 5000) public void testTokenForNonTokenOp() throws IOException { + initEnv(); WebHdfsFileSystem fs = spyWebhdfsInSecureSetup(); Token token = mock(Token.class); doReturn(token).when(fs).getDelegationToken(null); @@ -104,16 +202,19 @@ public void testTokenForNonTokenOp() throws IOException { @Test(timeout = 5000) public void testNoTokenForGetToken() throws IOException { + initEnv(); checkNoTokenForOperation(GetOpParam.Op.GETDELEGATIONTOKEN); } @Test(timeout = 5000) public void testNoTokenForRenewToken() throws IOException { + initEnv(); checkNoTokenForOperation(PutOpParam.Op.RENEWDELEGATIONTOKEN); } @Test(timeout = 5000) public void testNoTokenForCancelToken() throws IOException { + initEnv(); checkNoTokenForOperation(PutOpParam.Op.CANCELDELEGATIONTOKEN); } @@ -162,86 +263,42 @@ public void testDeleteOpRequireAuth() { @Test public void testLazyTokenFetchForWebhdfs() throws Exception { MiniDFSCluster cluster = null; - WebHdfsFileSystem fs = null; + UserGroupInformation ugi = null; try { final Configuration clusterConf = new HdfsConfiguration(conf); - SecurityUtil.setAuthenticationMethod(SIMPLE, clusterConf); - clusterConf.setBoolean(DFSConfigKeys - .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); + initSecureConf(clusterConf); - // trick the NN into thinking security is enabled w/o it trying - // to login from a keytab - UserGroupInformation.setConfiguration(clusterConf); cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(1).build(); cluster.waitActive(); - SecurityUtil.setAuthenticationMethod(KERBEROS, clusterConf); - UserGroupInformation.setConfiguration(clusterConf); - + + ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI( + principal, keytabFile.getAbsolutePath()); + + //test with swebhdfs + uri = DFSUtil.createUri( + "swebhdfs", cluster.getNameNode().getHttpsAddress()); + validateLazyTokenFetch(ugi, clusterConf); + + //test with webhdfs uri = DFSUtil.createUri( "webhdfs", cluster.getNameNode().getHttpAddress()); - validateLazyTokenFetch(clusterConf); + validateLazyTokenFetch(ugi, clusterConf); + } finally { - IOUtils.cleanup(null, fs); if (cluster != null) { cluster.shutdown(); } + + // Reset UGI so that other tests are not affected. 
+ UserGroupInformation.reset(); + UserGroupInformation.setConfiguration(new Configuration()); } } - - @Test - public void testLazyTokenFetchForSWebhdfs() throws Exception { - MiniDFSCluster cluster = null; - SWebHdfsFileSystem fs = null; - String keystoresDir; - String sslConfDir; - try { - final Configuration clusterConf = new HdfsConfiguration(conf); - SecurityUtil.setAuthenticationMethod(SIMPLE, clusterConf); - clusterConf.setBoolean(DFSConfigKeys - .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); - String baseDir = - GenericTestUtils.getTempPath(TestWebHdfsTokens.class.getSimpleName()); - - clusterConf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); - clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); - clusterConf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0"); - - File base = new File(baseDir); - FileUtil.fullyDelete(base); - base.mkdirs(); - keystoresDir = new File(baseDir).getAbsolutePath(); - sslConfDir = KeyStoreTestUtil.getClasspathDir(TestWebHdfsTokens.class); - KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, clusterConf, false); - clusterConf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY, - KeyStoreTestUtil.getClientSSLConfigFileName()); - clusterConf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, - KeyStoreTestUtil.getServerSSLConfigFileName()); - - // trick the NN into thinking security is enabled w/o it trying - // to login from a keytab - UserGroupInformation.setConfiguration(clusterConf); - cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(1).build(); - cluster.waitActive(); - InetSocketAddress addr = cluster.getNameNode().getHttpsAddress(); - String nnAddr = NetUtils.getHostPortString(addr); - clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr); - SecurityUtil.setAuthenticationMethod(KERBEROS, clusterConf); - UserGroupInformation.setConfiguration(clusterConf); - - uri = DFSUtil.createUri( - "swebhdfs", cluster.getNameNode().getHttpsAddress()); - validateLazyTokenFetch(clusterConf); - } finally { - IOUtils.cleanup(null, fs); - if (cluster != null) { - cluster.shutdown(); - } - } - KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); - } + @Test public void testSetTokenServiceAndKind() throws Exception { + initEnv(); MiniDFSCluster cluster = null; try { @@ -296,16 +353,20 @@ Token decodeResponse(Map json) } } - private void validateLazyTokenFetch(final Configuration clusterConf) throws Exception{ - final String testUser = "DummyUser"; - UserGroupInformation ugi = UserGroupInformation.createUserForTesting( - testUser, new String[]{"supergroup"}); - WebHdfsFileSystem fs = ugi.doAs(new PrivilegedExceptionAction() { - @Override - public WebHdfsFileSystem run() throws IOException { - return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf)); - } - }); + private void validateLazyTokenFetch(UserGroupInformation ugi, + final Configuration clusterConf) throws Exception { + + String testUser = ugi.getShortUserName(); + + WebHdfsFileSystem fs = ugi.doAs( + new PrivilegedExceptionAction() { + @Override + public WebHdfsFileSystem run() throws IOException { + return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, + clusterConf)); + } + }); + // verify token ops don't get a token Assert.assertNull(fs.getRenewToken()); Token token = fs.getDelegationToken(null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java index 02a68ea024555..449f2c6089361 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import java.io.IOException; @@ -144,37 +145,47 @@ public void testSecureAuthParamsInUrl() throws IOException { Path fsPath = new Path("/"); String tokenString = webhdfs.getDelegationToken().encodeToUrlString(); + String userParam = new UserParam(ugi.getShortUserName()).toString(); + // send user URL getTokenUrl = webhdfs.toUrl(GetOpParam.Op.GETDELEGATIONTOKEN, fsPath); + assertTrue("secure webhdfs SHOULD NOT use user.name parameter", + getTokenUrl.toString().indexOf(userParam) == -1); checkQueryParams( new String[]{ GetOpParam.Op.GETDELEGATIONTOKEN.toQueryString(), - new UserParam(ugi.getShortUserName()).toString() }, getTokenUrl); + + // send user URL renewTokenUrl = webhdfs.toUrl(PutOpParam.Op.RENEWDELEGATIONTOKEN, fsPath, new TokenArgumentParam(tokenString)); + assertTrue("secure webhdfs SHOULD NOT use user.name parameter", + renewTokenUrl.toString().indexOf(userParam) == -1); checkQueryParams( new String[]{ PutOpParam.Op.RENEWDELEGATIONTOKEN.toQueryString(), - new UserParam(ugi.getShortUserName()).toString(), new TokenArgumentParam(tokenString).toString(), }, renewTokenUrl); + + // send token URL cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN, fsPath, new TokenArgumentParam(tokenString)); + assertTrue("secure webhdfs SHOULD NOT use user.name parameter", + cancelTokenUrl.toString().indexOf(userParam) == -1); checkQueryParams( new String[]{ PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(), - new UserParam(ugi.getShortUserName()).toString(), new TokenArgumentParam(tokenString).toString(), }, cancelTokenUrl); - + + // send token URL fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath); checkQueryParams( @@ -190,14 +201,16 @@ public void testSecureAuthParamsInUrl() throws IOException { // send user cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN, fsPath, new TokenArgumentParam(tokenString)); + assertTrue("secure webhdfs SHOULD NOT use user.name parameter", + cancelTokenUrl.toString().indexOf(userParam) == -1); checkQueryParams( new String[]{ PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(), - new UserParam(ugi.getShortUserName()).toString(), new TokenArgumentParam(tokenString).toString(), }, cancelTokenUrl); + // send user fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath); checkQueryParams( @@ -225,40 +238,50 @@ public void testSecureProxyAuthParamsInUrl() throws IOException { Path fsPath = new Path("/"); String tokenString = webhdfs.getDelegationToken().encodeToUrlString(); + String userParam = new UserParam(ugi.getRealUser(). 
+ getShortUserName()).toString(); + // send real+effective URL getTokenUrl = webhdfs.toUrl(GetOpParam.Op.GETDELEGATIONTOKEN, fsPath); + assertTrue("secure webhdfs SHOULD NOT use user.name parameter", + getTokenUrl.toString().indexOf(userParam) == -1); checkQueryParams( new String[]{ GetOpParam.Op.GETDELEGATIONTOKEN.toQueryString(), - new UserParam(ugi.getRealUser().getShortUserName()).toString(), new DoAsParam(ugi.getShortUserName()).toString() }, getTokenUrl); + + // send real+effective URL renewTokenUrl = webhdfs.toUrl(PutOpParam.Op.RENEWDELEGATIONTOKEN, fsPath, new TokenArgumentParam(tokenString)); + assertTrue("secure webhdfs SHOULD NOT use user.name parameter", + renewTokenUrl.toString().indexOf(userParam) == -1); checkQueryParams( new String[]{ PutOpParam.Op.RENEWDELEGATIONTOKEN.toQueryString(), - new UserParam(ugi.getRealUser().getShortUserName()).toString(), new DoAsParam(ugi.getShortUserName()).toString(), new TokenArgumentParam(tokenString).toString(), }, renewTokenUrl); + // send token URL cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN, fsPath, new TokenArgumentParam(tokenString)); + assertTrue("secure webhdfs SHOULD NOT use user.name parameter", + cancelTokenUrl.toString().indexOf(userParam) == -1); checkQueryParams( new String[]{ PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(), - new UserParam(ugi.getRealUser().getShortUserName()).toString(), new DoAsParam(ugi.getShortUserName()).toString(), new TokenArgumentParam(tokenString).toString(), }, cancelTokenUrl); - + + // send token URL fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath); checkQueryParams( @@ -274,15 +297,17 @@ public void testSecureProxyAuthParamsInUrl() throws IOException { // send real+effective cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN, fsPath, new TokenArgumentParam(tokenString)); + assertTrue("secure webhdfs SHOULD NOT use user.name parameter", + cancelTokenUrl.toString().indexOf(userParam) == -1); checkQueryParams( new String[]{ PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(), - new UserParam(ugi.getRealUser().getShortUserName()).toString(), new DoAsParam(ugi.getShortUserName()).toString(), new TokenArgumentParam(tokenString).toString() }, cancelTokenUrl); - + + // send real+effective fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath); checkQueryParams( From 79d14d0d421d20c2147990707238435e7808d73d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Tue, 28 May 2019 23:40:45 +0200 Subject: [PATCH 0054/1308] HDDS-1341. TestContainerReplication#testContainerReplication fails intermittently. Contributed by Elek, Marton. 
(#862)
---
 .../common/statemachine/DatanodeStateMachine.java    |  5 +++++
 .../ozone/container/common/volume/HddsVolume.java    |  3 ++-
 .../container/replication/ReplicationSupervisor.java |  8 ++++++++
 .../ozone/container/TestContainerReplication.java    | 12 ++++++++++--
 4 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 0119d23fe69c0..c9eb7024eaf18 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -481,4 +481,9 @@ public long getCommandHandled() {
   public CommandDispatcher getCommandDispatcher() {
     return commandDispatcher;
   }
+
+  @VisibleForTesting
+  public ReplicationSupervisor getSupervisor() {
+    return supervisor;
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 3a2034580ad0d..4eb16c166c5fd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -18,9 +18,10 @@
 package org.apache.hadoop.ozone.container.common.volume;
 
+import javax.annotation.Nullable;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.sun.istack.Nullable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.GetSpaceUsed;
 import org.apache.hadoop.fs.StorageType;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
index c59d643c47b98..7a07c4df71eb5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
@@ -22,6 +22,7 @@
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -42,6 +43,7 @@ public class ReplicationSupervisor {
   private final ContainerSet containerSet;
   private final ContainerReplicator replicator;
   private final ThreadPoolExecutor executor;
+  private final AtomicLong replicationCounter;
 
   /**
    * A set of container IDs that are currently being downloaded
@@ -56,6 +58,7 @@ public ReplicationSupervisor(
     this.containerSet = containerSet;
     this.replicator = replicator;
     this.containersInFlight = ConcurrentHashMap.newKeySet();
+    replicationCounter = new AtomicLong();
     this.executor = new ThreadPoolExecutor(
         0, poolSize, 60, TimeUnit.SECONDS,
         new LinkedBlockingQueue<>(),
@@ -123,7 +126,12 @@ public void run() {
       }
     } finally {
containersInFlight.remove(task.getContainerId()); + replicationCounter.incrementAndGet(); } } } + + public long getReplicationCounter() { + return replicationCounter.get(); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java index 84d6fd5a9f07b..ab78705559e6f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java @@ -41,11 +41,13 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.interfaces.Container; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer; import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; +import org.apache.hadoop.test.GenericTestUtils; import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer .writeChunkForContainer; @@ -123,10 +125,16 @@ public void testContainerReplication() throws Exception { new ReplicateContainerCommand(containerId, sourcePipelines.getNodes())); - Thread.sleep(3000); + DatanodeStateMachine destinationDatanodeDatanodeStateMachine = + destinationDatanode.getDatanodeStateMachine(); + + //wait for the replication + GenericTestUtils.waitFor(() + -> destinationDatanodeDatanodeStateMachine.getSupervisor() + .getReplicationCounter() > 0, 1000, 20_000); OzoneContainer ozoneContainer = - destinationDatanode.getDatanodeStateMachine().getContainer(); + destinationDatanodeDatanodeStateMachine.getContainer(); From 0c73dba3a6f2dc57435a069623f6c43e45ce655e Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Tue, 28 May 2019 22:50:37 +0100 Subject: [PATCH 0055/1308] HADOOP-16332. Remove S3A dependency on http core. Contributed by Steve Loughran. 
Change-Id: I53209c993a405fefdb5e1b692d5a56d027d3b845 --- hadoop-tools/hadoop-aws/pom.xml | 5 ----- .../java/org/apache/hadoop/fs/s3a/impl/ChangeTracker.java | 3 ++- .../org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java | 3 +-- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index b2e4a6b639100..0baefbe37e6f3 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -406,11 +406,6 @@ hadoop-common provided - - org.apache.httpcomponents - httpcore - provided - org.apache.hadoop hadoop-common diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeTracker.java index 75fecd5f14632..a95282cd8a713 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeTracker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeTracker.java @@ -41,7 +41,6 @@ import org.apache.hadoop.fs.s3a.S3ObjectAttributes; import static com.google.common.base.Preconditions.checkNotNull; -import static org.apache.http.HttpStatus.SC_PRECONDITION_FAILED; /** * Change tracking for input streams: the version ID or etag of the object is @@ -57,6 +56,8 @@ public class ChangeTracker { private static final Logger LOG = LoggerFactory.getLogger(ChangeTracker.class); + /** {@code 412 Precondition Failed} (HTTP/1.1 - RFC 2616) */ + public static final int SC_PRECONDITION_FAILED = 412; public static final String CHANGE_REPORTED_BY_S3 = "Change reported by S3"; /** Policy to use. */ diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java index c645ac5ad807d..8ca2eccfe3a91 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java @@ -36,7 +36,6 @@ import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy; import org.apache.hadoop.fs.s3a.impl.ChangeTracker; import org.apache.hadoop.test.HadoopTestBase; -import org.apache.http.HttpStatus; import static org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy.CHANGE_DETECTED; import static org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy.createPolicy; @@ -250,7 +249,7 @@ public void testCopyVersionMismatch() throws Throwable { // https://github.com/aws/aws-sdk-java/issues/1644 AmazonServiceException awsException = new AmazonServiceException("aws exception"); - awsException.setStatusCode(HttpStatus.SC_PRECONDITION_FAILED); + awsException.setStatusCode(ChangeTracker.SC_PRECONDITION_FAILED); expectChangeException(tracker, awsException, "copy", RemoteFileChangedException.PRECONDITIONS_FAILED); From 7f2e87a419cf87d1b1aa2b3b56f0f23504baa110 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Tue, 28 May 2019 18:39:23 -0700 Subject: [PATCH 0056/1308] =?UTF-8?q?HDDS-1604.=20ContainerReader#initiali?= =?UTF-8?q?zeUsedBytes=20leaks=20DB=20reference.=20Co=E2=80=A6=20(#866)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../DeleteBlocksCommandHandler.java | 2 +- .../common/utils/ContainerCache.java | 70 ------------ .../common/utils/ReferenceCountedDB.java | 101 ++++++++++++++++++ .../keyvalue/KeyValueBlockIterator.java | 2 +- .../container/keyvalue/KeyValueContainer.java | 2 +- 
.../keyvalue/KeyValueContainerCheck.java | 2 +- .../keyvalue/helpers/BlockUtils.java | 2 +- .../helpers/KeyValueContainerUtil.java | 2 +- .../keyvalue/impl/BlockManagerImpl.java | 2 +- .../background/BlockDeletingService.java | 2 +- .../container/ozoneimpl/ContainerReader.java | 29 ++--- .../keyvalue/TestKeyValueBlockIterator.java | 2 +- .../keyvalue/TestKeyValueContainer.java | 2 +- .../keyvalue/TestKeyValueContainerCheck.java | 2 +- .../ozoneimpl/TestOzoneContainer.java | 4 +- .../TestStorageContainerManagerHelper.java | 2 +- .../common/TestBlockDeletingService.java | 2 +- .../common/impl/TestContainerPersistence.java | 2 +- .../commandhandler/TestBlockDeletion.java | 2 +- .../TestCloseContainerByPipeline.java | 2 +- 20 files changed, 134 insertions(+), 102 deletions(-) create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index 966452e105be3..a5d47603ab1df 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -48,7 +48,7 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.BatchOperation; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java index c15bef0c0cfd3..ef75ec13db4d9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java @@ -28,11 +28,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.Closeable; import java.io.File; import java.io.IOException; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -69,22 +66,6 @@ public synchronized static ContainerCache getInstance(Configuration conf) { return cache; } - /** - * Closes a db instance. - * - * @param containerPath - path of the container db to be closed. - * @param db - db instance to close. - */ - private void closeDB(String containerPath, MetadataStore db) { - if (db != null) { - try { - db.close(); - } catch (Exception e) { - LOG.error("Error closing DB. Container: " + containerPath, e); - } - } - } - /** * Closes all the db instances and resets the cache. */ @@ -183,55 +164,4 @@ public void removeDB(String containerDBPath) { lock.unlock(); } } - - - /** - * Class to implement reference counting over instances handed by Container - * Cache. 
- */ - public class ReferenceCountedDB implements Closeable { - private final AtomicInteger referenceCount; - private final AtomicBoolean isEvicted; - private final MetadataStore store; - private final String containerDBPath; - - public ReferenceCountedDB(MetadataStore store, String containerDBPath) { - this.referenceCount = new AtomicInteger(0); - this.isEvicted = new AtomicBoolean(false); - this.store = store; - this.containerDBPath = containerDBPath; - } - - private void incrementReference() { - this.referenceCount.incrementAndGet(); - } - - private void decrementReference() { - this.referenceCount.decrementAndGet(); - cleanup(); - } - - private void setEvicted(boolean checkNoReferences) { - Preconditions.checkState(!checkNoReferences || - (referenceCount.get() == 0), - "checkNoReferences:%b, referencount:%d", - checkNoReferences, referenceCount.get()); - isEvicted.set(true); - cleanup(); - } - - private void cleanup() { - if (referenceCount.get() == 0 && isEvicted.get() && store != null) { - closeDB(containerDBPath, store); - } - } - - public MetadataStore getStore() { - return store; - } - - public void close() { - decrementReference(); - } - } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java new file mode 100644 index 0000000000000..31aca64c5fbd6 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.common.utils; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.utils.MetadataStore; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Class to implement reference counting over instances handed by Container + * Cache. + * Enable DEBUG log below will enable us quickly locate the leaked reference + * from caller stack. When JDK9 StackWalker is available, we can switch to + * StackWalker instead of new Exception().printStackTrace(). 
+ */ +public class ReferenceCountedDB implements Closeable { + private static final Logger LOG = + LoggerFactory.getLogger(ReferenceCountedDB.class); + private final AtomicInteger referenceCount; + private final AtomicBoolean isEvicted; + private final MetadataStore store; + private final String containerDBPath; + + public ReferenceCountedDB(MetadataStore store, String containerDBPath) { + this.referenceCount = new AtomicInteger(0); + this.isEvicted = new AtomicBoolean(false); + this.store = store; + this.containerDBPath = containerDBPath; + } + + public void incrementReference() { + this.referenceCount.incrementAndGet(); + if (LOG.isDebugEnabled()) { + LOG.debug("IncRef {} to refCnt {} \n", containerDBPath, + referenceCount.get()); + new Exception().printStackTrace(); + } + } + + public void decrementReference() { + this.referenceCount.decrementAndGet(); + if (LOG.isDebugEnabled()) { + LOG.debug("DecRef {} to refCnt {} \n", containerDBPath, + referenceCount.get()); + new Exception().printStackTrace(); + } + cleanup(); + } + + public void setEvicted(boolean checkNoReferences) { + Preconditions.checkState(!checkNoReferences || + (referenceCount.get() == 0), + "checkNoReferences:%b, referencount:%d, dbPath:%s", + checkNoReferences, referenceCount.get(), containerDBPath); + isEvicted.set(true); + cleanup(); + } + + private void cleanup() { + if (referenceCount.get() == 0 && isEvicted.get() && store != null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Close {} refCnt {}", containerDBPath, + referenceCount.get()); + } + try { + store.close(); + } catch (Exception e) { + LOG.error("Error closing DB. Container: " + containerDBPath, e); + } + } + } + + public MetadataStore getStore() { + return store; + } + + public void close() { + decrementReference(); + } +} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java index f1b71b89a9373..2f9451c198d58 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java @@ -31,7 +31,7 @@ import org.apache.hadoop.utils.MetaStoreIterator; import org.apache.hadoop.utils.MetadataKeyFilters; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.utils.MetadataStore.KeyValue; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 8d5ec72b980e5..38257c39f2389 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -73,7 +73,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .Result.UNSUPPORTED_REQUEST; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import 
org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index 4043914c89de7..373408bc6cc9c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -35,7 +35,7 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index fd3c7688f6cda..da7c8579d887b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -37,7 +37,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import java.io.IOException; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index 377536a1c91df..d5487b3602534 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -39,7 +39,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.io.FileUtils; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 8fe0b810a3cd0..dadd2afc3e51e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -35,7 +35,7 @@ import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.utils.BatchOperation; import org.apache.hadoop.utils.MetadataKeyFilters; -import 
org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java index c03bea791fcbd..4e02892dd0b97 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java @@ -43,7 +43,7 @@ import org.apache.hadoop.utils.BackgroundTaskResult; import org.apache.hadoop.utils.BatchOperation; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 37b726ddbb31b..92c17d14ee521 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -41,7 +41,7 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; import org.apache.hadoop.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -237,24 +237,25 @@ public void verifyAndFixupContainerData(ContainerData containerData) private void initializeUsedBytes(KeyValueContainer container) throws IOException { - KeyValueBlockIterator blockIter = new KeyValueBlockIterator( + try (KeyValueBlockIterator blockIter = new KeyValueBlockIterator( container.getContainerData().getContainerID(), - new File(container.getContainerData().getContainerPath())); - long usedBytes = 0; + new File(container.getContainerData().getContainerPath()))) { + long usedBytes = 0; - while (blockIter.hasNext()) { - BlockData block = blockIter.nextBlock(); - long blockLen = 0; + while (blockIter.hasNext()) { + BlockData block = blockIter.nextBlock(); + long blockLen = 0; - List chunkInfoList = block.getChunks(); - for (ContainerProtos.ChunkInfo chunk : chunkInfoList) { - ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk); - blockLen += info.getLen(); + List chunkInfoList = block.getChunks(); + for (ContainerProtos.ChunkInfo chunk : chunkInfoList) { + ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk); + blockLen += info.getLen(); + } + + usedBytes += blockLen; } - usedBytes += blockLen; + container.getContainerData().setBytesUsed(usedBytes); } - - container.getContainerData().setBytesUsed(usedBytes); } } diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index 687e64e16b18a..6a29d0de14d3d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -34,7 +34,7 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index c16574155255b..81d3065833ebe 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DiskChecker; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.junit.Assert; import org.junit.Before; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index cae275af52584..4ef77e45658e8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -35,7 +35,7 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 6c089021cb372..e6782823ed1c0 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import 
org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; @@ -155,7 +155,7 @@ private long addBlocks(KeyValueContainer container, long freeBytes = container.getContainerData().getMaxSize(); long containerId = container.getContainerData().getContainerID(); - ContainerCache.ReferenceCountedDB db = BlockUtils.getDB(container + ReferenceCountedDB db = BlockUtils.getDB(container .getContainerData(), conf); for (int bi = 0; bi < blocks; bi++) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java index da81e6de4545a..505ee5d7ea855 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java @@ -40,7 +40,7 @@ import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.utils.MetadataKeyFilters; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import java.io.IOException; import java.io.OutputStream; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 9993f90ca279f..a85a117d529f2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -45,7 +45,7 @@ import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.utils.BackgroundService; import org.apache.hadoop.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Test; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index f43caeeda2b49..c0415e81ff2ed 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -47,7 +47,7 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import 
org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index cbb83eaadd004..26d16d505a3ea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -50,7 +50,7 @@ import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index e384d71f609a6..f86208640a692 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -41,7 +41,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; From 346c2b798080cc1f22d6ba85e584141e7dee2c08 Mon Sep 17 00:00:00 2001 From: supratimdeka <46919641+supratimdeka@users.noreply.github.com> Date: Wed, 29 May 2019 09:18:14 +0530 Subject: [PATCH 0057/1308] HDDS-1559. Include committedBytes to determine Out of Space in VolumeChoosingPolicy. 
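
In other words, chooseVolume stops treating space that is already promised to open containers as free: a volume's usable size becomes getAvailable() minus getCommittedBytes() before it is compared against the requested container size. A minimal sketch of that adjusted check, assuming only the HddsVolume accessors used in this patch; the class and method names below are illustrative and not part of the change:

    // Illustrative sketch only; not code from the patch. It mirrors the adjusted
    // availability check this change introduces in RoundRobinVolumeChoosingPolicy.
    final class CommittedSpaceCheck {
      // "availableBytes" corresponds to HddsVolume#getAvailable(), "committedBytes" to
      // HddsVolume#getCommittedBytes() (space reserved for already-open containers).
      static boolean hasRoomFor(long availableBytes, long committedBytes,
                                long maxContainerSize) {
        long usableBytes = availableBytes - committedBytes; // committed space is not truly free
        return usableBytes > maxContainerSize;
      }
    }

The new test below drives the same condition from the other side: it inflates committed space with volume.incCommittedBytes() until less than one container size remains usable, then expects container creation to fail with a StorageContainerException whose result is DISK_OUT_OF_SPACE.
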
Contributed by Supratim Deka (#841) --- .../RoundRobinVolumeChoosingPolicy.java | 4 ++- .../ozoneimpl/TestOzoneContainer.java | 35 +++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java index 75c92ec024bb4..f503149aca438 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java @@ -58,7 +58,9 @@ public HddsVolume chooseVolume(List volumes, while (true) { final HddsVolume volume = volumes.get(currentVolumeIndex); - long availableVolumeSize = volume.getAvailable(); + // adjust for remaining capacity in Open containers + long availableVolumeSize = volume.getAvailable() + - volume.getCommittedBytes(); currentVolumeIndex = (currentVolumeIndex + 1) % volumes.size(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index e6782823ed1c0..f5ebb49d06317 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; @@ -52,6 +53,7 @@ import java.util.List; import java.util.ArrayList; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DISK_OUT_OF_SPACE; import static org.junit.Assert.assertEquals; /** @@ -135,6 +137,39 @@ public void testBuildContainerMap() throws Exception { verifyCommittedSpace(ozoneContainer); } + @Test + public void testContainerCreateDiskFull() throws Exception { + volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf); + volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); + long containerSize = (long) StorageUnit.MB.toBytes(100); + boolean diskSpaceException = false; + + // Format the volumes + for (HddsVolume volume : volumeSet.getVolumesList()) { + volume.format(UUID.randomUUID().toString()); + + // eat up all available space except size of 1 container + volume.incCommittedBytes(volume.getAvailable() - containerSize); + // eat up 10 bytes more, now available space is less than 1 container + volume.incCommittedBytes(10); + } + keyValueContainerData = new KeyValueContainerData(99, containerSize, + UUID.randomUUID().toString(), datanodeDetails.getUuidString()); + keyValueContainer = new KeyValueContainer(keyValueContainerData, conf); + + // we expect an out of space Exception + try { + keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); + } catch (StorageContainerException e) { + if 
(e.getResult() == DISK_OUT_OF_SPACE) { + diskSpaceException = true; + } + } + + // Test failed if there was no exception + assertEquals(true, diskSpaceException); + } + //verify committed space on each volume private void verifyCommittedSpace(OzoneContainer oc) { for (HddsVolume dnVol : oc.getVolumeSet().getVolumesList()) { From afd844059c7ff8b10c1978442ba3faf8d6ce57b2 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Tue, 28 May 2019 16:29:44 +0900 Subject: [PATCH 0058/1308] HADOOP-16331. Fix ASF License check in pom.xml Signed-off-by: Takanobu Asanuma --- hadoop-assemblies/pom.xml | 2 +- hadoop-build-tools/pom.xml | 2 +- hadoop-client-modules/hadoop-client-api/pom.xml | 2 +- hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 2 +- .../hadoop-client-check-test-invariants/pom.xml | 2 +- hadoop-client-modules/hadoop-client-integration-tests/pom.xml | 2 +- hadoop-client-modules/hadoop-client-minicluster/pom.xml | 2 +- hadoop-client-modules/hadoop-client-runtime/pom.xml | 2 +- hadoop-client-modules/hadoop-client/pom.xml | 2 +- hadoop-client-modules/pom.xml | 2 +- hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml | 2 +- hadoop-cloud-storage-project/pom.xml | 2 +- hadoop-common-project/hadoop-annotations/pom.xml | 2 +- hadoop-common-project/hadoop-auth-examples/pom.xml | 2 +- hadoop-common-project/hadoop-auth/pom.xml | 2 +- hadoop-common-project/hadoop-common/pom.xml | 2 +- hadoop-common-project/hadoop-kms/pom.xml | 2 +- hadoop-common-project/hadoop-minikdc/pom.xml | 2 +- hadoop-common-project/hadoop-nfs/pom.xml | 2 +- hadoop-common-project/hadoop-registry/pom.xml | 2 +- hadoop-common-project/pom.xml | 2 +- hadoop-dist/pom.xml | 2 +- hadoop-hdds/client/pom.xml | 2 +- hadoop-hdds/common/pom.xml | 2 +- hadoop-hdds/config/pom.xml | 2 +- hadoop-hdds/container-service/pom.xml | 2 +- hadoop-hdds/docs/pom.xml | 2 +- hadoop-hdds/framework/pom.xml | 2 +- hadoop-hdds/pom.xml | 2 +- hadoop-hdds/server-scm/pom.xml | 2 +- hadoop-hdds/tools/pom.xml | 2 +- hadoop-hdfs-project/hadoop-hdfs-client/pom.xml | 2 +- hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 2 +- hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml | 2 +- hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml | 2 +- hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 2 +- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 2 +- hadoop-hdfs-project/pom.xml | 2 +- .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml | 2 +- .../hadoop-mapreduce-client-common/pom.xml | 2 +- .../hadoop-mapreduce-client-core/pom.xml | 2 +- .../hadoop-mapreduce-client-hs-plugins/pom.xml | 2 +- .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml | 2 +- .../hadoop-mapreduce-client-jobclient/pom.xml | 2 +- .../hadoop-mapreduce-client-nativetask/pom.xml | 2 +- .../hadoop-mapreduce-client-shuffle/pom.xml | 2 +- .../hadoop-mapreduce-client-uploader/pom.xml | 2 +- hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 2 +- hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml | 2 +- hadoop-mapreduce-project/pom.xml | 2 +- hadoop-maven-plugins/pom.xml | 2 +- hadoop-minicluster/pom.xml | 2 +- hadoop-ozone/client/pom.xml | 2 +- hadoop-ozone/common/pom.xml | 2 +- hadoop-ozone/datanode/pom.xml | 2 +- hadoop-ozone/dist/pom.xml | 2 +- hadoop-ozone/integration-test/pom.xml | 2 +- hadoop-ozone/objectstore-service/pom.xml | 2 +- hadoop-ozone/ozone-manager/pom.xml | 2 +- hadoop-ozone/ozone-recon-codegen/pom.xml | 2 +- hadoop-ozone/ozone-recon/pom.xml | 2 +- hadoop-ozone/ozonefs-lib-current/pom.xml | 2 +- hadoop-ozone/ozonefs-lib-legacy/pom.xml | 2 +- 
hadoop-ozone/ozonefs/pom.xml | 2 +- hadoop-ozone/pom.xml | 2 +- hadoop-ozone/s3gateway/pom.xml | 2 +- hadoop-ozone/tools/pom.xml | 2 +- hadoop-ozone/upgrade/pom.xml | 2 +- hadoop-project-dist/pom.xml | 2 +- hadoop-project/pom.xml | 2 +- hadoop-submarine/hadoop-submarine-all/pom.xml | 2 +- hadoop-submarine/hadoop-submarine-core/pom.xml | 2 +- hadoop-submarine/hadoop-submarine-dist/pom.xml | 2 +- hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml | 2 +- hadoop-submarine/hadoop-submarine-yarnservice-runtime/pom.xml | 2 +- hadoop-submarine/pom.xml | 2 +- hadoop-tools/hadoop-aliyun/pom.xml | 2 +- hadoop-tools/hadoop-archive-logs/pom.xml | 2 +- hadoop-tools/hadoop-archives/pom.xml | 2 +- hadoop-tools/hadoop-aws/pom.xml | 2 +- hadoop-tools/hadoop-azure-datalake/pom.xml | 2 +- hadoop-tools/hadoop-azure/pom.xml | 2 +- hadoop-tools/hadoop-datajoin/pom.xml | 2 +- hadoop-tools/hadoop-distcp/pom.xml | 2 +- hadoop-tools/hadoop-extras/pom.xml | 2 +- hadoop-tools/hadoop-fs2img/pom.xml | 2 +- hadoop-tools/hadoop-gridmix/pom.xml | 2 +- hadoop-tools/hadoop-kafka/pom.xml | 2 +- hadoop-tools/hadoop-openstack/pom.xml | 2 +- hadoop-tools/hadoop-pipes/pom.xml | 2 +- hadoop-tools/hadoop-resourceestimator/pom.xml | 2 +- hadoop-tools/hadoop-rumen/pom.xml | 2 +- hadoop-tools/hadoop-sls/pom.xml | 2 +- hadoop-tools/hadoop-streaming/pom.xml | 2 +- hadoop-tools/hadoop-tools-dist/pom.xml | 2 +- hadoop-tools/pom.xml | 2 +- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml | 2 +- .../hadoop-yarn-applications-catalog-docker/pom.xml | 2 +- .../hadoop-yarn-applications-catalog-webapp/pom.xml | 2 +- .../hadoop-yarn-applications-catalog/pom.xml | 2 +- .../hadoop-yarn-applications-distributedshell/pom.xml | 2 +- .../hadoop-yarn-applications-mawo-core/pom.xml | 2 +- .../hadoop-yarn-applications-mawo/pom.xml | 2 +- .../hadoop-yarn-applications-unmanaged-am-launcher/pom.xml | 2 +- .../hadoop-yarn-services/hadoop-yarn-services-api/pom.xml | 2 +- .../hadoop-yarn-services/hadoop-yarn-services-core/pom.xml | 2 +- .../hadoop-yarn-applications/hadoop-yarn-services/pom.xml | 2 +- .../hadoop-yarn/hadoop-yarn-applications/pom.xml | 2 +- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml | 2 +- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml | 2 +- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml | 2 +- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 2 +- .../hadoop-yarn-server-applicationhistoryservice/pom.xml | 2 +- .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml | 2 +- .../hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml | 2 +- .../hadoop-yarn-server-resourcemanager/pom.xml | 2 +- .../hadoop-yarn-server/hadoop-yarn-server-router/pom.xml | 2 +- .../hadoop-yarn-server-sharedcachemanager/pom.xml | 2 +- .../hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml | 2 +- .../hadoop-yarn-server-timeline-pluginstorage/pom.xml | 2 +- .../hadoop-yarn-server-timelineservice-documentstore/pom.xml | 2 +- .../hadoop-yarn-server-timelineservice-hbase-tests/pom.xml | 2 +- .../hadoop-yarn-server-timelineservice-hbase-client/pom.xml | 2 +- .../hadoop-yarn-server-timelineservice-hbase-common/pom.xml | 2 +- .../hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml | 2 +- .../hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml | 2 +- .../hadoop-yarn-server-timelineservice-hbase-server/pom.xml | 2 +- .../hadoop-yarn-server-timelineservice-hbase/pom.xml | 2 +- .../hadoop-yarn-server-timelineservice/pom.xml | 2 +- .../hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml | 2 +- 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml | 2 +- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml | 2 +- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml | 2 +- hadoop-yarn-project/hadoop-yarn/pom.xml | 2 +- hadoop-yarn-project/pom.xml | 2 +- pom.xml | 2 +- 136 files changed, 136 insertions(+), 136 deletions(-) diff --git a/hadoop-assemblies/pom.xml b/hadoop-assemblies/pom.xml index 2421ec12b3d76..b0fd7325c6eb1 100644 --- a/hadoop-assemblies/pom.xml +++ b/hadoop-assemblies/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-build-tools/pom.xml b/hadoop-build-tools/pom.xml index 39abbc9412fdb..ed4c0ef9ce9ff 100644 --- a/hadoop-build-tools/pom.xml +++ b/hadoop-build-tools/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml index 7aee190b00648..7ee7b85fec937 100644 --- a/hadoop-client-modules/hadoop-client-api/pom.xml +++ b/hadoop-client-modules/hadoop-client-api/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml index f2504d84067ce..802a8f4406abe 100644 --- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml index a4c56f4d41a87..cf31e3a6e2953 100644 --- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml index 1a72df470c914..1a14549250c3e 100644 --- a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml +++ b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index 4589c2d55977b..918b37413777c 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml index fe7fe40ac2fc3..cb1a2f970c768 100644 --- a/hadoop-client-modules/hadoop-client-runtime/pom.xml +++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml index 6d2daa81ebc4c..9216a2e54a397 100644 --- a/hadoop-client-modules/hadoop-client/pom.xml +++ b/hadoop-client-modules/hadoop-client/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-client-modules/pom.xml b/hadoop-client-modules/pom.xml index 912fde91cc42a..0895e31ca307f 100644 --- a/hadoop-client-modules/pom.xml +++ b/hadoop-client-modules/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml index dc4161ec1d31e..b5e35b079f9fd 100644 --- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-cloud-storage-project/pom.xml b/hadoop-cloud-storage-project/pom.xml index ea899cf01f43a..a96431f92ab43 100644 --- a/hadoop-cloud-storage-project/pom.xml +++ b/hadoop-cloud-storage-project/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml index 5d64e83fad643..738f0ada8f1e9 100644 --- a/hadoop-common-project/hadoop-annotations/pom.xml +++ b/hadoop-common-project/hadoop-annotations/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml index 7bb0bcf2e5483..fb904912999b8 100644 --- a/hadoop-common-project/hadoop-auth-examples/pom.xml +++ b/hadoop-common-project/hadoop-auth-examples/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index eba5c650998f3..20a3e7059b154 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index fc089eb012a7c..6d15958e353a7 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index 31062c5c0aaa3..6f4ff09952ac4 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index 8323ae440aa33..adbd6e32bee58 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml index 62fa0f7594d32..f3ef25a19f825 100644 --- a/hadoop-common-project/hadoop-nfs/pom.xml +++ b/hadoop-common-project/hadoop-nfs/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-common-project/hadoop-registry/pom.xml b/hadoop-common-project/hadoop-registry/pom.xml index d0c17cfc96b8d..dc45309dca296 100644 --- a/hadoop-common-project/hadoop-registry/pom.xml +++ b/hadoop-common-project/hadoop-registry/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml index 03c051aae6226..8be2593c21ffd 100644 --- a/hadoop-common-project/pom.xml +++ b/hadoop-common-project/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index e2a9b678989bd..07aa7b10a8320 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index d996a9fc90834..673af41aeef0d 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 8f9432c58fb0a..31a7311b0e322 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index 7b79e50e6f812..bf62949411c8c 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index f402dfd7feddd..c1dd403b03dda 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index e8ef9fc351998..dc373954cf84b 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 05c92606b3b8d..1420d558fbc83 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 6a7cb611deea3..08c28e9c9882c 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index c9afd0c53c436..9b2a8e2b98caf 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index 44d20b9e1c716..6f0be62bd60a8 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml index 678ababb360ce..8769bef68e17f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index 60700b5ac0f94..50a24a9e6582e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml index 481066ed9c126..bb95442605a79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index 3f2d9b7f90a4c..2d30f67ef34a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index 014dceeaed2bc..8c76214166c8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 9fdb9312a311f..63cebe4a6494a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml index af27cf8e6fb4d..352db4769b81e 100644 --- a/hadoop-hdfs-project/pom.xml +++ b/hadoop-hdfs-project/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml index 94cba6edc8103..727797aa6605b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml index 924190d47b59b..430b09db7975c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml index e368b0de9eb34..336829dcf8e98 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml index 331dcd694351f..ee34f2c134403 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml index c75c866b150eb..d737e815cd240 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml index 246c9f363cb61..3ebba795ef6d0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml index 87938730364d4..8e2a12d1d5b6a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml index 6ef6bafdd5802..06b136299de50 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml index eb7ea8b0b7eb8..8060f70b1725b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index ed2a503a38f9a..fdea2d3e6bcc7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml index ae40c04cc75c9..5da8e4372555f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index 7742b7c51fa67..85ef6429a4b0c 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml index 401c26b043e1b..faf8494100949 100644 --- a/hadoop-maven-plugins/pom.xml +++ b/hadoop-maven-plugins/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-minicluster/pom.xml b/hadoop-minicluster/pom.xml index eb7746444fd80..4f6b33e1456d2 100644 --- a/hadoop-minicluster/pom.xml +++ b/hadoop-minicluster/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index 4fb54054d2d6a..2fefd8b1ef8dd 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index 50f89b1e51291..06973b397533a 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index b5b4f1d59fae5..b1a48e3d10c41 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 9e2966b1a0c4e..2edd2aa340d85 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index c8d8143af41a9..26387f83617d9 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/objectstore-service/pom.xml b/hadoop-ozone/objectstore-service/pom.xml index f81154aa21ddd..47d7e3782a8a9 100644 --- a/hadoop-ozone/objectstore-service/pom.xml +++ b/hadoop-ozone/objectstore-service/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 23663ce0760c1..54ba4f7bee088 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/ozone-recon-codegen/pom.xml b/hadoop-ozone/ozone-recon-codegen/pom.xml index 0e36e5ef001cd..6abc5ef85984d 100644 --- a/hadoop-ozone/ozone-recon-codegen/pom.xml +++ b/hadoop-ozone/ozone-recon-codegen/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/ozone-recon/pom.xml b/hadoop-ozone/ozone-recon/pom.xml index 791e76e1e1016..0debaccdbd543 100644 --- a/hadoop-ozone/ozone-recon/pom.xml +++ b/hadoop-ozone/ozone-recon/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/ozonefs-lib-current/pom.xml b/hadoop-ozone/ozonefs-lib-current/pom.xml index ad642ce1670ad..b3913c3b026b7 100644 --- a/hadoop-ozone/ozonefs-lib-current/pom.xml +++ b/hadoop-ozone/ozonefs-lib-current/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/ozonefs-lib-legacy/pom.xml b/hadoop-ozone/ozonefs-lib-legacy/pom.xml index 1cc01882c2495..14fcac36c02b2 100644 --- a/hadoop-ozone/ozonefs-lib-legacy/pom.xml +++ b/hadoop-ozone/ozonefs-lib-legacy/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml index 19443bd83ec02..02a5640966b84 100644 --- a/hadoop-ozone/ozonefs/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index aefdd9fc8e013..614c6d95ebf55 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index 658cee2ea005d..cbbba1f7fc00f 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 5b10389b5f127..f8ed807990636 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-ozone/upgrade/pom.xml b/hadoop-ozone/upgrade/pom.xml index a4763442d86aa..57211022f1f9f 100644 --- a/hadoop-ozone/upgrade/pom.xml +++ b/hadoop-ozone/upgrade/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml index b845f9cd10ad4..e165428bf9e95 100644 --- a/hadoop-project-dist/pom.xml +++ b/hadoop-project-dist/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index f442f5b356a6f..f4ec48ac0bf6c 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-submarine/hadoop-submarine-all/pom.xml b/hadoop-submarine/hadoop-submarine-all/pom.xml index 342fa5e47beee..639a9199e93de 100644 --- a/hadoop-submarine/hadoop-submarine-all/pom.xml +++ b/hadoop-submarine/hadoop-submarine-all/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-submarine/hadoop-submarine-core/pom.xml b/hadoop-submarine/hadoop-submarine-core/pom.xml index 47a4a64f9758b..332c4db966d99 100644 --- a/hadoop-submarine/hadoop-submarine-core/pom.xml +++ b/hadoop-submarine/hadoop-submarine-core/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-submarine/hadoop-submarine-dist/pom.xml b/hadoop-submarine/hadoop-submarine-dist/pom.xml index 8bcd510bdf3d7..7196df1bb880f 100644 --- a/hadoop-submarine/hadoop-submarine-dist/pom.xml +++ b/hadoop-submarine/hadoop-submarine-dist/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml b/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml index d449a6075e8e7..6254538342982 100644 --- a/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml +++ b/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-submarine/hadoop-submarine-yarnservice-runtime/pom.xml b/hadoop-submarine/hadoop-submarine-yarnservice-runtime/pom.xml index af4a57f9eb706..034a29e589073 100644 --- a/hadoop-submarine/hadoop-submarine-yarnservice-runtime/pom.xml +++ b/hadoop-submarine/hadoop-submarine-yarnservice-runtime/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-submarine/pom.xml b/hadoop-submarine/pom.xml index f09be42cd10b2..2d33da2ac1662 100644 --- a/hadoop-submarine/pom.xml +++ b/hadoop-submarine/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-aliyun/pom.xml b/hadoop-tools/hadoop-aliyun/pom.xml index d09af89873b4f..6bd540840a4c4 100644 --- a/hadoop-tools/hadoop-aliyun/pom.xml +++ b/hadoop-tools/hadoop-aliyun/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-archive-logs/pom.xml b/hadoop-tools/hadoop-archive-logs/pom.xml index c6325f3a396a5..d8b7150a839ef 100644 --- a/hadoop-tools/hadoop-archive-logs/pom.xml +++ b/hadoop-tools/hadoop-archive-logs/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-archives/pom.xml b/hadoop-tools/hadoop-archives/pom.xml index 87b34c5e5cbfb..a7fba81805166 100644 --- a/hadoop-tools/hadoop-archives/pom.xml +++ b/hadoop-tools/hadoop-archives/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index 0baefbe37e6f3..880ae832e5b8d 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml b/hadoop-tools/hadoop-azure-datalake/pom.xml index 8515360234e28..6e73eaacc174c 100644 --- a/hadoop-tools/hadoop-azure-datalake/pom.xml +++ b/hadoop-tools/hadoop-azure-datalake/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index a79e0ddc87e81..26d37b852ee38 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-datajoin/pom.xml b/hadoop-tools/hadoop-datajoin/pom.xml index 2a14da144f5e8..2353cfbff6626 100644 --- a/hadoop-tools/hadoop-datajoin/pom.xml +++ b/hadoop-tools/hadoop-datajoin/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml index 593d2fd5245ae..019ae366606de 100644 --- a/hadoop-tools/hadoop-distcp/pom.xml +++ b/hadoop-tools/hadoop-distcp/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-extras/pom.xml b/hadoop-tools/hadoop-extras/pom.xml index ba2c3a6859fc2..f93b0e15aa009 100644 --- a/hadoop-tools/hadoop-extras/pom.xml +++ b/hadoop-tools/hadoop-extras/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-fs2img/pom.xml b/hadoop-tools/hadoop-fs2img/pom.xml index 35c1e1d3e4196..feda6500189f2 100644 --- a/hadoop-tools/hadoop-fs2img/pom.xml +++ b/hadoop-tools/hadoop-fs2img/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-gridmix/pom.xml b/hadoop-tools/hadoop-gridmix/pom.xml index 7a70f2ea01b6c..b9e433bd08180 100644 --- a/hadoop-tools/hadoop-gridmix/pom.xml +++ b/hadoop-tools/hadoop-gridmix/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-kafka/pom.xml b/hadoop-tools/hadoop-kafka/pom.xml index 65bc9c22b107d..b675bd3751e9a 100644 --- a/hadoop-tools/hadoop-kafka/pom.xml +++ b/hadoop-tools/hadoop-kafka/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml index 7c624f20e18a4..0236fbb897eb1 100644 --- a/hadoop-tools/hadoop-openstack/pom.xml +++ b/hadoop-tools/hadoop-openstack/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-pipes/pom.xml b/hadoop-tools/hadoop-pipes/pom.xml index 3bc8a7bb331a7..bda7fb35bef85 100644 --- a/hadoop-tools/hadoop-pipes/pom.xml +++ b/hadoop-tools/hadoop-pipes/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-resourceestimator/pom.xml b/hadoop-tools/hadoop-resourceestimator/pom.xml index 463b087f2e3c8..bd82cc8c8ccc5 100644 --- a/hadoop-tools/hadoop-resourceestimator/pom.xml +++ b/hadoop-tools/hadoop-resourceestimator/pom.xml @@ -9,7 +9,7 @@ ~ "License"); you may not use this file except in compliance ~ with the License. You may obtain a copy of the License at ~ - ~ https://www.apache.org/licenses/LICENSE-2.0 + ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml index 824a87efa77be..d4b7d64d3bfb1 100644 --- a/hadoop-tools/hadoop-rumen/pom.xml +++ b/hadoop-tools/hadoop-rumen/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml index 95f09467d8a19..81d6a19527ba2 100644 --- a/hadoop-tools/hadoop-sls/pom.xml +++ b/hadoop-tools/hadoop-sls/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml index 7949b9d590b94..9b379ab243a9a 100644 --- a/hadoop-tools/hadoop-streaming/pom.xml +++ b/hadoop-tools/hadoop-streaming/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml index 8b88a54bd8778..708c04e6cc19f 100644 --- a/hadoop-tools/hadoop-tools-dist/pom.xml +++ b/hadoop-tools/hadoop-tools-dist/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index 33bcc60ef9a33..af1977aa96fd1 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml index 22076a3bc1ff7..21ace7d4638c7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml index 03a608d044d85..7b021f7d8b66b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml @@ -6,7 +6,7 @@ The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml index 2dd70701b451e..273379d3d0816 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml @@ -6,7 +6,7 @@ The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml index d279c169e46e7..f62a8f468f853 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml @@ -6,7 +6,7 @@ The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml index 769af490a07d2..ee41ffb1e8a20 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml index cf63740eb76f6..93eab69a43c0e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml @@ -2,7 +2,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml index d56c19ecdc032..3a5cd9f8d287a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml @@ -2,7 +2,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml index f1ab46bccfa63..1d2a23245d030 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml index a33a72e8d0131..36364bf2a80f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml @@ -6,7 +6,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml index de7bf00816d2d..6205468882cff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml @@ -6,7 +6,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml index a637c2c82df9b..43ea41d69b67b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml index 1bc6685eada8b..cab01298782b3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml index b4870924961a5..81ff752e98a9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml index d8196a1ccaa25..26268c37ad0ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml index 6389efd5a2fd8..ba70199ad8d17 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml index 40bb84728a497..9618a062e37ab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml index d26e2636eb951..8e1be214eae90 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml index ed06300f08889..688c06d53c82a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml index eae8a68c4df0a..967643c664039 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml index 2ed8292e91163..190e1634f06d1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml index 716d7edf4cc21..30fce436cb099 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml index 441c7ffb9d714..7788fac509aa7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml index e6c5f4c2a7c58..1403cb1494319 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml @@ -3,7 +3,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml index 1e41c3b5b0069..c9fec01446823 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml index bed6aa0e7fba1..4a6cf7ab15e6f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml index a93a007a0847f..0b25e5b113ccd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml index 3a2c6f856efee..26b3e4f0e469c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml index 7d42823c6267e..804dfaec8b285 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml index 4833d95e2744c..57799e7422fee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml index 0dde629196fe6..cf6119c74dfe4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml index 1763c14b2d104..0dd25117e7924 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml index ed67661fc55c3..6574eb80a113d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml index c0d5be2d70d7a..10a9f2141a0f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml @@ -7,7 +7,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml index be8e70e2a080d..cf5f228fd7385 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml index 3eec5628b228a..f17e6ef3e44bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml index 32d70703b3dec..ff460c29e5d64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml index dabbd34c15040..3f696fee9ead9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml @@ -6,7 +6,7 @@ (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index d7b28e5f9bc62..1e222cad94e14 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml index ecbd65b2da3ef..a979cc8a6f987 100644 --- a/hadoop-yarn-project/pom.xml +++ b/hadoop-yarn-project/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pom.xml b/pom.xml index 0ad0cefb9be06..c77ec4baeac39 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, From 544876fe127780415c6d5dcfa8c8934eeafe6815 Mon Sep 17 00:00:00 2001 From: Weiwei Yang Date: Wed, 29 May 2019 16:34:48 +0800 Subject: [PATCH 0059/1308] YARN-8693. Add signalToContainer REST API for RMWebServices. Contributed by Tao Yang. 
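For orientation before the diff, here is a minimal client sketch of the endpoint this patch introduces. It is an assumption-laden illustration, not part of the patch: the RM address http://rm-host:8088, the /ws/v1/cluster web-services root, and the container ID are all placeholders chosen for the example; only the path template /containers/{containerid}/signal/{command} and the SignalContainerCommand values (OUTPUT_THREAD_DUMP, GRACEFUL_SHUTDOWN, FORCEFUL_SHUTDOWN) are taken from the changes below.

// Hypothetical client sketch for the new signal endpoint; values marked as
// assumed are illustrative and not defined by this patch.
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

public class SignalContainerClient {
  public static void main(String[] args) throws IOException {
    String rm = "http://rm-host:8088";                              // assumed RM webapp address
    String containerId = "container_1559100000000_0001_01_000002"; // illustrative container ID
    String command = "OUTPUT_THREAD_DUMP";                          // one of SignalContainerCommand

    // The patch maps POST /containers/{containerid}/signal/{command} under the
    // RM web-services root (assumed here to be /ws/v1/cluster).
    URL url = new URL(rm + "/ws/v1/cluster/containers/" + containerId
        + "/signal/" + command);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    conn.setRequestProperty("Accept", "application/json");

    // Per the implementation below: 200 on success, 400 for an unknown command,
    // 500 if the underlying ClientRMService call fails.
    System.out.println("HTTP status: " + conn.getResponseCode());
    conn.disconnect();
  }
}

As the RMWebServices change below shows, an unrecognized command is rejected with 400 and a message listing the valid SignalContainerCommand values, while exceptions from ClientRMService.signalToContainer are returned as 500.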
--- .../resourcemanager/webapp/RMWSConsts.java | 8 + .../webapp/RMWebServiceProtocol.java | 13 ++ .../resourcemanager/webapp/RMWebServices.java | 34 ++++ .../webapp/TestRMWebServicesContainers.java | 158 ++++++++++++++++++ .../webapp/DefaultRequestInterceptorREST.java | 9 + .../webapp/FederationInterceptorREST.java | 6 + .../router/webapp/RouterWebServices.java | 14 ++ .../webapp/MockRESTRequestInterceptor.java | 6 + .../PassThroughRESTRequestInterceptor.java | 6 + .../src/site/markdown/ResourceManagerRest.md | 54 ++++++ 10 files changed, 308 insertions(+) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesContainers.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java index 3c36fe82e8be8..b7a60087e648f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java @@ -189,6 +189,12 @@ public final class RMWSConsts { public static final String CHECK_USER_ACCESS_TO_QUEUE = "/queues/{queue}/access"; + /** + * Path for {@code RMWebServiceProtocol#signalContainer}. + */ + public static final String SIGNAL_TO_CONTAINER = + "/containers/{containerid}/signal/{command}"; + // ----------------QueryParams for RMWebServiceProtocol---------------- public static final String TIME = "time"; @@ -229,6 +235,8 @@ public final class RMWSConsts { public static final String REQUEST_PRIORITIES = "requestPriorities"; public static final String ALLOCATION_REQUEST_IDS = "allocationRequestIds"; public static final String GROUP_BY = "groupBy"; + public static final String SIGNAL = "signal"; + public static final String COMMAND = "command"; private RMWSConsts() { // not called diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java index 7b49ed4ac2289..3aa2593c1c2a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java @@ -712,4 +712,17 @@ Response updateApplicationTimeout(AppTimeoutInfo appTimeout, RMQueueAclInfo checkUserAccessToQueue(String queue, String username, String queueAclType, HttpServletRequest hsr) throws AuthorizationException; + + /** + * This method sends a signal to container. 
+ * @param containerId containerId + * @param command signal command, it could be OUTPUT_THREAD_DUMP/ + * GRACEFUL_SHUTDOWN/FORCEFUL_SHUTDOWN + * @param req request + * @return Response containing the status code + * @throws AuthorizationException if the user is not authorized to invoke this + * method. + */ + Response signalToContainer(String containerId, String command, + HttpServletRequest req) throws AuthorizationException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index 9b36995eb2640..3f010350cb58d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -94,6 +94,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest; @@ -104,6 +105,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType; +import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; @@ -118,6 +120,7 @@ import org.apache.hadoop.yarn.api.records.ReservationRequests; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceOption; +import org.apache.hadoop.yarn.api.records.SignalContainerCommand; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -2550,4 +2553,35 @@ public RMQueueAclInfo checkUserAccessToQueue( return new RMQueueAclInfo(true, user.getUserName(), ""); } + + @POST + @Path(RMWSConsts.SIGNAL_TO_CONTAINER) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response signalToContainer( + @PathParam(RMWSConsts.CONTAINERID) String containerId, + @PathParam(RMWSConsts.COMMAND) String command, + @Context HttpServletRequest hsr) + throws AuthorizationException { + UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true); + initForWritableEndpoints(callerUGI, true); + if (!EnumUtils.isValidEnum( + SignalContainerCommand.class, command.toUpperCase())) { + String errMsg = + "Invalid command: " + command.toUpperCase() + ", valid commands are: " + + Arrays.asList(SignalContainerCommand.values()); + return 
Response.status(Status.BAD_REQUEST).entity(errMsg).build(); + } + try { + ContainerId containerIdObj = ContainerId.fromString(containerId); + rm.getClientRMService().signalToContainer(SignalContainerRequest + .newInstance(containerIdObj, + SignalContainerCommand.valueOf(command.toUpperCase()))); + } catch (Exception e) { + return Response.status(Status.INTERNAL_SERVER_ERROR) + .entity(e.getMessage()).build(); + } + return Response.status(Status.OK).build(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesContainers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesContainers.java new file mode 100644 index 0000000000000..d2a94c135dc5b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesContainers.java @@ -0,0 +1,158 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import javax.ws.rs.core.MediaType; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.http.JettyUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.SignalContainerCommand; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; +import org.apache.hadoop.yarn.webapp.GuiceServletConfig; +import org.apache.hadoop.yarn.webapp.JerseyTestBase; +import org.eclipse.jetty.server.Response; +import org.junit.Before; +import org.junit.Test; + +import com.google.inject.Guice; +import com.google.inject.servlet.ServletModule; +import com.sun.jersey.api.client.ClientResponse; +import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; +import com.sun.jersey.test.framework.WebAppDescriptor; + +/** + * Testing containers REST API. + */ +public class TestRMWebServicesContainers extends JerseyTestBase { + + private static MockRM rm; + private static String userName; + + private static class WebServletModule extends ServletModule { + @Override + protected void configureServlets() { + bind(JAXBContextResolver.class); + bind(RMWebServices.class); + bind(GenericExceptionHandler.class); + try { + userName = UserGroupInformation.getCurrentUser().getShortUserName(); + } catch (IOException ioe) { + throw new RuntimeException("Unable to get current user name " + + ioe.getMessage(), ioe); + } + Configuration conf = new Configuration(); + conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, + ResourceScheduler.class); + conf.set(YarnConfiguration.YARN_ADMIN_ACL, userName); + rm = new MockRM(conf); + bind(ResourceManager.class).toInstance(rm); + serve("/*").with(GuiceContainer.class); + filter("/*").through(TestRMWebServicesAppsModification + .TestRMCustomAuthFilter.class); + } + } + + static { + GuiceServletConfig.setInjector( + Guice.createInjector(new WebServletModule())); + } + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + GuiceServletConfig.setInjector( + Guice.createInjector(new WebServletModule())); + } + + public TestRMWebServicesContainers() { + super(new WebAppDescriptor.Builder( + "org.apache.hadoop.yarn.server.resourcemanager.webapp") + .contextListenerClass(GuiceServletConfig.class) + .filterClass(com.google.inject.servlet.GuiceFilter.class) + .contextPath("jersey-guice-filter").servletPath("/").build()); + } + + @Test + public void testSignalContainer() throws Exception { + rm.start(); + MockNM nm = rm.registerNode("127.0.0.1:1234", 2048); + RMApp app = rm.submitApp(1024); + nm.nodeHeartbeat(true); + MockRM + .waitForState(app.getCurrentAppAttempt(), RMAppAttemptState.ALLOCATED); + rm.sendAMLaunched(app.getCurrentAppAttempt().getAppAttemptId()); + WebResource r = 
resource(); + + // test error command + ClientResponse response = + r.path("ws").path("v1").path("cluster").path("containers").path( + app.getCurrentAppAttempt().getMasterContainer().getId().toString()) + .path("signal") + .path("not-exist-signal") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).post(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, + response.getType().toString()); + assertEquals(Response.SC_BAD_REQUEST, response.getStatus()); + assertTrue(response.getEntity(String.class) + .contains("Invalid command: NOT-EXIST-SIGNAL")); + + // test error containerId + response = + r.path("ws").path("v1").path("cluster").path("containers").path("XXX") + .path("signal") + .path(SignalContainerCommand.OUTPUT_THREAD_DUMP.name()) + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).post(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, + response.getType().toString()); + assertEquals(Response.SC_INTERNAL_SERVER_ERROR, response.getStatus()); + assertTrue( + response.getEntity(String.class).contains("Invalid ContainerId")); + + // test correct signal + response = + r.path("ws").path("v1").path("cluster").path("containers").path( + app.getCurrentAppAttempt().getMasterContainer().getId().toString()) + .path("signal") + .path(SignalContainerCommand.OUTPUT_THREAD_DUMP.name()) + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).post(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, + response.getType().toString()); + assertEquals(Response.SC_OK, response.getStatus()); + + rm.stop(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java index f5f549a0594ab..7e6f306252186 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java @@ -543,4 +543,13 @@ public void setNextInterceptor(RESTRequestInterceptor next) { + "is correct"); } + @Override + public Response signalToContainer(String containerId, String command, + HttpServletRequest req) throws AuthorizationException { + return RouterWebServiceUtil + .genericForward(webAppAddress, req, Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + "/" + RMWSConsts.CONTAINERS + "/" + + containerId + "/" + RMWSConsts.SIGNAL + "/" + command, null, + null); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java index ec4cb8b21e0be..1c8b7a85f2976 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java 
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java @@ -1342,6 +1342,12 @@ public void setNextInterceptor(RESTRequestInterceptor next) { + "is correct"); } + @Override + public Response signalToContainer(String containerId, String command, + HttpServletRequest req) { + throw new NotImplementedException("Code is not implemented"); + } + @Override public void shutdown() { if (threadpool != null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java index ce45f21560cf6..9327c6f688d16 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java @@ -934,4 +934,18 @@ protected void setResponse(HttpServletResponse response) { this.response = response; } + @POST + @Path(RMWSConsts.SIGNAL_TO_CONTAINER) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + public Response signalToContainer( + @PathParam(RMWSConsts.CONTAINERID) String containerId, + @PathParam(RMWSConsts.COMMAND) String command, + @Context HttpServletRequest req) + throws AuthorizationException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(req); + return pipeline.getRootInterceptor() + .signalToContainer(containerId, command, req); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java index b3e18a9206167..f93b397e386a9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java @@ -359,4 +359,10 @@ public ContainerInfo getContainer(HttpServletRequest req, String containerId) { return new ContainerInfo(); } + + @Override + public Response signalToContainer(String containerId, String command, + HttpServletRequest req) { + return Response.status(Status.OK).build(); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java index 400bf714a86cb..126610cc475c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java @@ -363,4 +363,10 @@ public Response updateApplicationTimeout(AppTimeoutInfo appTimeout, return getNextInterceptor().updateApplicationTimeout(appTimeout, hsr, appId); } + + @Override + public Response signalToContainer(String containerId, + String command, HttpServletRequest req) throws AuthorizationException { + return getNextInterceptor().signalToContainer(containerId, command, req); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md index 54a692e6cc7a0..a6d7971b63f1a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md @@ -5291,3 +5291,57 @@ Response Header: HTTP/1.1 200 OK Content-Type: application/xml Transfer-Encoding: chunked + + +Cluster Container Signal API +-------------------------------- + +With the Container Signal API, you can send a signal to a specified container with one of the following commands: OUTPUT_THREAD_DUMP, GRACEFUL_SHUTDOWN and FORCEFUL_SHUTDOWN. + +### URI + + http://rm-http-address:port/ws/v1/cluster/containers/{containerId}/signal/{command} + +### HTTP Operations Supported + + POST + +### Query Parameters Supported + + None + +### Response Examples + +**JSON response** + +HTTP Request: + + POST http://rm-http-address:port/ws/v1/cluster/containers/container_1531404209605_0008_01_000001/signal/OUTPUT_THREAD_DUMP + Accept: application/json + Content-Type: application/json + +Response Header: + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + Server: Jetty(6.1.26) + +No response body. + +**XML response** + +HTTP Request: + + POST http://rm-http-address:port/ws/v1/cluster/containers/container_1531404209605_0008_01_000001/signal/OUTPUT_THREAD_DUMP + Accept: application/xml + Content-Type: application/xml + +Response Header: + + HTTP/1.1 200 OK + Content-Type: application/xml + Content-Length: 552 + Server: Jetty(6.1.26) + +No response body. From 3c63551101dc17038efb9f5345833d2883fce86e Mon Sep 17 00:00:00 2001 From: Eric E Payne Date: Wed, 29 May 2019 16:05:39 +0000 Subject: [PATCH 0060/1308] YARN-8625. Aggregate Resource Allocation for each job is not present in ATS. Contributed by Prabhu Joseph. 
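The change below adds two fields, aggregateResourceAllocation and aggregatePreemptedResourceAllocation, to the AppInfo DAO used by the application history web services, so the aggregate resource usage of a finished job is reported through ATS. As a rough, self-contained way to confirm that the new keys appear in the JSON app report, a client could do something like the following sketch; the history server address, REST path, application id and class name here are illustrative assumptions, not values taken from this change:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class CheckAggregateAllocation {
      public static void main(String[] args) throws IOException {
        // Assumed history server address, REST path and application id.
        URL url = new URL("http://localhost:8188/ws/v1/applicationhistory/apps/"
            + "application_1531404209605_0008");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        StringBuilder body = new StringBuilder();
        try (BufferedReader in = new BufferedReader(new InputStreamReader(
            conn.getInputStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            body.append(line);
          }
        } finally {
          conn.disconnect();
        }
        // Both keys should be present in the report once the fields below exist.
        System.out.println(body.indexOf("aggregateResourceAllocation") >= 0);
        System.out.println(body.indexOf("aggregatePreemptedResourceAllocation") >= 0);
      }
    }

The field values themselves come from StringHelper.getResourceSecondsString over the app's resource-seconds maps, so their exact formatting follows that helper rather than anything shown in this sketch.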
--- .../webapp/TestAHSWebServices.java | 5 +++++ .../hadoop/yarn/server/webapp/dao/AppInfo.java | 16 ++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java index 492e4535f6cde..610f7e562cf64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.assertResponseStatusCode; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; import java.net.HttpURLConnection; @@ -415,6 +416,10 @@ public void testSingleApp() throws Exception { assertEquals(FinalApplicationStatus.UNDEFINED.toString(), app.get("finalAppStatus")); assertEquals(YarnApplicationState.FINISHED.toString(), app.get("appState")); + assertNotNull("Aggregate resource allocation is null", + app.get("aggregateResourceAllocation")); + assertNotNull("Aggregate Preempted Resource Allocation is null", + app.get("aggregatePreemptedResourceAllocation")); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java index c4de022f0255a..d053f33bd0a00 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java @@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.util.Times; +import org.apache.hadoop.yarn.util.StringHelper; @Public @Evolving @@ -67,6 +68,8 @@ public class AppInfo { protected boolean unmanagedApplication; private String appNodeLabelExpression; private String amNodeLabelExpression; + private String aggregateResourceAllocation; + private String aggregatePreemptedResourceAllocation; public AppInfo() { // JAXB needs this @@ -110,6 +113,11 @@ public AppInfo(ApplicationReport app) { reservedMemoryMB = app.getApplicationResourceUsageReport() .getReservedResources().getMemorySize(); } + aggregateResourceAllocation = StringHelper.getResourceSecondsString( + app.getApplicationResourceUsageReport().getResourceSecondsMap()); + aggregatePreemptedResourceAllocation = StringHelper + .getResourceSecondsString(app.getApplicationResourceUsageReport() + .getPreemptedResourceSecondsMap()); } progress = app.getProgress() * 100; // in percent if (app.getApplicationTags() != null && 
!app.getApplicationTags().isEmpty()) { @@ -235,4 +243,12 @@ public String getAppNodeLabelExpression() { public String getAmNodeLabelExpression() { return amNodeLabelExpression; } + + public String getAggregateResourceAllocation() { + return aggregateResourceAllocation; + } + + public String getAggregatePreemptedResourceAllocation() { + return aggregatePreemptedResourceAllocation; + } } From abf76ac371e4611c8eb371736b433e3d89c9d2ae Mon Sep 17 00:00:00 2001 From: Ahmed Hussein Date: Wed, 29 May 2019 11:24:08 -0500 Subject: [PATCH 0061/1308] YARN-9563. Resource report REST API could return NaN or Inf (Ahmed Hussein via jeagles) Signed-off-by: Jonathan Eagles --- .../scheduler/SchedulerApplicationAttempt.java | 7 ++++--- .../scheduler/TestSchedulerApplicationAttempt.java | 2 +- .../scheduler/capacity/TestLeafQueue.java | 10 ++++++++++ 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index 2e8a7c14ebe70..cc7f5852d6379 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -1125,9 +1125,10 @@ public ApplicationResourceUsageReport getResourceUsageReport() { if (!calc.isInvalidDivisor(cluster)) { float queueCapacityPerc = queue.getQueueInfo(false, false) .getCapacity(); - if (queueCapacityPerc != 0) { - queueUsagePerc = calc.divide(cluster, usedResourceClone, - Resources.multiply(cluster, queueCapacityPerc)) * 100; + queueUsagePerc = calc.divide(cluster, usedResourceClone, + Resources.multiply(cluster, queueCapacityPerc)) * 100; + if (Float.isNaN(queueUsagePerc) || Float.isInfinite(queueUsagePerc)) { + queueUsagePerc = 0.0f; } clusterUsagePerc = calc.divide(cluster, usedResourceClone, cluster) * 100; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java index b7a143b8e9def..c3fe5f5145d05 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java @@ -276,7 +276,7 @@ public void testAppPercentages() throws Exception { assertEquals(60.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f); - queue = createQueue("test3", null, 0.0f); + queue = createQueue("test3", null, Float.MIN_VALUE); app = new SchedulerApplicationAttempt(appAttId, user, queue, queue.getAbstractUsersManager(), 
rmContext); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index e21f9e3270c4a..29fe14245efc6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -4039,6 +4039,16 @@ public void testApplicationQueuePercent() 0.01f); assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f); + + // test that queueUsagePercentage returns neither NaN nor Infinite + AbstractCSQueue zeroQueue = createQueue("test2.2", null, + Float.MIN_VALUE, Float.MIN_VALUE, + Resources.multiply(res, Float.MIN_VALUE)); + app = new FiCaSchedulerApp(appAttId, user, zeroQueue, + qChild.getAbstractUsersManager(), rmContext); + app.getAppAttemptResourceUsage().incUsed(requestedResource); + assertEquals(0.0f, app.getResourceUsageReport().getQueueUsagePercentage(), + 0.01f); } @Test From 751f0df71045ee90b21fbb8aa95c66c74a64b683 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Wed, 29 May 2019 11:55:44 -0700 Subject: [PATCH 0062/1308] HDDS-1231. Add ChillMode metrics. Contributed by Bharat Viswanadham. --- .../scm/safemode/ContainerSafeModeRule.java | 4 + .../safemode/HealthyPipelineSafeModeRule.java | 5 +- .../OneReplicaPipelineSafeModeRule.java | 5 + .../hdds/scm/safemode/SCMSafeModeManager.java | 15 +++ .../hdds/scm/safemode/SafeModeExitRule.java | 4 + .../hdds/scm/safemode/SafeModeMetrics.java | 111 ++++++++++++++++++ .../scm/server/StorageContainerManager.java | 2 + .../scm/safemode/TestSCMSafeModeManager.java | 60 +++++++++- 8 files changed, 200 insertions(+), 6 deletions(-) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java index 14091b23222de..496d481b61145 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java @@ -76,6 +76,8 @@ public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, maxContainer = containerMap.size(); } + long cutOff = (long) Math.ceil(maxContainer * safeModeCutoff); + getSafeModeMetrics().setNumContainerWithOneReplicaReportedThreshold(cutOff); } @@ -105,6 +107,8 @@ protected void process(NodeRegistrationContainerReport reportsProto) { if (containerMap.containsKey(c.getContainerID())) { if(containerMap.remove(c.getContainerID()) != null) { containerWithMinReplicas.getAndAdd(1); + getSafeModeMetrics() + .incCurrentContainersWithOneReplicaReportedCount(); } } }); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java index aee17b1ce0e7e..7a00d760fa4d1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java @@ -89,6 +89,9 @@ public class HealthyPipelineSafeModeRule LOG.info(" Total pipeline count is {}, healthy pipeline " + "threshold count is {}", pipelineCount, healthyPipelineThresholdCount); + + getSafeModeMetrics().setNumHealthyPipelinesThreshold( + healthyPipelineThresholdCount); } @Override @@ -135,9 +138,9 @@ protected void process(PipelineReportFromDatanode // If the pipeline is open state mean, all 3 datanodes are reported // for this pipeline. currentHealthyPipelineCount++; + getSafeModeMetrics().incCurrentHealthyPipelinesCount(); } } - if (scmInSafeMode()) { SCMSafeModeManager.getLogger().info( "SCM in safe mode. Healthy pipelines reported count is {}, " + diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java index f8f41b7d3620c..841d8ff6654c2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java @@ -85,6 +85,9 @@ public OneReplicaPipelineSafeModeRule(String ruleName, EventQueue eventQueue, "datanode reported threshold count is {}", totalPipelineCount, thresholdCount); + getSafeModeMetrics().setNumPipelinesWithAtleastOneReplicaReportedThreshold( + thresholdCount); + } @Override @@ -120,6 +123,8 @@ protected void process(PipelineReportFromDatanode if (pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE && !reportedPipelineIDSet.contains(pipelineID)) { reportedPipelineIDSet.add(pipelineID); + getSafeModeMetrics() + .incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount(); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java index 365259ae1cb7d..a22d1623fdcdc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java @@ -96,6 +96,8 @@ public class SCMSafeModeManager { private final EventQueue eventPublisher; private final PipelineManager pipelineManager; + private final SafeModeMetrics safeModeMetrics; + public SCMSafeModeManager(Configuration conf, List allContainers, PipelineManager pipelineManager, EventQueue eventQueue) { @@ -106,7 +108,9 @@ public SCMSafeModeManager(Configuration conf, HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT); + if (isSafeModeEnabled) { + this.safeModeMetrics = SafeModeMetrics.create(); ContainerSafeModeRule containerSafeModeRule = new ContainerSafeModeRule(CONT_EXIT_RULE, eventQueue, config, allContainers, this); @@ -132,10 +136,21 @@ public SCMSafeModeManager(Configuration conf, } emitSafeModeStatus(); } else { + this.safeModeMetrics = null; exitSafeMode(eventQueue); } } + public void stop() { + if (isSafeModeEnabled) { + this.safeModeMetrics.unRegister(); + } + } + + 
public SafeModeMetrics getSafeModeMetrics() { + return safeModeMetrics; + } + /** * Emit Safe mode status. */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java index eceb8a3c005b7..05e84dbbb3ddd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java @@ -107,4 +107,8 @@ protected boolean scmInSafeMode() { return safeModeManager.getInSafeMode(); } + protected SafeModeMetrics getSafeModeMetrics() { + return safeModeManager.getSafeModeMetrics(); + } + } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java new file mode 100644 index 0000000000000..80b8257c40b24 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.safemode; + +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MutableCounterLong; + +/** + * This class is used for maintaining SafeMode metric information, which can + * be used for monitoring during SCM startup when SCM is still in SafeMode. + */ +public class SafeModeMetrics { + private static final String SOURCE_NAME = + SafeModeMetrics.class.getSimpleName(); + + + // These all values will be set to some values when safemode is enabled. + private @Metric MutableCounterLong + numContainerWithOneReplicaReportedThreshold; + private @Metric MutableCounterLong + currentContainersWithOneReplicaReportedCount; + + // When hdds.scm.safemode.pipeline-availability.check is set then only + // below metrics will have some values, otherwise they will be zero. 
+ private @Metric MutableCounterLong numHealthyPipelinesThreshold; + private @Metric MutableCounterLong currentHealthyPipelinesCount; + private @Metric MutableCounterLong + numPipelinesWithAtleastOneReplicaReportedThreshold; + private @Metric MutableCounterLong + currentPipelinesWithAtleastOneReplicaReportedCount; + + public static SafeModeMetrics create() { + MetricsSystem ms = DefaultMetricsSystem.instance(); + return ms.register(SOURCE_NAME, + "SCM Safemode Metrics", + new SafeModeMetrics()); + } + + public void setNumHealthyPipelinesThreshold(long val) { + this.numHealthyPipelinesThreshold.incr(val); + } + + public void incCurrentHealthyPipelinesCount() { + this.currentHealthyPipelinesCount.incr(); + } + + public void setNumPipelinesWithAtleastOneReplicaReportedThreshold(long val) { + this.numPipelinesWithAtleastOneReplicaReportedThreshold.incr(val); + } + + public void incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount() { + this.currentPipelinesWithAtleastOneReplicaReportedCount.incr(); + } + + public void setNumContainerWithOneReplicaReportedThreshold(long val) { + this.numContainerWithOneReplicaReportedThreshold.incr(val); + } + + public void incCurrentContainersWithOneReplicaReportedCount() { + this.currentContainersWithOneReplicaReportedCount.incr(); + } + + public MutableCounterLong getNumHealthyPipelinesThreshold() { + return numHealthyPipelinesThreshold; + } + + public MutableCounterLong getCurrentHealthyPipelinesCount() { + return currentHealthyPipelinesCount; + } + + public MutableCounterLong + getNumPipelinesWithAtleastOneReplicaReportedThreshold() { + return numPipelinesWithAtleastOneReplicaReportedThreshold; + } + + public MutableCounterLong getCurrentPipelinesWithAtleastOneReplicaCount() { + return currentPipelinesWithAtleastOneReplicaReportedCount; + } + + public MutableCounterLong getNumContainerWithOneReplicaReportedThreshold() { + return numContainerWithOneReplicaReportedThreshold; + } + + public MutableCounterLong getCurrentContainersWithOneReplicaReportedCount() { + return currentContainersWithOneReplicaReportedCount; + } + + + public void unRegister() { + MetricsSystem ms = DefaultMetricsSystem.instance(); + ms.unregisterSource(SOURCE_NAME); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 8c4a514649cc0..b13f2cb7cfd32 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -1017,6 +1017,8 @@ public void stop() { } catch (Exception ex) { LOG.error("SCM Metadata store stop failed", ex); } + + scmSafeModeManager.stop(); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index fa582e39c3777..7ddf84e776910 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.test.GenericTestUtils; +import 
org.junit.Assert; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -90,10 +91,10 @@ public void testSafeModeStateWithNullContainers() { private void testSafeMode(int numContainers) throws Exception { containers = new ArrayList<>(); containers.addAll(HddsTestUtils.getContainerInfo(numContainers)); - // Assign open state to containers to be included in the safe mode - // container list + + // Currently only considered containers which are not in open state. for (ContainerInfo container : containers) { - container.setState(HddsProtos.LifeCycleState.OPEN); + container.setState(HddsProtos.LifeCycleState.CLOSED); } scmSafeModeManager = new SCMSafeModeManager( config, containers, null, queue); @@ -101,15 +102,28 @@ private void testSafeMode(int numContainers) throws Exception { assertTrue(scmSafeModeManager.getInSafeMode()); queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, HddsTestUtils.createNodeRegistrationContainerReport(containers)); + + long cutOff = (long) Math.ceil(numContainers * config.getDouble( + HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, + HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT)); + + Assert.assertEquals(cutOff, scmSafeModeManager.getSafeModeMetrics() + .getNumContainerWithOneReplicaReportedThreshold().value()); + GenericTestUtils.waitFor(() -> { return !scmSafeModeManager.getInSafeMode(); }, 100, 1000 * 5); + + Assert.assertEquals(cutOff, scmSafeModeManager.getSafeModeMetrics() + .getCurrentContainersWithOneReplicaReportedCount().value()); + } @Test public void testSafeModeExitRule() throws Exception { containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(25 * 4)); + int numContainers = 100; + containers.addAll(HddsTestUtils.getContainerInfo(numContainers)); // Assign open state to containers to be included in the safe mode // container list for (ContainerInfo container : containers) { @@ -118,15 +132,30 @@ public void testSafeModeExitRule() throws Exception { scmSafeModeManager = new SCMSafeModeManager( config, containers, null, queue); + long cutOff = (long) Math.ceil(numContainers * config.getDouble( + HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, + HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT)); + + Assert.assertEquals(cutOff, scmSafeModeManager.getSafeModeMetrics() + .getNumContainerWithOneReplicaReportedThreshold().value()); + assertTrue(scmSafeModeManager.getInSafeMode()); testContainerThreshold(containers.subList(0, 25), 0.25); + Assert.assertEquals(25, scmSafeModeManager.getSafeModeMetrics() + .getCurrentContainersWithOneReplicaReportedCount().value()); assertTrue(scmSafeModeManager.getInSafeMode()); testContainerThreshold(containers.subList(25, 50), 0.50); + Assert.assertEquals(50, scmSafeModeManager.getSafeModeMetrics() + .getCurrentContainersWithOneReplicaReportedCount().value()); assertTrue(scmSafeModeManager.getInSafeMode()); testContainerThreshold(containers.subList(50, 75), 0.75); + Assert.assertEquals(75, scmSafeModeManager.getSafeModeMetrics() + .getCurrentContainersWithOneReplicaReportedCount().value()); assertTrue(scmSafeModeManager.getInSafeMode()); testContainerThreshold(containers.subList(75, 100), 1.0); + Assert.assertEquals(100, scmSafeModeManager.getSafeModeMetrics() + .getCurrentContainersWithOneReplicaReportedCount().value()); GenericTestUtils.waitFor(() -> { return !scmSafeModeManager.getInSafeMode(); @@ -248,7 +277,6 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( pipelineManager, queue); assertTrue(scmSafeModeManager.getInSafeMode()); - 
testContainerThreshold(containers, 1.0); List pipelines = pipelineManager.getPipelines(); @@ -260,6 +288,14 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( scmSafeModeManager.getOneReplicaPipelineSafeModeRule() .getThresholdCount(); + Assert.assertEquals(healthyPipelineThresholdCount, + scmSafeModeManager.getSafeModeMetrics() + .getNumHealthyPipelinesThreshold().value()); + + Assert.assertEquals(oneReplicaThresholdCount, + scmSafeModeManager.getSafeModeMetrics() + .getNumPipelinesWithAtleastOneReplicaReportedThreshold().value()); + // Because even if no pipelines are there, and threshold we set to zero, // we shall a get an event when datanode is registered. In that case, // validate will return true, and add this to validatedRules. @@ -273,13 +309,27 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( if (i < healthyPipelineThresholdCount) { checkHealthy(i + 1); + Assert.assertEquals(i + 1, + scmSafeModeManager.getSafeModeMetrics() + .getCurrentHealthyPipelinesCount().value()); } if (i < oneReplicaThresholdCount) { checkOpen(i + 1); + Assert.assertEquals(i + 1, + scmSafeModeManager.getSafeModeMetrics() + .getCurrentPipelinesWithAtleastOneReplicaCount().value()); } } + Assert.assertEquals(healthyPipelineThresholdCount, + scmSafeModeManager.getSafeModeMetrics() + .getCurrentHealthyPipelinesCount().value()); + + Assert.assertEquals(oneReplicaThresholdCount, + scmSafeModeManager.getSafeModeMetrics() + .getCurrentPipelinesWithAtleastOneReplicaCount().value()); + GenericTestUtils.waitFor(() -> { return !scmSafeModeManager.getInSafeMode(); From 0ead2090a65817db52f9dc687befa13bebb72d51 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Wed, 29 May 2019 13:39:27 -0700 Subject: [PATCH 0063/1308] HDDS-1542. Create Radix tree to support ozone prefix ACLs. Contributed by Xiaoyu Yao. --- .../apache/hadoop/ozone/util/RadixNode.java | 59 +++++ .../apache/hadoop/ozone/util/RadixTree.java | 214 ++++++++++++++++++ .../hadoop/ozone/util/TestRadixTree.java | 129 +++++++++++ .../hadoop/ozone/util/package-info.java | 21 ++ 4 files changed, 423 insertions(+) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixNode.java create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java create mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java create mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/package-info.java diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixNode.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixNode.java new file mode 100644 index 0000000000000..3009c9a4e8c06 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixNode.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.util; + +import java.util.HashMap; + +/** + * Wrapper class for Radix tree node representing Ozone prefix path segment + * separated by "/". + */ +public class RadixNode { + + public RadixNode(String name) { + this.name = name; + this.children = new HashMap<>(); + } + + public String getName() { + return name; + } + + public boolean hasChildren() { + return children.isEmpty(); + } + + public HashMap getChildren() { + return children; + } + + public void setValue(T v) { + this.value = v; + } + + public T getValue() { + return value; + } + + private HashMap children; + + private String name; + + // TODO: k/v pairs for more metadata as needed + private T value; +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java new file mode 100644 index 0000000000000..72e9ab3f5e776 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java @@ -0,0 +1,214 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.util; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.ozone.OzoneConsts; + +import java.util.ArrayList; +import java.util.HashMap; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; + +/** + * Wrapper class for handling Ozone prefix path lookup of ACL APIs + * with radix tree. + */ +public class RadixTree { + + /** + * create a empty radix tree with root only. + */ + public RadixTree() { + root = new RadixNode(PATH_DELIMITER); + } + + /** + * If the Radix tree contains root only. + * @return true if the radix tree contains root only. + */ + public boolean isEmpty() { + return root.hasChildren(); + } + + /** + * Insert prefix tree node without value, value can be ACL or other metadata + * of the prefix path. + * @param path + */ + public void insert(String path) { + insert(path, null); + } + + /** + * Insert prefix tree node with value, value can be ACL or other metadata + * of the prefix path. + * @param path + * @param val + */ + public void insert(String path, T val) { + // all prefix path inserted should end with "/" + RadixNode n = root; + Path p = Paths.get(path); + for (int level = 0; level < p.getNameCount(); level++) { + HashMap child = n.getChildren(); + String component = p.getName(level).toString(); + if (child.containsKey(component)) { + n = child.get(component); + } else { + RadixNode tmp = new RadixNode(component); + child.put(component, tmp); + n = tmp; + } + } + if (val != null) { + n.setValue(val); + } + } + + /** + * Get the last node in the exact prefix path that matches in the tree. + * @param path - prefix path + * @return last node in the prefix tree or null if non exact prefix matchl + */ + public RadixNode getLastNodeInPrefixPath(String path) { + List> lpp = getLongestPrefixPath(path); + Path p = Paths.get(path); + if (lpp.size() != p.getNameCount() + 1) { + return null; + } else { + return lpp.get(p.getNameCount()); + } + } + + /** + * Remove prefix path. + * @param path + */ + public void removePrefixPath(String path) { + Path p = Paths.get(path); + removePrefixPathInternal(root, p, 0); + } + + /** + * Recursively remove non-overlapped part of the prefix path from radix tree. + * @param current current radix tree node. + * @param path prefix path to be removed. + * @param level current recursive level. + * @return true if current radix node can be removed. + * (not overlapped with other path), + * false otherwise. + */ + private boolean removePrefixPathInternal(RadixNode current, + Path path, int level) { + // last component is processed + if (level == path.getNameCount()) { + return current.hasChildren(); + } + + // not last component, recur for next component + String name = path.getName(level).toString(); + RadixNode node = current.getChildren().get(name); + if (node == null) { + return false; + } + + if (removePrefixPathInternal(node, path, level+1)) { + current.getChildren().remove(name); + return current.hasChildren(); + } + return false; + } + + /** + * Get the longest prefix path. + * @param path - prefix path. + * @return longest prefix path as list of RadixNode. 
+ */ + public List> getLongestPrefixPath(String path) { + RadixNode n = root; + Path p = Paths.get(path); + int level = 0; + List> result = new ArrayList<>(); + result.add(root); + while (level < p.getNameCount()) { + HashMap children = n.getChildren(); + if (children.isEmpty()) { + break; + } + String component = p.getName(level).toString(); + if (children.containsKey(component)) { + n = children.get(component); + result.add(n); + level++; + } else { + break; + } + } + return result; + } + + @VisibleForTesting + /** + * Convert radix path to string format for output. + * @param path - radix path represented by list of radix nodes. + * @return radix path as string separated by "/". + * Note: the path will always be normalized with and ending "/". + */ + public static String radixPathToString(List> path) { + StringBuilder sb = new StringBuilder(); + for (RadixNode n : path) { + sb.append(n.getName()); + sb.append(n.getName().equals(PATH_DELIMITER) ? "" : PATH_DELIMITER); + } + return sb.toString(); + } + + /** + * Get the longest prefix path. + * @param path - prefix path. + * @return longest prefix path as String separated by "/". + */ + public String getLongestPrefix(String path) { + RadixNode n = root; + Path p = Paths.get(path); + int level = 0; + while (level < p.getNameCount()) { + HashMap children = n.getChildren(); + if (children.isEmpty()) { + break; + } + String component = p.getName(level).toString(); + if (children.containsKey(component)) { + n = children.get(component); + level++; + } else { + break; + } + } + return level >= 1 ? + Paths.get(root.getName()).resolve(p.subpath(0, level)).toString() : + root.getName(); + } + + // root of a radix tree has a name of "/" and may optionally has it value. + private RadixNode root; + + private final static String PATH_DELIMITER = OzoneConsts.OZONE_URI_DELIMITER; +} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java new file mode 100644 index 0000000000000..ceed5346f8134 --- /dev/null +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.util; + +import org.junit.BeforeClass; +import org.junit.Test; + +import java.util.List; + +import static org.junit.Assert.assertEquals; + +/** + * Test Ozone Radix tree operations. 
+ */ +public class TestRadixTree { + + final static RadixTree ROOT = new RadixTree<>(); + + @BeforeClass + public static void setupRadixTree() { + // Test prefix paths with an empty tree + assertEquals(true, ROOT.isEmpty()); + assertEquals("/", ROOT.getLongestPrefix("/a/b/c")); + assertEquals("/", RadixTree.radixPathToString( + ROOT.getLongestPrefixPath("/a/g"))); + // Build Radix tree below for testing. + // a + // | + // b + // / \ + // c e + // / \ / \ \ + // d f g dir1 dir2(1000) + // | + // g + // | + // h + ROOT.insert("/a/b/c/d"); + ROOT.insert("/a/b/c/d/g/h"); + ROOT.insert("/a/b/c/f"); + ROOT.insert("/a/b/e/g"); + ROOT.insert("/a/b/e/dir1"); + ROOT.insert("/a/b/e/dir2", 1000); + } + + /** + * Tests if insert and build prefix tree is correct. + */ + @Test + public void testGetLongestPrefix() { + assertEquals("/a/b/c", ROOT.getLongestPrefix("/a/b/c")); + assertEquals("/a/b", ROOT.getLongestPrefix("/a/b")); + assertEquals("/a", ROOT.getLongestPrefix("/a")); + assertEquals("/a/b/e/g", ROOT.getLongestPrefix("/a/b/e/g/h")); + + assertEquals("/", ROOT.getLongestPrefix("/d/b/c")); + assertEquals("/a/b/e", ROOT.getLongestPrefix("/a/b/e/dir3")); + assertEquals("/a/b/c/d", ROOT.getLongestPrefix("/a/b/c/d/p")); + + assertEquals("/a/b/c/f", ROOT.getLongestPrefix("/a/b/c/f/p")); + } + + @Test + public void testGetLongestPrefixPath() { + List> lpp = + ROOT.getLongestPrefixPath("/a/b/c/d/g/p"); + RadixNode lpn = lpp.get(lpp.size()-1); + assertEquals("g", lpn.getName()); + lpn.setValue(100); + + + List> lpq = + ROOT.getLongestPrefixPath("/a/b/c/d/g/q"); + RadixNode lqn = lpp.get(lpq.size()-1); + System.out.print(RadixTree.radixPathToString(lpq)); + assertEquals(lpn, lqn); + assertEquals("g", lqn.getName()); + assertEquals(100, (int)lqn.getValue()); + + + assertEquals("/a/", RadixTree.radixPathToString( + ROOT.getLongestPrefixPath("/a/g"))); + + } + + @Test + public void testGetLastNoeInPrefixPath() { + assertEquals(null, ROOT.getLastNodeInPrefixPath("/a/g")); + RadixNode ln = ROOT.getLastNodeInPrefixPath("/a/b/e/dir1"); + assertEquals("dir1", ln.getName()); + } + + @Test + public void testRemovePrefixPath() { + + // Remove, test and restore + // Remove partially overlapped path + ROOT.removePrefixPath("/a/b/c/d/g/h"); + assertEquals("/a/b/c", ROOT.getLongestPrefix("a/b/c/d")); + ROOT.insert("/a/b/c/d/g/h"); + + // Remove fully overlapped path + ROOT.removePrefixPath("/a/b/c/d"); + assertEquals("/a/b/c/d", ROOT.getLongestPrefix("a/b/c/d")); + ROOT.insert("/a/b/c/d"); + + // Remove non existing path + ROOT.removePrefixPath("/d/a"); + assertEquals("/a/b/c/d", ROOT.getLongestPrefix("a/b/c/d")); + } + + +} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/package-info.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/package-info.java new file mode 100644 index 0000000000000..a6acd30d77c58 --- /dev/null +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/package-info.java @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.util; +/** + * Unit tests of generic ozone utils. + */ From 9ad7cad2054854c9db280f5a44616ceb5f248a24 Mon Sep 17 00:00:00 2001 From: Xudong Cao Date: Thu, 30 May 2019 06:57:48 +0800 Subject: [PATCH 0064/1308] HDDS-1530. Freon support big files larger than 2GB and add --bufferSize and --validateWrites options. Contributed by Xudong Cao. (#830) --- .../ozone/freon/RandomKeyGenerator.java | 140 +++++++++++------- .../ozone/freon/TestRandomKeyGenerator.java | 20 ++- 2 files changed, 106 insertions(+), 54 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java index b0461cb1084d0..e6888b9d40be0 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java @@ -24,10 +24,8 @@ import java.io.PrintStream; import java.text.SimpleDateFormat; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.UUID; -import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; @@ -67,7 +65,6 @@ import com.fasterxml.jackson.databind.ObjectWriter; import com.google.common.annotations.VisibleForTesting; import static java.lang.Math.min; -import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.time.DurationFormatUtils; import org.slf4j.Logger; @@ -75,6 +72,8 @@ import picocli.CommandLine.Command; import picocli.CommandLine.Option; import picocli.CommandLine.ParentCommand; +import java.util.concurrent.LinkedBlockingQueue; +import java.security.MessageDigest; /** * Data generator tool to generate as much keys as possible. 
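The hunks below replace the single in-memory key value with a reusable buffer that is written out in bufferSize chunks, which is what allows keySize to become a long and exceed 2 GB, and they validate writes by cloning one digest computed over the part shared by every key. A stripped-down sketch of that incremental-digest idea, using only the JDK with made-up sizes and a made-up class name rather than the actual Freon fields:

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.util.UUID;

    public class IncrementalDigestSketch {
      public static void main(String[] args) throws Exception {
        byte[] buffer = new byte[4096];              // stands in for keyValueBuffer
        long keySize = 3L * 1024 * 1024 * 1024;      // deliberately above Integer.MAX_VALUE
        String uuid = UUID.randomUUID().toString();  // per-key unique suffix

        // Hash the bytes shared by every key exactly once, chunk by chunk.
        MessageDigest common = MessageDigest.getInstance("MD5");
        for (long remaining = keySize - uuid.length(); remaining > 0;
            remaining -= buffer.length) {
          int chunk = (int) Math.min(buffer.length, remaining);
          common.update(buffer, 0, chunk);
        }

        // Per key: clone the shared digest state and add only the unique suffix.
        MessageDigest perKey = (MessageDigest) common.clone();
        perKey.update(uuid.getBytes(StandardCharsets.UTF_8));
        byte[] expected = perKey.digest();
        System.out.println("expected digest bytes: " + expected.length);
      }
    }

Cloning the partially updated digest avoids re-hashing the multi-gigabyte common prefix once per key, which is the same reason the patch keeps a commonInitialMD and only feeds each key's random suffix into the clone.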
@@ -103,6 +102,12 @@ enum FreonOps { private static final int QUANTILES = 10; + private byte[] keyValueBuffer = null; + + private static final String DIGEST_ALGORITHM = "MD5"; + // A common initial MesssageDigest for each key without its UUID + private MessageDigest commonInitialMD = null; + private static final Logger LOG = LoggerFactory.getLogger(RandomKeyGenerator.class); @@ -136,7 +141,20 @@ enum FreonOps { description = "Specifies the size of Key in bytes to be created", defaultValue = "10240" ) - private int keySize = 10240; + private long keySize = 10240; + + @Option( + names = "--validateWrites", + description = "Specifies whether to validate keys after writing" + ) + private boolean validateWrites = false; + + @Option( + names = "--bufferSize", + description = "Specifies the buffer size while writing", + defaultValue = "4096" + ) + private int bufferSize = 4096; @Option( names = "--json", @@ -159,9 +177,6 @@ enum FreonOps { private ReplicationFactor factor = ReplicationFactor.ONE; private int threadPoolSize; - private byte[] keyValue = null; - - private boolean validateWrites; private OzoneClient ozoneClient; private ObjectStore objectStore; @@ -185,7 +200,7 @@ enum FreonOps { private Long writeValidationSuccessCount; private Long writeValidationFailureCount; - private BlockingQueue validationQueue; + private BlockingQueue validationQueue; private ArrayList histograms = new ArrayList<>(); private OzoneConfiguration ozoneConfiguration; @@ -228,8 +243,20 @@ public Void call() throws Exception { init(freon.createOzoneConfiguration()); } - keyValue = - DFSUtil.string2Bytes(RandomStringUtils.randomAscii(keySize - 36)); + keyValueBuffer = DFSUtil.string2Bytes( + RandomStringUtils.randomAscii(bufferSize)); + + // Compute the common initial digest for all keys without their UUID + if (validateWrites) { + commonInitialMD = DigestUtils.getDigest(DIGEST_ALGORITHM); + int uuidLength = UUID.randomUUID().toString().length(); + keySize = Math.max(uuidLength, keySize); + for (long nrRemaining = keySize - uuidLength; nrRemaining > 0; + nrRemaining -= bufferSize) { + int curSize = (int)Math.min(bufferSize, nrRemaining); + commonInitialMD.update(keyValueBuffer, 0, curSize); + } + } LOG.info("Number of Threads: " + numOfThreads); threadPoolSize = @@ -241,6 +268,7 @@ public Void call() throws Exception { LOG.info("Number of Buckets per Volume: {}.", numOfBuckets); LOG.info("Number of Keys per Bucket: {}.", numOfKeys); LOG.info("Key size: {} bytes", keySize); + LOG.info("Buffer size: {} bytes", bufferSize); for (int i = 0; i < numOfVolumes; i++) { String volume = "vol-" + i + "-" + RandomStringUtils.randomNumeric(5); @@ -253,8 +281,7 @@ public Void call() throws Exception { writeValidationSuccessCount = 0L; writeValidationFailureCount = 0L; - validationQueue = - new ArrayBlockingQueue<>(numOfThreads); + validationQueue = new LinkedBlockingQueue<>(); validator = new Thread(new Validator()); validator.start(); LOG.info("Data validation is enabled."); @@ -512,43 +539,35 @@ long getUnsuccessfulValidationCount() { } /** - * Returns the length of the common key value initialized. - * - * @return key value length initialized. + * Wrapper to hold ozone keyValidate entry. */ - @VisibleForTesting - long getKeyValueLength() { - return keyValue.length; - } - - /** - * Wrapper to hold ozone key-value pair. - */ - private static class KeyValue { - + private static class KeyValidate { /** - * Bucket name associated with the key-value. + * Bucket name. 
*/ private OzoneBucket bucket; + /** - * Key name associated with the key-value. + * Key name. */ - private String key; + private String keyName; + /** - * Value associated with the key-value. + * Digest of this key's full value. */ - private byte[] value; + private byte[] digest; /** - * Constructs a new ozone key-value pair. + * Constructs a new ozone keyValidate. * - * @param key key part - * @param value value part + * @param bucket bucket part + * @param keyName key part + * @param keyName digest of this key's full value */ - KeyValue(OzoneBucket bucket, String key, byte[] value) { + KeyValidate(OzoneBucket bucket, String keyName, byte[] digest) { this.bucket = bucket; - this.key = key; - this.value = value; + this.keyName = keyName; + this.digest = digest; } } @@ -625,7 +644,11 @@ public void run() { try (Scope writeScope = GlobalTracer.get() .buildSpan("writeKeyData") .startActive(true)) { - os.write(keyValue); + for (long nrRemaining = keySize - randomValue.length; + nrRemaining > 0; nrRemaining -= bufferSize) { + int curSize = (int)Math.min(bufferSize, nrRemaining); + os.write(keyValueBuffer, 0, curSize); + } os.write(randomValue); os.close(); } @@ -639,9 +662,10 @@ public void run() { numberOfKeysAdded.getAndIncrement(); } if (validateWrites) { - byte[] value = ArrayUtils.addAll(keyValue, randomValue); + MessageDigest tmpMD = (MessageDigest)commonInitialMD.clone(); + tmpMD.update(randomValue); boolean validate = validationQueue.offer( - new KeyValue(bucket, key, value)); + new KeyValidate(bucket, key, tmpMD.digest())); if (validate) { LOG.trace("Key {}, is queued for validation.", key); } @@ -678,7 +702,8 @@ private final class FreonJobInfo { private String replicationFactor; private String replicationType; - private int keySize; + private long keySize; + private int bufferSize; private String totalThroughputPerSecond; @@ -705,6 +730,7 @@ private FreonJobInfo() { this.numOfKeys = RandomKeyGenerator.this.numOfKeys; this.numOfThreads = RandomKeyGenerator.this.numOfThreads; this.keySize = RandomKeyGenerator.this.keySize; + this.bufferSize = RandomKeyGenerator.this.bufferSize; this.jobStartTime = Time.formatTime(RandomKeyGenerator.this.jobStartTime); this.replicationFactor = RandomKeyGenerator.this.factor.name(); this.replicationType = RandomKeyGenerator.this.type.name(); @@ -856,10 +882,14 @@ public String getStatus() { return status; } - public int getKeySize() { + public long getKeySize() { return keySize; } + public int getBufferSize() { + return bufferSize; + } + public String getGitBaseRevision() { return gitBaseRevision; } @@ -925,28 +955,32 @@ public String[] getTenQuantileKeyWriteTime() { * Validates the write done in ozone cluster. 
*/ private class Validator implements Runnable { - @Override public void run() { - while (!completed) { + DigestUtils dig = new DigestUtils(DIGEST_ALGORITHM); + + while (true) { + if (completed && validationQueue.isEmpty()) { + return; + } + try { - KeyValue kv = validationQueue.poll(5, TimeUnit.SECONDS); + KeyValidate kv = validationQueue.poll(5, TimeUnit.SECONDS); if (kv != null) { - - OzoneInputStream is = kv.bucket.readKey(kv.key); - byte[] value = new byte[kv.value.length]; - int length = is.read(value); + OzoneInputStream is = kv.bucket.readKey(kv.keyName); + dig.getMessageDigest().reset(); + byte[] curDigest = dig.digest(is); totalWritesValidated++; - if (length == kv.value.length && Arrays.equals(value, kv.value)) { + if (MessageDigest.isEqual(kv.digest, curDigest)) { writeValidationSuccessCount++; } else { writeValidationFailureCount++; LOG.warn("Data validation error for key {}/{}/{}", - kv.bucket.getVolumeName(), kv.bucket, kv.key); + kv.bucket.getVolumeName(), kv.bucket, kv.keyName); LOG.warn("Expected checksum: {}, Actual checksum: {}", - DigestUtils.md5Hex(kv.value), - DigestUtils.md5Hex(value)); + kv.digest, curDigest); } + is.close(); } } catch (IOException | InterruptedException ex) { LOG.error("Exception while validating write: " + ex.getMessage()); @@ -976,7 +1010,7 @@ public void setNumOfThreads(int numOfThreads) { } @VisibleForTesting - public void setKeySize(int keySize) { + public void setKeySize(long keySize) { this.keySize = keySize; } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java index e5bb8ae80f657..c0873d2df6149 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java @@ -73,7 +73,6 @@ public void defaultTest() throws Exception { Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated()); Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); - Assert.assertEquals(10240 - 36, randomKeyGenerator.getKeyValueLength()); } @Test @@ -109,4 +108,23 @@ public void ratisTest3() throws Exception { Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); } + + @Test + public void bigFileThan2GB() throws Exception { + RandomKeyGenerator randomKeyGenerator = + new RandomKeyGenerator((OzoneConfiguration) cluster.getConf()); + randomKeyGenerator.setNumOfVolumes(1); + randomKeyGenerator.setNumOfBuckets(1); + randomKeyGenerator.setNumOfKeys(1); + randomKeyGenerator.setNumOfThreads(1); + randomKeyGenerator.setKeySize(10L + Integer.MAX_VALUE); + randomKeyGenerator.setFactor(ReplicationFactor.THREE); + randomKeyGenerator.setType(ReplicationType.RATIS); + randomKeyGenerator.setValidateWrites(true); + randomKeyGenerator.call(); + Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated()); + Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated()); + Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); + Assert.assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount()); + } } From c1caab40f27e3e4f58ff1b5ef3e93efc56bbecbe Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 29 May 2019 20:52:58 -0700 Subject: [PATCH 0065/1308] HDFS-14512. 
ONE_SSD policy will be violated while write data with DistributedFileSystem.create(....favoredNodes). Contributed by Ayush Saxena. Signed-off-by: Wei-Chiu Chuang --- .../blockmanagement/BlockPlacementPolicy.java | 12 +++++++ .../BlockPlacementPolicyDefault.java | 35 +++++++++++++------ .../hdfs/TestDistributedFileSystem.java | 29 +++++++++++++++ 3 files changed, 66 insertions(+), 10 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java index 897bf694714d3..563183882765c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java @@ -19,6 +19,7 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.EnumMap; import java.util.EnumSet; import java.util.List; import java.util.Map; @@ -101,6 +102,17 @@ DatanodeStorageInfo[] chooseTarget(String src, excludedNodes, blocksize, storagePolicy, flags); } + /** + * @param storageTypes storage types that should be used as targets. + */ + public DatanodeStorageInfo[] chooseTarget(String srcPath, int numOfReplicas, + Node writer, List chosen, boolean returnChosenNodes, + Set excludedNodes, long blocksize, BlockStoragePolicy storagePolicy, + EnumSet flags, EnumMap storageTypes) { + return chooseTarget(srcPath, numOfReplicas, writer, chosen, + returnChosenNodes, excludedNodes, blocksize, storagePolicy, flags); + } + /** * Verify if the block's placement meets requirement of placement policy, * i.e. replicas are placed on no less than minRacks racks in the system. 
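BlockPlacementPolicyDefault, diffed next, threads this map through to the recursive target selection so the favored-nodes path works against the storage types still required for the block instead of recomputing them from scratch. For reference, a hedged sketch of the shape of map the new overload accepts; the concrete counts are an assumption for the ONE_SSD policy with replication factor 3 (one SSD replica, remaining replicas on DISK):

import java.util.EnumMap;

import org.apache.hadoop.fs.StorageType;

/** Sketch only: required-storage-type counts a caller could hand to the new overload. */
final class RequiredStorageTypesExample {

  private RequiredStorageTypesExample() {
  }

  static EnumMap<StorageType, Integer> oneSsdWithThreeReplicas() {
    EnumMap<StorageType, Integer> required = new EnumMap<>(StorageType.class);
    required.put(StorageType.SSD, 1);   // one replica pinned to SSD
    required.put(StorageType.DISK, 2);  // the other two replicas on DISK
    return required;
  }
}
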
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index d13782686e3ae..6fed8a18f0af1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -150,7 +150,16 @@ public DatanodeStorageInfo[] chooseTarget(String srcPath, final BlockStoragePolicy storagePolicy, EnumSet flags) { return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes, - excludedNodes, blocksize, storagePolicy, flags); + excludedNodes, blocksize, storagePolicy, flags, null); + } + + @Override + public DatanodeStorageInfo[] chooseTarget(String srcPath, int numOfReplicas, + Node writer, List chosen, boolean returnChosenNodes, + Set excludedNodes, long blocksize, BlockStoragePolicy storagePolicy, + EnumSet flags, EnumMap storageTypes) { + return chooseTarget(numOfReplicas, writer, chosen, returnChosenNodes, + excludedNodes, blocksize, storagePolicy, flags, storageTypes); } @Override @@ -202,7 +211,8 @@ DatanodeStorageInfo[] chooseTarget(String src, DatanodeStorageInfo[] remainingTargets = chooseTarget(src, numOfReplicas, writer, new ArrayList(numOfReplicas), false, - favoriteAndExcludedNodes, blocksize, storagePolicy, flags); + favoriteAndExcludedNodes, blocksize, storagePolicy, flags, + storageTypes); for (int i = 0; i < remainingTargets.length; i++) { results.add(remainingTargets[i]); } @@ -252,7 +262,8 @@ private DatanodeStorageInfo[] chooseTarget(int numOfReplicas, Set excludedNodes, long blocksize, final BlockStoragePolicy storagePolicy, - EnumSet addBlockFlags) { + EnumSet addBlockFlags, + EnumMap sTypes) { if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) { return DatanodeStorageInfo.EMPTY_ARRAY; } @@ -290,7 +301,7 @@ private DatanodeStorageInfo[] chooseTarget(int numOfReplicas, localNode = chooseTarget(numOfReplicas, writer, excludedNodeCopy, blocksize, maxNodesPerRack, results, avoidStaleNodes, storagePolicy, - EnumSet.noneOf(StorageType.class), results.isEmpty()); + EnumSet.noneOf(StorageType.class), results.isEmpty(), sTypes); if (results.size() < numOfReplicas) { // not enough nodes; discard results and fall back results = null; @@ -300,7 +311,8 @@ private DatanodeStorageInfo[] chooseTarget(int numOfReplicas, results = new ArrayList<>(chosenStorage); localNode = chooseTarget(numOfReplicas, writer, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, - storagePolicy, EnumSet.noneOf(StorageType.class), results.isEmpty()); + storagePolicy, EnumSet.noneOf(StorageType.class), results.isEmpty(), + sTypes); } if (!returnChosenNodes) { @@ -380,6 +392,7 @@ private EnumMap getRequiredStorageTypes( * @param maxNodesPerRack max nodes allowed per rack * @param results the target nodes already chosen * @param avoidStaleNodes avoid stale nodes in replica choosing + * @param storageTypes storage type to be considered for target * @return local node of writer (not chosen node) */ private Node chooseTarget(int numOfReplicas, @@ -391,7 +404,8 @@ private Node chooseTarget(int numOfReplicas, final boolean avoidStaleNodes, final BlockStoragePolicy storagePolicy, final EnumSet unavailableStorages, - final boolean newBlock) { + final boolean newBlock, + EnumMap storageTypes) { if 
(numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) { return (writer instanceof DatanodeDescriptor) ? writer : null; } @@ -409,8 +423,9 @@ private Node chooseTarget(int numOfReplicas, .chooseStorageTypes((short) totalReplicasExpected, DatanodeStorageInfo.toStorageTypes(results), unavailableStorages, newBlock); - final EnumMap storageTypes = - getRequiredStorageTypes(requiredStorageTypes); + if (storageTypes == null) { + storageTypes = getRequiredStorageTypes(requiredStorageTypes); + } if (LOG.isTraceEnabled()) { LOG.trace("storageTypes=" + storageTypes); } @@ -453,7 +468,7 @@ private Node chooseTarget(int numOfReplicas, numOfReplicas = totalReplicasExpected - results.size(); return chooseTarget(numOfReplicas, writer, oldExcludedNodes, blocksize, maxNodesPerRack, results, false, storagePolicy, unavailableStorages, - newBlock); + newBlock, null); } boolean retry = false; @@ -473,7 +488,7 @@ private Node chooseTarget(int numOfReplicas, numOfReplicas = totalReplicasExpected - results.size(); return chooseTarget(numOfReplicas, writer, oldExcludedNodes, blocksize, maxNodesPerRack, results, false, storagePolicy, unavailableStorages, - newBlock); + newBlock, null); } } return writer; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index 8ad70852aeeac..d09ad0209c711 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -40,6 +40,7 @@ import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.HashSet; @@ -50,6 +51,7 @@ import java.util.UUID; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; @@ -80,6 +82,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DistributedFileSystem.HdfsDataOutputStreamBuilder; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.client.impl.LeaseRenewer; import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType; import org.apache.hadoop.hdfs.net.Peer; @@ -1979,4 +1982,30 @@ public Object run() throws Exception { } } } + + @Test + public void testStorageFavouredNodes() + throws IOException, InterruptedException, TimeoutException { + Configuration conf = new HdfsConfiguration(); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .storageTypes(new StorageType[] {StorageType.SSD, StorageType.DISK}) + .numDataNodes(3).storagesPerDatanode(2).build()) { + DistributedFileSystem fs = cluster.getFileSystem(); + Path file1 = new Path("/tmp/file1"); + fs.mkdirs(new Path("/tmp")); + fs.setStoragePolicy(new Path("/tmp"), "ONE_SSD"); + InetSocketAddress[] addrs = + {cluster.getDataNodes().get(0).getXferAddress()}; + HdfsDataOutputStream stream = fs.create(file1, FsPermission.getDefault(), + false, 1024, (short) 3, 1024, null, addrs); + stream.write("Some Bytes".getBytes()); + stream.close(); + DFSTestUtil.waitReplication(fs, file1, (short) 3); + BlockLocation[] 
locations = fs.getClient() + .getBlockLocations(file1.toUri().getPath(), 0, Long.MAX_VALUE); + int numSSD = Collections.frequency( + Arrays.asList(locations[0].getStorageTypes()), StorageType.SSD); + assertEquals("Number of SSD should be 1 but was : " + numSSD, 1, numSSD); + } + } } From d23e8e9232f030fcdeff91b9e8a625516244dc72 Mon Sep 17 00:00:00 2001 From: Wanqiang Ji Date: Tue, 28 May 2019 22:20:31 +0800 Subject: [PATCH 0066/1308] HADOOP-16334. Fix yetus-wrapper not working when HADOOP_YETUS_VERSION >= 0.9.0 This closes #864 Signed-off-by: Akira Ajisaka --- dev-support/bin/yetus-wrapper | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper index ae05d426b2c76..692216e7500d8 100755 --- a/dev-support/bin/yetus-wrapper +++ b/dev-support/bin/yetus-wrapper @@ -68,6 +68,10 @@ function yetus_abs return 1 } +function version_ge() +{ + test "$(echo "$@" | tr " " "\n" | sort -rV | head -n 1)" == "$1"; +} WANTED="$1" shift @@ -77,11 +81,17 @@ HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.8.0} BIN=$(yetus_abs "${BASH_SOURCE-$0}") BINDIR=$(dirname "${BIN}") +## HADOOP_YETUS_VERSION >= 0.9.0 the tarball named with apache-yetus prefix +if version_ge "${HADOOP_YETUS_VERSION}" "0.9.0"; then + YETUS_PREFIX=apache-yetus +else + YETUS_PREFIX=yetus +fi + ### ### if YETUS_HOME is set, then try to use it ### -if [[ -n "${YETUS_HOME}" - && -x "${YETUS_HOME}/bin/${WANTED}" ]]; then +if [[ -n "${YETUS_HOME}" && -x "${YETUS_HOME}/bin/${WANTED}" ]]; then exec "${YETUS_HOME}/bin/${WANTED}" "${ARGV[@]}" fi @@ -105,8 +115,8 @@ HADOOP_PATCHPROCESS=${mytmpdir} ## ## if we've already DL'd it, then short cut ## -if [[ -x "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" ]]; then - exec "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" "${ARGV[@]}" +if [[ -x "${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/bin/${WANTED}" ]]; then + exec "${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/bin/${WANTED}" "${ARGV[@]}" fi ## @@ -114,7 +124,7 @@ fi ## BASEURL="https://archive.apache.org/dist/yetus/${HADOOP_YETUS_VERSION}/" -TARBALL="yetus-${HADOOP_YETUS_VERSION}-bin.tar" +TARBALL="${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}-bin.tar" GPGBIN=$(command -v gpg) CURLBIN=$(command -v curl) @@ -166,9 +176,9 @@ if ! (gunzip -c "${TARBALL}.gz" | tar xpf -); then exit 1 fi -if [[ -x "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" ]]; then +if [[ -x "${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/bin/${WANTED}" ]]; then popd >/dev/null - exec "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" "${ARGV[@]}" + exec "${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/bin/${WANTED}" "${ARGV[@]}" fi ## From 12be6ff2ff05970b20367d910296c4c0ec642b16 Mon Sep 17 00:00:00 2001 From: Ajay Yadav <7813154+ajayydv@users.noreply.github.com> Date: Wed, 29 May 2019 23:18:06 -0700 Subject: [PATCH 0067/1308] HDDS-1539. Implement addAcl,removeAcl,setAcl,getAcl for Volume. Contributed Ajay Kumar. 
(#847) --- .../hadoop/ozone/client/ObjectStore.java | 51 +++++ .../ozone/client/protocol/ClientProtocol.java | 41 ++++ .../hadoop/ozone/client/rest/RestClient.java | 55 ++++++ .../hadoop/ozone/client/rpc/RpcClient.java | 51 +++++ .../java/org/apache/hadoop/ozone/OmUtils.java | 4 + .../org/apache/hadoop/ozone/OzoneAcl.java | 93 +++++---- .../ozone/om/exceptions/OMException.java | 2 +- .../hadoop/ozone/om/helpers/OmBucketInfo.java | 4 +- .../ozone/om/helpers/OmOzoneAclMap.java | 129 ++++++++----- .../hadoop/ozone/om/helpers/OmVolumeArgs.java | 17 +- .../om/protocol/OzoneManagerProtocol.java | 41 ++++ ...ManagerProtocolClientSideTranslatorPB.java | 116 +++++++++++- .../hadoop/ozone/protocolPB/OMPBHelper.java | 20 +- .../ozone/security/acl/IAccessAuthorizer.java | 15 +- .../hadoop/ozone/security/acl/OzoneObj.java | 11 ++ .../ozone/security/acl/OzoneObjInfo.java | 31 +++ .../src/main/proto/OzoneManagerProtocol.proto | 65 ++++++- .../apache/hadoop/ozone/TestOzoneAcls.java | 63 +++++-- .../rpc/TestOzoneRpcClientAbstract.java | 42 +++++ .../hadoop/ozone/ozShell/TestOzoneShell.java | 6 +- .../apache/hadoop/ozone/om/OzoneManager.java | 85 +++++++++ .../apache/hadoop/ozone/om/VolumeManager.java | 41 ++++ .../hadoop/ozone/om/VolumeManagerImpl.java | 177 ++++++++++++++++++ .../OzoneManagerRequestHandler.java | 93 +++++++-- .../om/ratis/TestOzoneManagerRatisServer.java | 3 +- 25 files changed, 1112 insertions(+), 144 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index c5e3210d44a11..2db4a6d5aa38d 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -31,11 +31,13 @@ import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.io.Text; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.security.UserGroupInformation; import com.google.common.annotations.VisibleForTesting; @@ -444,4 +446,53 @@ public String getCanonicalServiceName() { return proxy.getCanonicalServiceName(); } + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * @return true if acl is added successfully, else false. + * @throws IOException if there is error. + * */ + public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + return proxy.addAcl(obj, acl); + } + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * @return true if acl is added successfully, else false. + * @throws IOException if there is error. + */ + public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + return proxy.removeAcl(obj, acl); + } + + /** + * Acls to be set for given Ozone object. 
This operations reset ACL for given + * object to list of ACLs provided in argument. + * + * @param obj Ozone object. + * @param acls List of acls. + * @return true if acl is added successfully, else false. + * @throws IOException if there is error. + */ + public boolean setAcl(OzoneObj obj, List acls) throws IOException { + return proxy.setAcl(obj, acls); + } + + /** + * Returns list of ACLs for given Ozone object. + * + * @param obj Ozone object. + * @return true if acl is added successfully, else false. + * @throws IOException if there is error. + */ + public List getAcl(OzoneObj obj) throws IOException { + return proxy.getAcl(obj); + } + } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index c7607ef53dd30..d5bac34eefdef 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -43,6 +43,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.token.Token; @@ -621,4 +622,44 @@ OzoneOutputStream createFile(String volumeName, String bucketName, List listStatus(String volumeName, String bucketName, String keyName, boolean recursive, String startKey, long numEntries) throws IOException; + + + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * + * @throws IOException if there is error. + * */ + boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * + * @throws IOException if there is error. + * */ + boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Acls to be set for given Ozone object. This operations reset ACL for + * given object to list of ACLs provided in argument. + * @param obj Ozone object. + * @param acls List of acls. + * + * @throws IOException if there is error. + * */ + boolean setAcl(OzoneObj obj, List acls) throws IOException; + + /** + * Returns list of ACLs for given Ozone object. + * @param obj Ozone object. + * + * @throws IOException if there is error. 
+ * */ + List getAcl(OzoneObj obj) throws IOException; + } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java index 71fb8ca8c6c66..f3afc924c3676 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java @@ -54,6 +54,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.web.response.ListBuckets; import org.apache.hadoop.ozone.web.response.ListKeys; import org.apache.hadoop.ozone.web.response.ListVolumes; @@ -1121,4 +1122,58 @@ public List listStatus(String volumeName, String bucketName, throw new UnsupportedOperationException( "Ozone REST protocol does not " + "support this operation."); } + + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * @throws IOException if there is error. + */ + @Override + public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + throw new UnsupportedOperationException("Ozone REST protocol does not" + + " support this operation."); + } + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * @throws IOException if there is error. + */ + @Override + public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + throw new UnsupportedOperationException("Ozone REST protocol does not" + + " support this operation."); + } + + /** + * Acls to be set for given Ozone object. This operations reset ACL for given + * object to list of ACLs provided in argument. + * + * @param obj Ozone object. + * @param acls List of acls. + * @throws IOException if there is error. + */ + @Override + public boolean setAcl(OzoneObj obj, List acls) throws IOException { + throw new UnsupportedOperationException("Ozone REST protocol does not" + + " support this operation."); + } + + /** + * Returns list of ACLs for given Ozone object. + * + * @param obj Ozone object. + * @throws IOException if there is error. 
+ */ + @Override + public List getAcl(OzoneObj obj) throws IOException { + throw new UnsupportedOperationException("Ozone REST protocol does not" + + " support this operation."); + } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 3aa4fb8e6358b..cb6ac539a9a14 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -87,6 +87,7 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -1009,6 +1010,56 @@ public List listStatus(String volumeName, String bucketName, .listStatus(keyArgs, recursive, startKey, numEntries); } + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * @throws IOException if there is error. + */ + @Override + public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + return ozoneManagerClient.addAcl(obj, acl); + } + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * @throws IOException if there is error. + */ + @Override + public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + return ozoneManagerClient.removeAcl(obj, acl); + } + + /** + * Acls to be set for given Ozone object. This operations reset ACL for given + * object to list of ACLs provided in argument. + * + * @param obj Ozone object. + * @param acls List of acls. + * @throws IOException if there is error. + */ + @Override + public boolean setAcl(OzoneObj obj, List acls) throws IOException { + return ozoneManagerClient.setAcl(obj, acls); + } + + /** + * Returns list of ACLs for given Ozone object. + * + * @param obj Ozone object. + * @throws IOException if there is error. 
+ */ + @Override + public List getAcl(OzoneObj obj) throws IOException { + return ozoneManagerClient.getAcl(obj); + } + private OzoneInputStream createInputStream(OmKeyInfo keyInfo, String requestId) throws IOException { LengthInputStream lengthInputStream = KeyInputStream diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index f060735296c0b..5cd51421cb72d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -190,6 +190,7 @@ public static boolean isReadOnly( case GetFileStatus: case LookupFile: case ListStatus: + case GetAcl: return true; case CreateVolume: case SetVolumeProperty: @@ -216,6 +217,9 @@ public static boolean isReadOnly( case ApplyInitiateMultiPartUpload: case CreateDirectory: case CreateFile: + case RemoveAcl: + case SetAcl: + case AddAcl: return false; default: LOG.error("CmdType {} is not categorized as readOnly or not.", cmdType); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index eaec507314dbe..9a50ee03c1351 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -19,10 +19,15 @@ package org.apache.hadoop.ozone; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import java.util.ArrayList; +import java.util.BitSet; import java.util.List; import java.util.Objects; @@ -36,10 +41,11 @@ *

  • world::rw * */ +@JsonIgnoreProperties(value = {"aclBitSet"}) public class OzoneAcl { private ACLIdentityType type; private String name; - private List rights; + private BitSet aclBitSet; /** * Constructor for OzoneAcl. @@ -56,8 +62,8 @@ public OzoneAcl() { */ public OzoneAcl(ACLIdentityType type, String name, ACLType acl) { this.name = name; - this.rights = new ArrayList<>(); - this.rights.add(acl); + this.aclBitSet = new BitSet(ACLType.getNoOfAcls()); + aclBitSet.set(acl.ordinal(), true); this.type = type; if (type == ACLIdentityType.WORLD && name.length() != 0) { throw new IllegalArgumentException("Unexpected name part in world type"); @@ -75,9 +81,20 @@ public OzoneAcl(ACLIdentityType type, String name, ACLType acl) { * @param name - Name of user * @param acls - Rights */ - public OzoneAcl(ACLIdentityType type, String name, List acls) { + public OzoneAcl(ACLIdentityType type, String name, BitSet acls) { + Objects.requireNonNull(type); + Objects.requireNonNull(acls); + + if(acls.cardinality() > ACLType.getNoOfAcls()) { + throw new IllegalArgumentException("Acl bitset passed has unexpected " + + "size. bitset size:" + acls.cardinality() + ", bitset:" + + acls.toString()); + } + + this.aclBitSet = (BitSet) acls.clone(); + acls.stream().forEach(a -> aclBitSet.set(a)); + this.name = name; - this.rights = acls; this.type = type; if (type == ACLIdentityType.WORLD && name.length() != 0) { throw new IllegalArgumentException("Unexpected name part in world type"); @@ -105,9 +122,10 @@ public static OzoneAcl parseAcl(String acl) throws IllegalArgumentException { } ACLIdentityType aclType = ACLIdentityType.valueOf(parts[0].toUpperCase()); - List acls = new ArrayList<>(); + BitSet acls = new BitSet(ACLType.getNoOfAcls()); + for (char ch : parts[2].toCharArray()) { - acls.add(ACLType.getACLRight(String.valueOf(ch))); + acls.set(ACLType.getACLRight(String.valueOf(ch)).ordinal()); } // TODO : Support sanitation of these user names by calling into @@ -115,9 +133,27 @@ public static OzoneAcl parseAcl(String acl) throws IllegalArgumentException { return new OzoneAcl(aclType, parts[1], acls); } + public static OzoneAclInfo toProtobuf(OzoneAcl acl) { + OzoneAclInfo.Builder builder = OzoneAclInfo.newBuilder() + .setName(acl.getName()) + .setType(OzoneAclType.valueOf(acl.getType().name())); + acl.getAclBitSet().stream().forEach(a -> + builder.addRights(OzoneAclRights.valueOf(ACLType.values()[a].name()))); + return builder.build(); + } + + public static OzoneAcl fromProtobuf(OzoneAclInfo protoAcl) { + BitSet aclRights = new BitSet(ACLType.getNoOfAcls()); + protoAcl.getRightsList().parallelStream().forEach(a -> + aclRights.set(a.ordinal())); + + return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()), + protoAcl.getName(), aclRights); + } + @Override public String toString() { - return type + ":" + name + ":" + ACLType.getACLString(rights); + return type + ":" + name + ":" + ACLType.getACLString(aclBitSet); } /** @@ -131,7 +167,7 @@ public String toString() { */ @Override public int hashCode() { - return Objects.hash(this.getName(), this.getRights().toString(), + return Objects.hash(this.getName(), this.getAclBitSet(), this.getType().toString()); } @@ -149,8 +185,16 @@ public String getName() { * * @return - Rights */ - public List getRights() { - return rights; + public BitSet getAclBitSet() { + return aclBitSet; + } + + public List getAclList() { + List acls = new ArrayList<>(ACLType.getNoOfAcls()); + if(aclBitSet != null) { + aclBitSet.stream().forEach(a -> acls.add(ACLType.values()[a])); + 
} + return acls; } /** @@ -179,29 +223,8 @@ public boolean equals(Object obj) { return false; } OzoneAcl otherAcl = (OzoneAcl) obj; - return otherAcl.toString().equals(this.toString()); - } - - /** - * ACL types. - */ - public enum OzoneACLType { - USER(OzoneConsts.OZONE_ACL_USER_TYPE), - GROUP(OzoneConsts.OZONE_ACL_GROUP_TYPE), - WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE); - - /** - * String value for this Enum. - */ - private final String value; - - /** - * Init OzoneACLtypes enum. - * - * @param val String type for this enum. - */ - OzoneACLType(String val) { - value = val; - } + return otherAcl.getName().equals(this.getName()) && + otherAcl.getType().equals(this.getType()) && + otherAcl.getAclBitSet().equals(this.getAclBitSet()); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index 2c4418cb9ec4b..96a860c98f072 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -195,6 +195,6 @@ public enum ResultCodes { FILE_ALREADY_EXISTS, - NOT_A_FILE + NOT_A_FILE, } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index 4cdaa48ec570c..8fb4697287275 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -274,8 +274,8 @@ public BucketInfo getProtobuf() { BucketInfo.Builder bib = BucketInfo.newBuilder() .setVolumeName(volumeName) .setBucketName(bucketName) - .addAllAcls(acls.stream().map( - OMPBHelper::convertOzoneAcl).collect(Collectors.toList())) + .addAllAcls(acls.stream().map(OMPBHelper::convertOzoneAcl) + .collect(Collectors.toList())) .setIsVersionEnabled(isVersionEnabled) .setStorageType(storageType.toProto()) .setCreationTime(creationTime) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java index 8831c6b879c28..cc181f7549afb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java @@ -18,18 +18,26 @@ package org.apache.hadoop.ozone.om.helpers; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.protocol.proto .OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; import org.apache.hadoop.ozone.protocol.proto .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; +import java.util.BitSet; import java.util.List; import java.util.LinkedList; import java.util.Map; import java.util.ArrayList; import java.util.HashMap; +import java.util.Objects; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights.ALL; /** * This helper class keeps a map of all user and their 
permissions. @@ -37,7 +45,7 @@ @SuppressWarnings("ProtocolBufferOrdinal") public class OmOzoneAclMap { // per Acl Type user:rights map - private ArrayList>> aclMaps; + private ArrayList> aclMaps; OmOzoneAclMap() { aclMaps = new ArrayList<>(); @@ -46,18 +54,77 @@ public class OmOzoneAclMap { } } - private Map> getMap(OzoneAclType type) { + private Map getMap(OzoneAclType type) { return aclMaps.get(type.ordinal()); } // For a given acl type and user, get the stored acl - private List getAcl(OzoneAclType type, String user) { + private BitSet getAcl(OzoneAclType type, String user) { return getMap(type).get(user); } + public List getAcl() { + List acls = new ArrayList<>(); + + for (OzoneAclType type : OzoneAclType.values()) { + aclMaps.get(type.ordinal()).entrySet().stream(). + forEach(entry -> acls.add(new OzoneAcl(ACLIdentityType. + valueOf(type.name()), entry.getKey(), entry.getValue()))); + } + return acls; + } + // Add a new acl to the map - public void addAcl(OzoneAclInfo acl) { - getMap(acl.getType()).put(acl.getName(), acl.getRightsList()); + public void addAcl(OzoneAcl acl) throws OMException { + Objects.requireNonNull(acl, "Acl should not be null."); + OzoneAclType aclType = OzoneAclType.valueOf(acl.getType().name()); + if (!getMap(aclType).containsKey(acl.getName())) { + getMap(aclType).put(acl.getName(), acl.getAclBitSet()); + } else { + // throw exception if acl is already added. + throw new OMException("Acl " + acl + " already exist.", INVALID_REQUEST); + } + } + + // Add a new acl to the map + public void setAcls(List acls) throws OMException { + Objects.requireNonNull(acls, "Acls should not be null."); + // Remove all Acls. + for (OzoneAclType type : OzoneAclType.values()) { + aclMaps.get(type.ordinal()).clear(); + } + + // Add acls. + for (OzoneAcl acl : acls) { + addAcl(acl); + } + } + + // Add a new acl to the map + public void removeAcl(OzoneAcl acl) throws OMException { + Objects.requireNonNull(acl, "Acl should not be null."); + OzoneAclType aclType = OzoneAclType.valueOf(acl.getType().name()); + if (getMap(aclType).containsKey(acl.getName())) { + getMap(aclType).remove(acl.getName()); + } else { + // throw exception if acl is already added. + throw new OMException("Acl [" + acl + "] doesn't exist.", + INVALID_REQUEST); + } + } + + // Add a new acl to the map + public void addAcl(OzoneAclInfo acl) throws OMException { + Objects.requireNonNull(acl, "Acl should not be null."); + if (!getMap(acl.getType()).containsKey(acl.getName())) { + BitSet acls = new BitSet(OzoneAclRights.values().length); + acl.getRightsList().parallelStream().forEach(a -> acls.set(a.ordinal())); + getMap(acl.getType()).put(acl.getName(), acls); + } else { + // throw exception if acl is already added. 
+ + throw new OMException("Acl " + acl + " already exist.", INVALID_REQUEST); + } } // for a given acl, check if the user has access rights @@ -66,40 +133,14 @@ public boolean hasAccess(OzoneAclInfo acl) { return false; } - List storedRights = getAcl(acl.getType(), acl.getName()); - if(storedRights == null) { + BitSet aclBitSet = getAcl(acl.getType(), acl.getName()); + if (aclBitSet == null) { return false; } - for (OzoneAclRights right : storedRights) { - switch (right) { - case CREATE: - return (right == OzoneAclRights.CREATE) - || (right == OzoneAclRights.ALL); - case LIST: - return (right == OzoneAclRights.LIST) - || (right == OzoneAclRights.ALL); - case WRITE: - return (right == OzoneAclRights.WRITE) - || (right == OzoneAclRights.ALL); - case READ: - return (right == OzoneAclRights.READ) - || (right == OzoneAclRights.ALL); - case DELETE: - return (right == OzoneAclRights.DELETE) - || (right == OzoneAclRights.ALL); - case READ_ACL: - return (right == OzoneAclRights.READ_ACL) - || (right == OzoneAclRights.ALL); - case WRITE_ACL: - return (right == OzoneAclRights.WRITE_ACL) - || (right == OzoneAclRights.ALL); - case ALL: - return (right == OzoneAclRights.ALL); - case NONE: - return !(right == OzoneAclRights.NONE); - default: - return false; + for (OzoneAclRights right : acl.getRightsList()) { + if (aclBitSet.get(right.ordinal()) || aclBitSet.get(ALL.ordinal())) { + return true; } } return false; @@ -108,15 +149,15 @@ public boolean hasAccess(OzoneAclInfo acl) { // Convert this map to OzoneAclInfo Protobuf List public List ozoneAclGetProtobuf() { List aclList = new LinkedList<>(); - for (OzoneAclType type: OzoneAclType.values()) { - for (Map.Entry> entry : + for (OzoneAclType type : OzoneAclType.values()) { + for (Map.Entry entry : aclMaps.get(type.ordinal()).entrySet()) { - OzoneAclInfo aclInfo = OzoneAclInfo.newBuilder() + OzoneAclInfo.Builder builder = OzoneAclInfo.newBuilder() .setName(entry.getKey()) - .setType(type) - .addAllRights(entry.getValue()) - .build(); - aclList.add(aclInfo); + .setType(type); + entry.getValue().stream().forEach(a -> + builder.addRights(OzoneAclRights.values()[a])); + aclList.add(builder.build()); } } @@ -125,7 +166,7 @@ public List ozoneAclGetProtobuf() { // Create map from list of OzoneAclInfos public static OmOzoneAclMap ozoneAclGetFromProtobuf( - List aclList) { + List aclList) throws OMException { OmOzoneAclMap aclMap = new OmOzoneAclMap(); for (OzoneAclInfo acl : aclList) { aclMap.addAcl(acl); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index 7b25d78704fc8..95ed231c2e0a7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -23,8 +23,10 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.Auditable; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; @@ -77,6 +79,18 @@ public void setCreationTime(long time) { this.creationTime = time; } + public void addAcl(OzoneAcl acl) throws OMException { + this.aclMap.addAcl(acl); + } + + public void setAcls(List acls) throws OMException { + 
this.aclMap.setAcls(acls); + } + + public void removeAcl(OzoneAcl acl) throws OMException { + this.aclMap.removeAcl(acl); + } + /** * Returns the Admin Name. * @return String. @@ -232,7 +246,8 @@ public VolumeInfo getProtobuf() { .build(); } - public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo) { + public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo) + throws OMException { OmOzoneAclMap aclMap = OmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList()); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 0a7d6fd0ada74..40dae8f42bfef 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.protocol; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -45,6 +46,7 @@ import java.util.List; import org.apache.hadoop.ozone.security.OzoneDelegationTokenSelector; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.token.TokenInfo; @@ -464,5 +466,44 @@ OpenKeySession createFile(OmKeyArgs keyArgs, boolean overWrite, */ List listStatus(OmKeyArgs keyArgs, boolean recursive, String startKey, long numEntries) throws IOException; + + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * + * @throws IOException if there is error. + * */ + boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * + * @throws IOException if there is error. + * */ + boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Acls to be set for given Ozone object. This operations reset ACL for + * given object to list of ACLs provided in argument. + * @param obj Ozone object. + * @param acls List of acls. + * + * @throws IOException if there is error. + * */ + boolean setAcl(OzoneObj obj, List acls) throws IOException; + + /** + * Returns list of ACLs for given Ozone object. + * @param obj Ozone object. + * + * @throws IOException if there is error. 
+ * */ + List getAcl(OzoneObj obj) throws IOException; + } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 48d19aea89ea0..7d1e4151a7110 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -34,6 +34,7 @@ import org.apache.hadoop.io.retry.RetryProxy; import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtocolTranslator; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.exceptions.NotLeaderException; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -55,6 +56,9 @@ import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileResponse; @@ -62,6 +66,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest; @@ -107,6 +113,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3BucketInfoRequest; @@ -117,12 +125,14 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3ListBucketsResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse; +import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; @@ -528,12 +538,11 @@ private List listVolume(ListVolumeRequest request) ListVolumeResponse resp = handleError(submitRequest(omRequest)).getListVolumeResponse(); - - - - return resp.getVolumeInfoList().stream() - .map(item -> OmVolumeArgs.getFromProtobuf(item)) - .collect(Collectors.toList()); + List list = new ArrayList<>(resp.getVolumeInfoList().size()); + for (VolumeInfo info : resp.getVolumeInfoList()) { + list.add(OmVolumeArgs.getFromProtobuf(info)); + } + return list; } /** @@ -1298,6 +1307,101 @@ public OmKeyInfo lookupFile(OmKeyArgs args) return OmKeyInfo.getFromProtobuf(resp.getKeyInfo()); } + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * @throws IOException if there is error. + */ + @Override + public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + AddAclRequest req = AddAclRequest.newBuilder() + .setObj(OzoneObj.toProtobuf(obj)) + .setAcl(OzoneAcl.toProtobuf(acl)) + .build(); + + OMRequest omRequest = createOMRequest(Type.AddAcl) + .setAddAclRequest(req) + .build(); + AddAclResponse addAclResponse = + handleError(submitRequest(omRequest)).getAddAclResponse(); + + return addAclResponse.getResponse(); + } + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * @throws IOException if there is error. + */ + @Override + public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + RemoveAclRequest req = RemoveAclRequest.newBuilder() + .setObj(OzoneObj.toProtobuf(obj)) + .setAcl(OzoneAcl.toProtobuf(acl)) + .build(); + + OMRequest omRequest = createOMRequest(Type.RemoveAcl) + .setRemoveAclRequest(req) + .build(); + RemoveAclResponse response = + handleError(submitRequest(omRequest)).getRemoveAclResponse(); + + return response.getResponse(); + } + + /** + * Acls to be set for given Ozone object. This operations reset ACL for given + * object to list of ACLs provided in argument. + * + * @param obj Ozone object. + * @param acls List of acls. + * @throws IOException if there is error. 
+ */ + @Override + public boolean setAcl(OzoneObj obj, List acls) throws IOException { + SetAclRequest.Builder builder = SetAclRequest.newBuilder() + .setObj(OzoneObj.toProtobuf(obj)); + + acls.parallelStream().forEach(a -> builder.addAcl(OzoneAcl.toProtobuf(a))); + + OMRequest omRequest = createOMRequest(Type.SetAcl) + .setSetAclRequest(builder.build()) + .build(); + OzoneManagerProtocolProtos.SetAclResponse response = + handleError(submitRequest(omRequest)).getSetAclResponse(); + + return response.getResponse(); + } + + /** + * Returns list of ACLs for given Ozone object. + * + * @param obj Ozone object. + * @throws IOException if there is error. + */ + @Override + public List getAcl(OzoneObj obj) throws IOException { + GetAclRequest req = GetAclRequest.newBuilder() + .setObj(OzoneObj.toProtobuf(obj)) + .build(); + + OMRequest omRequest = createOMRequest(Type.GetAcl) + .setGetAclRequest(req) + .build(); + GetAclResponse response = + handleError(submitRequest(omRequest)).getGetAclResponse(); + List acls = new ArrayList<>(); + response.getAclsList().stream().forEach(a -> + acls.add(OzoneAcl.fromProtobuf(a))); + return acls; + } + @Override public OpenKeySession createFile(OmKeyArgs args, boolean overWrite, boolean recursive) throws IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java index 45ae0b301af94..bd4e300bddecb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java @@ -41,12 +41,12 @@ import org.apache.hadoop.ozone.protocol.proto .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.security.token.Token; +import java.util.BitSet; import java.util.List; import java.util.ArrayList; @@ -84,15 +84,14 @@ public static OzoneAclInfo convertOzoneAcl(OzoneAcl acl) { default: throw new IllegalArgumentException("ACL type is not recognized"); } - List aclRights = new ArrayList<>(); - - for (ACLType right : acl.getRights()) { - aclRights.add(OzoneAclRights.valueOf(right.name())); - } + List ozAclRights = + new ArrayList<>(acl.getAclBitSet().cardinality()); + acl.getAclBitSet().stream().forEach(a -> ozAclRights.add( + OzoneAclRights.valueOf(ACLType.values()[a].name()))); return OzoneAclInfo.newBuilder().setType(aclType) .setName(acl.getName()) - .addAllRights(aclRights) + .addAllRights(ozAclRights) .build(); } @@ -122,10 +121,9 @@ public static OzoneAcl convertOzoneAcl(OzoneAclInfo aclInfo) { throw new IllegalArgumentException("ACL type is not recognized"); } - List aclRights = new ArrayList<>(); - for (OzoneAclRights acl : aclInfo.getRightsList()) { - aclRights.add(ACLType.valueOf(acl.name())); - } + BitSet aclRights = new BitSet(ACLType.getNoOfAcls()); + aclInfo.getRightsList().stream().forEach(a -> + aclRights.set(ACLType.valueOf(a.name()).ordinal())); return new OzoneAcl(aclType, aclInfo.getName(), aclRights); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java index 2c47000a6cf2a..f0b73ee6d75b8 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java @@ -20,7 +20,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.ozone.OzoneConsts; -import java.util.List; +import java.util.BitSet; /** * Public API for Ozone ACLs. Security providers providing support for Ozone @@ -54,6 +54,11 @@ enum ACLType { WRITE_ACL, ALL, NONE; + private static int length = ACLType.values().length; + + public static int getNoOfAcls() { + return length; + } /** * Returns the ACL rights based on passed in String. @@ -86,7 +91,7 @@ public static ACLType getACLRight(String type) { case OzoneConsts.OZONE_ACL_NONE: return ACLType.NONE; default: - throw new IllegalArgumentException(type + " ACL right is not " + + throw new IllegalArgumentException("[" + type + "] ACL right is not " + "recognized"); } @@ -98,10 +103,10 @@ public static ACLType getACLRight(String type) { * @param acls ACLType * @return String representation of acl */ - public static String getACLString(List acls) { + public static String getACLString(BitSet acls) { StringBuffer sb = new StringBuffer(); - acls.forEach(acl -> { - sb.append(getAclString(acl)); + acls.stream().forEach(acl -> { + sb.append(getAclString(ACLType.values()[acl])); }); return sb.toString(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java index bbb07b3d9b2fa..74d0aa5f7fa1d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java @@ -19,6 +19,10 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType; + +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.StoreType.*; /** * Class representing an unique ozone object. @@ -37,6 +41,13 @@ public abstract class OzoneObj implements IOzoneObj { this.storeType = storeType; } + public static OzoneManagerProtocolProtos.OzoneObj toProtobuf(OzoneObj obj) { + return OzoneManagerProtocolProtos.OzoneObj.newBuilder() + .setResType(ObjectType.valueOf(obj.getResourceType().name())) + .setStoreType(valueOf(obj.getStoreType().name())) + .setPath(obj.getPath()).build(); + } + public ResourceType getResourceType() { return resType; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index ba1fa3730e194..16df10fb274aa 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -17,6 +17,9 @@ package org.apache.hadoop.ozone.security.acl; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +import java.util.StringTokenizer; /** * Class representing an ozone object. 
@@ -69,6 +72,34 @@ public String getKeyName() { return keyName; } + public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj + proto) { + Builder builder = new Builder() + .setResType(ResourceType.valueOf(proto.getResType().name())) + .setStoreType(StoreType.valueOf(proto.getStoreType().name())); + StringTokenizer tokenizer = new StringTokenizer(proto.getPath(), + OzoneConsts.OZONE_URI_DELIMITER); + // Set volume name. + if (tokenizer.hasMoreTokens()) { + builder.setVolumeName(tokenizer.nextToken()); + } + // Set bucket name. + if (tokenizer.hasMoreTokens()) { + builder.setBucketName(tokenizer.nextToken()); + } + // Set key name + if (tokenizer.hasMoreTokens()) { + StringBuffer sb = new StringBuffer(); + while (tokenizer.hasMoreTokens()) { + sb.append(OzoneConsts.OZONE_URI_DELIMITER); + sb.append(tokenizer.nextToken()); + sb.append(OzoneConsts.OZONE_URI_DELIMITER); + } + builder.setKeyName(sb.toString()); + } + return builder.build(); + } + /** * Inner builder class. */ diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index e82741bed5ec4..316acbcbbc40c 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -86,6 +86,11 @@ enum Type { CreateFile = 72; LookupFile = 73; ListStatus = 74; + AddAcl = 75; + RemoveAcl = 76; + SetAcl = 77; + GetAcl = 78; + } message OMRequest { @@ -143,6 +148,10 @@ message OMRequest { optional CreateFileRequest createFileRequest = 72; optional LookupFileRequest lookupFileRequest = 73; optional ListStatusRequest listStatusRequest = 74; + optional AddAclRequest addAclRequest = 75; + optional RemoveAclRequest removeAclRequest = 76; + optional SetAclRequest setAclRequest = 77; + optional GetAclRequest getAclRequest = 78; } message OMResponse { @@ -203,6 +212,10 @@ message OMResponse { optional CreateFileResponse createFileResponse = 72; optional LookupFileResponse lookupFileResponse = 73; optional ListStatusResponse listStatusResponse = 74; + optional AddAclResponse addAclResponse = 75; + optional RemoveAclResponse removeAclResponse = 76; + optional SetAclResponse setAclResponse = 77; + optional GetAclResponse getAclResponse = 78; } enum Status { @@ -446,6 +459,22 @@ message BucketArgs { repeated hadoop.hdds.KeyValue metadata = 7; } +message OzoneObj { + enum ObjectType { + VOLUME = 1; + BUCKET = 2; + KEY = 3; + } + + enum StoreType { + OZONE = 1; + S3 = 2; + } + required ObjectType resType = 1; + required StoreType storeType = 2 [default = S3]; + required string path = 3; +} + message OzoneAclInfo { enum OzoneAclType { USER = 1; @@ -471,12 +500,46 @@ message OzoneAclInfo { repeated OzoneAclRights rights = 3; } +message GetAclRequest { + required OzoneObj obj = 1; +} + +message GetAclResponse { + repeated OzoneAclInfo acls = 1; +} + +message AddAclRequest { + required OzoneObj obj = 1; + required OzoneAclInfo acl = 2; +} + +message AddAclResponse { + required bool response = 1; +} + +message RemoveAclRequest { + required OzoneObj obj = 1; + required OzoneAclInfo acl = 2; +} + +message RemoveAclResponse { + required bool response = 1; +} + +message SetAclRequest { + required OzoneObj obj = 1; + repeated OzoneAclInfo acl = 2; +} + +message SetAclResponse { + required bool response = 1; +} + message CreateBucketRequest { required BucketInfo bucketInfo = 1; } message CreateBucketResponse { - } message InfoBucketRequest { diff --git 
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java index 2cd3d9ec98275..5d9a05dd9774e 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java @@ -20,14 +20,16 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; +import org.apache.hadoop.test.LambdaTestUtils; import org.junit.Test; -import java.util.Arrays; import java.util.HashMap; import java.util.Set; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.*; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; /** @@ -119,54 +121,85 @@ public void testAclParse() { } @Test - public void testAclValues() { + public void testAclValues() throws Exception { OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); assertEquals(acl.getName(), "bilbo"); - assertEquals(Arrays.asList(READ, WRITE), acl.getRights()); + assertTrue(acl.getAclBitSet().get(READ.ordinal())); + assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); + assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertFalse(acl.getAclBitSet().get(READ_ACL.ordinal())); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:a"); assertEquals("bilbo", acl.getName()); - assertEquals(Arrays.asList(ALL), acl.getRights()); + assertTrue(acl.getAclBitSet().get(ALL.ordinal())); + assertFalse(acl.getAclBitSet().get(WRITE.ordinal())); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:r"); assertEquals("bilbo", acl.getName()); - assertEquals(Arrays.asList(READ), acl.getRights()); + assertTrue(acl.getAclBitSet().get(READ.ordinal())); + assertFalse(acl.getAclBitSet().get(ALL.ordinal())); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:w"); assertEquals("bilbo", acl.getName()); - assertEquals(Arrays.asList(WRITE), acl.getRights()); + assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); + assertFalse(acl.getAclBitSet().get(ALL.ordinal())); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("group:hobbit:a"); assertEquals(acl.getName(), "hobbit"); - assertEquals(Arrays.asList(ALL), acl.getRights()); + assertTrue(acl.getAclBitSet().get(ALL.ordinal())); + assertFalse(acl.getAclBitSet().get(READ.ordinal())); assertEquals(ACLIdentityType.GROUP, acl.getType()); acl = OzoneAcl.parseAcl("world::a"); assertEquals(acl.getName(), ""); - assertEquals(Arrays.asList(ALL), acl.getRights()); + assertTrue(acl.getAclBitSet().get(ALL.ordinal())); + assertFalse(acl.getAclBitSet().get(WRITE.ordinal())); assertEquals(ACLIdentityType.WORLD, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy"); assertEquals(acl.getName(), "bilbo"); - assertEquals(Arrays.asList(READ, WRITE, DELETE, LIST, NONE, CREATE, - READ_ACL, WRITE_ACL), acl.getRights()); - assertEquals(ACLIdentityType.USER, acl.getType()); + assertTrue(acl.getAclBitSet().get(READ.ordinal())); + assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); + assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); + assertTrue(acl.getAclBitSet().get(LIST.ordinal())); + assertTrue(acl.getAclBitSet().get(NONE.ordinal())); + assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); + assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); + 
assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); + assertFalse(acl.getAclBitSet().get(ALL.ordinal())); acl = OzoneAcl.parseAcl("group:hadoop:rwdlncxy"); assertEquals(acl.getName(), "hadoop"); - assertEquals(Arrays.asList(READ, WRITE, DELETE, LIST, NONE, CREATE, - READ_ACL, WRITE_ACL), acl.getRights()); + assertTrue(acl.getAclBitSet().get(READ.ordinal())); + assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); + assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); + assertTrue(acl.getAclBitSet().get(LIST.ordinal())); + assertTrue(acl.getAclBitSet().get(NONE.ordinal())); + assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); + assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); + assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); + assertFalse(acl.getAclBitSet().get(ALL.ordinal())); assertEquals(ACLIdentityType.GROUP, acl.getType()); acl = OzoneAcl.parseAcl("world::rwdlncxy"); assertEquals(acl.getName(), ""); - assertEquals(Arrays.asList(READ, WRITE, DELETE, LIST, NONE, CREATE, - READ_ACL, WRITE_ACL), acl.getRights()); + assertTrue(acl.getAclBitSet().get(READ.ordinal())); + assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); + assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); + assertTrue(acl.getAclBitSet().get(LIST.ordinal())); + assertTrue(acl.getAclBitSet().get(NONE.ordinal())); + assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); + assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); + assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); + assertFalse(acl.getAclBitSet().get(ALL.ordinal())); assertEquals(ACLIdentityType.WORLD, acl.getType()); + + LambdaTestUtils.intercept(IllegalArgumentException.class, "ACL right" + + " is not", () -> OzoneAcl.parseAcl("world::rwdlncxncxdfsfgbny")); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 17e199511e7e2..5679edaf6254d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -84,6 +84,8 @@ import org.apache.hadoop.ozone.s3.util.OzoneS3Util; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Time; @@ -97,9 +99,11 @@ import org.junit.Assert; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import org.junit.Ignore; import org.junit.Test; @@ -2130,6 +2134,44 @@ public void testListPartsWithInvalidUploadID() throws Exception { }); } + @Test + public void testNativeAclsForVolume() throws Exception { + String volumeName = UUID.randomUUID().toString(); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + OzoneObj ozObj = new OzoneObjInfo.Builder() + .setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.VOLUME) + 
.setStoreType(OzoneObj.StoreType.OZONE) + .build(); + // Get acls for volume. + List volAcls = store.getAcl(ozObj); + volAcls.forEach(a -> assertTrue(volume.getAcls().contains(a))); + + // Remove all acl's. + for (OzoneAcl a : volAcls) { + store.removeAcl(ozObj, a); + } + List newAcls = store.getAcl(ozObj); + OzoneVolume finalVolume = store.getVolume(volumeName); + assertTrue(finalVolume.getAcls().size() == 0); + assertTrue(newAcls.size() == 0); + + // Add acl's and then call getAcl. + for (OzoneAcl a : volAcls) { + assertFalse(finalVolume.getAcls().contains(a)); + store.addAcl(ozObj, a); + finalVolume = store.getVolume(volumeName); + assertTrue(finalVolume.getAcls().contains(a)); + } + + // Reset acl's. + store.setAcl(ozObj, newAcls); + finalVolume = store.getVolume(volumeName); + newAcls = store.getAcl(ozObj); + assertTrue(newAcls.size() == 0); + assertTrue(finalVolume.getAcls().size() == 0); + } private byte[] generateData(int size, byte val) { byte[] chars = new byte[size]; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java index e8fa1245a09c8..9c68ab1cec72a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java @@ -745,8 +745,8 @@ public void testUpdateBucket() throws Exception { OzoneAcl acl = bucket.getAcls().get(aclSize); assertTrue(acl.getName().equals("frodo") && acl.getType() == ACLIdentityType.USER - && acl.getRights().contains(ACLType.READ) - && acl.getRights().contains(ACLType.WRITE)); + && acl.getAclBitSet().get(ACLType.READ.ordinal()) + && acl.getAclBitSet().get(ACLType.WRITE.ordinal())); args = new String[] {"bucket", "update", url + "/" + vol.getName() + "/" + bucketName, "--removeAcl", @@ -758,7 +758,7 @@ public void testUpdateBucket() throws Exception { assertEquals(1 + aclSize, bucket.getAcls().size()); assertTrue(acl.getName().equals("samwise") && acl.getType() == ACLIdentityType.GROUP - && acl.getRights().contains(ACLType.READ)); + && acl.getAclBitSet().get(ACLType.READ.ordinal())); // test update bucket for a non-exist bucket args = new String[] {"bucket", "update", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 6b341bce352e7..531287915f2b4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -29,6 +29,7 @@ import java.security.KeyPair; import java.security.cert.CertificateException; import java.util.Collection; +import java.util.Collections; import java.util.Objects; import org.apache.hadoop.classification.InterfaceAudience; @@ -69,6 +70,7 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneIllegalArgumentException; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider; @@ -2984,6 +2986,89 @@ public List listStatus(OmKeyArgs args, boolean recursive, } } + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. 
+ * + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * @throws IOException if there is error. + */ + @Override + public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + if(isAclEnabled) { + checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL, + obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); + } + // TODO: Audit ACL operation. + if(obj.getResourceType().equals(ResourceType.VOLUME)) { + return volumeManager.addAcl(obj, acl); + } + + return false; + } + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * @throws IOException if there is error. + */ + @Override + public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + if(isAclEnabled) { + checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL, + obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); + } + if(obj.getResourceType().equals(ResourceType.VOLUME)) { + return volumeManager.removeAcl(obj, acl); + } + + return false; + } + + /** + * Acls to be set for given Ozone object. This operations reset ACL for given + * object to list of ACLs provided in argument. + * + * @param obj Ozone object. + * @param acls List of acls. + * @throws IOException if there is error. + */ + @Override + public boolean setAcl(OzoneObj obj, List acls) throws IOException { + if(isAclEnabled) { + checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL, + obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); + } + if(obj.getResourceType().equals(ResourceType.VOLUME)) { + return volumeManager.setAcl(obj, acls); + } + + return false; + } + + /** + * Returns list of ACLs for given Ozone object. + * + * @param obj Ozone object. + * @throws IOException if there is error. + */ + @Override + public List getAcl(OzoneObj obj) throws IOException { + if(isAclEnabled) { + checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.READ_ACL, + obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); + } + if(obj.getResourceType().equals(ResourceType.VOLUME)) { + return volumeManager.getAcl(obj); + } + + return Collections.emptyList(); + } + /** * Startup options. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java index a4e20c729672b..b7e28d396ebe9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmDeleteVolumeResponse; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeOwnerChangeResponse; @@ -23,6 +24,7 @@ .OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .VolumeList; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import java.io.IOException; import java.util.List; @@ -141,4 +143,43 @@ boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) */ List listVolumes(String userName, String prefix, String startKey, int maxKeys) throws IOException; + + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. 
+ * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * + * @throws IOException if there is error. + * */ + boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * + * @throws IOException if there is error. + * */ + boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Acls to be set for given Ozone object. This operations reset ACL for + * given object to list of ACLs provided in argument. + * @param obj Ozone object. + * @param acls List of acls. + * + * @throws IOException if there is error. + * */ + boolean setAcl(OzoneObj obj, List acls) throws IOException; + + /** + * Returns list of ACLs for given Ozone object. + * @param obj Ozone object. + * + * @throws IOException if there is error. + * */ + List getAcl(OzoneObj obj) throws IOException; + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java index 7b17550a12775..19e94b5e45c22 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java @@ -19,15 +19,18 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmDeleteVolumeResponse; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeOwnerChangeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.utils.db.BatchOperation; import com.google.common.base.Preconditions; @@ -508,4 +511,178 @@ public List listVolumes(String userName, metadataManager.getLock().releaseUserLock(userName); } } + + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * @throws IOException if there is error. + */ + @Override + public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + Objects.requireNonNull(obj); + Objects.requireNonNull(acl); + if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) { + throw new IllegalArgumentException("Unexpected argument passed to " + + "VolumeManager. 
OzoneObj type:" + obj.getResourceType()); + } + String volume = obj.getVolumeName(); + metadataManager.getLock().acquireVolumeLock(volume); + try { + String dbVolumeKey = metadataManager.getVolumeKey(volume); + OmVolumeArgs volumeArgs = + metadataManager.getVolumeTable().get(dbVolumeKey); + if (volumeArgs == null) { + LOG.debug("volume:{} does not exist", volume); + throw new OMException("Volume " + volume + " is not found", + ResultCodes.VOLUME_NOT_FOUND); + } + volumeArgs.addAcl(acl); + metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); + + Preconditions.checkState(volume.equals(volumeArgs.getVolume())); + //return volumeArgs.getAclMap().hasAccess(userAcl); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Add acl operation failed for volume:{} acl:{}", + volume, acl, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseVolumeLock(volume); + } + + return true; + } + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * @throws IOException if there is error. + */ + @Override + public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + Objects.requireNonNull(obj); + Objects.requireNonNull(acl); + if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) { + throw new IllegalArgumentException("Unexpected argument passed to " + + "VolumeManager. OzoneObj type:" + obj.getResourceType()); + } + String volume = obj.getVolumeName(); + metadataManager.getLock().acquireVolumeLock(volume); + try { + String dbVolumeKey = metadataManager.getVolumeKey(volume); + OmVolumeArgs volumeArgs = + metadataManager.getVolumeTable().get(dbVolumeKey); + if (volumeArgs == null) { + LOG.debug("volume:{} does not exist", volume); + throw new OMException("Volume " + volume + " is not found", + ResultCodes.VOLUME_NOT_FOUND); + } + volumeArgs.removeAcl(acl); + metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); + + Preconditions.checkState(volume.equals(volumeArgs.getVolume())); + //return volumeArgs.getAclMap().hasAccess(userAcl); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Remove acl operation failed for volume:{} acl:{}", + volume, acl, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseVolumeLock(volume); + } + + return true; + } + + /** + * Acls to be set for given Ozone object. This operations reset ACL for given + * object to list of ACLs provided in argument. + * + * @param obj Ozone object. + * @param acls List of acls. + * @throws IOException if there is error. + */ + @Override + public boolean setAcl(OzoneObj obj, List acls) throws IOException { + Objects.requireNonNull(obj); + Objects.requireNonNull(acls); + + if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) { + throw new IllegalArgumentException("Unexpected argument passed to " + + "VolumeManager. 
OzoneObj type:" + obj.getResourceType()); + } + String volume = obj.getVolumeName(); + metadataManager.getLock().acquireVolumeLock(volume); + try { + String dbVolumeKey = metadataManager.getVolumeKey(volume); + OmVolumeArgs volumeArgs = + metadataManager.getVolumeTable().get(dbVolumeKey); + if (volumeArgs == null) { + LOG.debug("volume:{} does not exist", volume); + throw new OMException("Volume " + volume + " is not found", + ResultCodes.VOLUME_NOT_FOUND); + } + volumeArgs.setAcls(acls); + metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); + + Preconditions.checkState(volume.equals(volumeArgs.getVolume())); + //return volumeArgs.getAclMap().hasAccess(userAcl); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Set acl operation failed for volume:{} acls:{}", + volume, acls, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseVolumeLock(volume); + } + + return true; + } + + /** + * Returns list of ACLs for given Ozone object. + * + * @param obj Ozone object. + * @throws IOException if there is error. + */ + @Override + public List getAcl(OzoneObj obj) throws IOException { + Objects.requireNonNull(obj); + + if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) { + throw new IllegalArgumentException("Unexpected argument passed to " + + "VolumeManager. OzoneObj type:" + obj.getResourceType()); + } + String volume = obj.getVolumeName(); + metadataManager.getLock().acquireVolumeLock(volume); + try { + String dbVolumeKey = metadataManager.getVolumeKey(volume); + OmVolumeArgs volumeArgs = + metadataManager.getVolumeTable().get(dbVolumeKey); + if (volumeArgs == null) { + LOG.debug("volume:{} does not exist", volume); + throw new OMException("Volume " + volume + " is not found", + ResultCodes.VOLUME_NOT_FOUND); + } + + Preconditions.checkState(volume.equals(volumeArgs.getVolume())); + return volumeArgs.getAclMap().getAcl(); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Get acl operation failed for volume:{}", volume, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseVolumeLock(volume); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 1b95a2eea90ef..808ac9a3c5d28 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.io.Text; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -43,7 +44,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest; @@ -118,6 +119,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; @@ -127,6 +129,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; + /** * Command Handler for OM requests. OM State Machine calls this handler for * deserializing the client request and sending it to OM. @@ -339,20 +343,40 @@ public OMResponse handle(OMRequest request) { createDirectory(request.getCreateDirectoryRequest()); break; case CreateFile: - OzoneManagerProtocolProtos.CreateFileResponse createFileResponse = + CreateFileResponse createFileResponse = createFile(request.getCreateFileRequest()); responseBuilder.setCreateFileResponse(createFileResponse); break; case LookupFile: - OzoneManagerProtocolProtos.LookupFileResponse lookupFileResponse = + LookupFileResponse lookupFileResponse = lookupFile(request.getLookupFileRequest()); responseBuilder.setLookupFileResponse(lookupFileResponse); break; case ListStatus: - OzoneManagerProtocolProtos.ListStatusResponse listStatusResponse = + ListStatusResponse listStatusResponse = listStatus(request.getListStatusRequest()); responseBuilder.setListStatusResponse(listStatusResponse); break; + case AddAcl: + AddAclResponse addAclResponse = + addAcl(request.getAddAclRequest()); + responseBuilder.setAddAclResponse(addAclResponse); + break; + case RemoveAcl: + RemoveAclResponse removeAclResponse = + removeAcl(request.getRemoveAclRequest()); + responseBuilder.setRemoveAclResponse(removeAclResponse); + break; + case SetAcl: + SetAclResponse setAclResponse = + setAcl(request.getSetAclRequest()); + responseBuilder.setSetAclResponse(setAclResponse); + break; + case GetAcl: + GetAclResponse getAclResponse = + getAcl(request.getGetAclRequest()); + responseBuilder.setGetAclResponse(getAclResponse); + break; default: responseBuilder.setSuccess(false); responseBuilder.setMessage("Unrecognized Command Type: " + cmdType); @@ -369,6 +393,37 @@ public OMResponse handle(OMRequest request) { return responseBuilder.build(); } + private GetAclResponse getAcl(GetAclRequest req) throws IOException { + List acls = new ArrayList<>(); + + List aclList = + impl.getAcl(OzoneObjInfo.fromProtobuf(req.getObj())); + aclList.parallelStream().forEach(a -> acls.add(OzoneAcl.toProtobuf(a))); + return GetAclResponse.newBuilder().addAllAcls(acls).build(); + } + + private RemoveAclResponse removeAcl(RemoveAclRequest req) + throws IOException { + boolean response = impl.removeAcl(OzoneObjInfo.fromProtobuf(req.getObj()), + OzoneAcl.fromProtobuf(req.getAcl())); + return RemoveAclResponse.newBuilder().setResponse(response).build(); + } + + private SetAclResponse setAcl(SetAclRequest req) throws IOException { + List ozoneAcl = new ArrayList<>(); + req.getAclList().parallelStream().forEach(a -> + ozoneAcl.add(OzoneAcl.fromProtobuf(a))); + boolean response = 
impl.setAcl(OzoneObjInfo.fromProtobuf(req.getObj()), + ozoneAcl); + return SetAclResponse.newBuilder().setResponse(response).build(); + } + + private AddAclResponse addAcl(AddAclRequest req) throws IOException { + boolean response = impl.addAcl(OzoneObjInfo.fromProtobuf(req.getObj()), + OzoneAcl.fromProtobuf(req.getAcl())); + return AddAclResponse.newBuilder().setResponse(response).build(); + } + // Convert and exception to corresponding status code protected Status exceptionToResponseStatus(IOException ex) { if (ex instanceof OMException) { @@ -899,7 +954,7 @@ private MultipartUploadListPartsResponse listParts( List omPartInfoList = omMultipartUploadListParts.getPartInfoList(); - List partInfoList = + List partInfoList = new ArrayList<>(); omPartInfoList.forEach(partInfo -> partInfoList.add(partInfo.getProto())); @@ -962,11 +1017,11 @@ private CancelDelegationTokenResponseProto cancelDelegationToken( return rb.build(); } - private OzoneManagerProtocolProtos.GetS3SecretResponse getS3Secret( - OzoneManagerProtocolProtos.GetS3SecretRequest request) + private GetS3SecretResponse getS3Secret( + GetS3SecretRequest request) throws IOException { - OzoneManagerProtocolProtos.GetS3SecretResponse.Builder rb = - OzoneManagerProtocolProtos.GetS3SecretResponse.newBuilder(); + GetS3SecretResponse.Builder rb = + GetS3SecretResponse.newBuilder(); rb.setS3Secret(impl.getS3Secret(request.getKerberosID()).getProtobuf()); @@ -999,8 +1054,8 @@ private void createDirectory(CreateDirectoryRequest request) impl.createDirectory(omKeyArgs); } - private OzoneManagerProtocolProtos.CreateFileResponse createFile( - OzoneManagerProtocolProtos.CreateFileRequest request) throws IOException { + private CreateFileResponse createFile( + CreateFileRequest request) throws IOException { KeyArgs keyArgs = request.getKeyArgs(); OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) @@ -1013,15 +1068,15 @@ private OzoneManagerProtocolProtos.CreateFileResponse createFile( OpenKeySession keySession = impl.createFile(omKeyArgs, request.getIsOverwrite(), request.getIsRecursive()); - return OzoneManagerProtocolProtos.CreateFileResponse.newBuilder() + return CreateFileResponse.newBuilder() .setKeyInfo(keySession.getKeyInfo().getProtobuf()) .setID(keySession.getId()) .setOpenVersion(keySession.getOpenVersion()) .build(); } - private OzoneManagerProtocolProtos.LookupFileResponse lookupFile( - OzoneManagerProtocolProtos.LookupFileRequest request) + private LookupFileResponse lookupFile( + LookupFileRequest request) throws IOException { KeyArgs keyArgs = request.getKeyArgs(); OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() @@ -1029,7 +1084,7 @@ private OzoneManagerProtocolProtos.LookupFileResponse lookupFile( .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .build(); - return OzoneManagerProtocolProtos.LookupFileResponse.newBuilder() + return LookupFileResponse.newBuilder() .setKeyInfo(impl.lookupFile(omKeyArgs).getProtobuf()) .build(); } @@ -1038,8 +1093,8 @@ protected OzoneManagerServerProtocol getOzoneManagerServerProtocol() { return impl; } - private OzoneManagerProtocolProtos.ListStatusResponse listStatus( - OzoneManagerProtocolProtos.ListStatusRequest request) throws IOException { + private ListStatusResponse listStatus( + ListStatusRequest request) throws IOException { KeyArgs keyArgs = request.getKeyArgs(); OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) @@ -1049,9 +1104,9 @@ private OzoneManagerProtocolProtos.ListStatusResponse listStatus( 
List statuses = impl.listStatus(omKeyArgs, request.getRecursive(), request.getStartKey(), request.getNumEntries()); - OzoneManagerProtocolProtos.ListStatusResponse.Builder + ListStatusResponse.Builder listStatusResponseBuilder = - OzoneManagerProtocolProtos.ListStatusResponse.newBuilder(); + ListStatusResponse.newBuilder(); for (OzoneFileStatus status : statuses) { listStatusResponseBuilder.addStatuses(status.getProtobuf()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java index 8a8be357c8fbf..b5baabad61bea 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java @@ -124,7 +124,8 @@ public void testIsReadOnlyCapturesAllCmdTypeEnums() throws Exception { .setClientId(clientId) .build(); OmUtils.isReadOnly(request); - assertFalse(cmdtype + "is not categorized in OmUtils#isReadyOnly", + assertFalse(cmdtype + " is not categorized in " + + "OmUtils#isReadyOnly", logCapturer.getOutput().contains("CmdType " + cmdtype +" is not " + "categorized as readOnly or not.")); logCapturer.clearOutput(); From 2b303e9d5f2108fee621dd0c6e5582a861c401e4 Mon Sep 17 00:00:00 2001 From: Zhankun Tang Date: Thu, 30 May 2019 16:25:37 +0800 Subject: [PATCH 0068/1308] SUBMARINE-86. Bump TonY version to pick up a docker related fix & Update documentation. Contributed by Keqiu Hu. --- .../src/site/markdown/TonYRuntimeGuide.md | 48 +++++++++++-------- .../hadoop-submarine-tony-runtime/pom.xml | 7 ++- 2 files changed, 33 insertions(+), 22 deletions(-) diff --git a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/TonYRuntimeGuide.md b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/TonYRuntimeGuide.md index 105a72431ddb3..72e2d0ff1a10b 100644 --- a/hadoop-submarine/hadoop-submarine-core/src/site/markdown/TonYRuntimeGuide.md +++ b/hadoop-submarine/hadoop-submarine-core/src/site/markdown/TonYRuntimeGuide.md @@ -68,24 +68,31 @@ usage: memory-mb=2048,vcores=2,yarn.io/gpu=2 -localization Specify localization to remote/local file/directory available to all container(Docker). - Argument format is "RemoteUri:LocalFilePath[:rw]" - (ro permission is not supported yet). - The RemoteUri can be a file or directory in local - or HDFS or s3 or abfs or http .etc. - The LocalFilePath can be absolute or relative. - If relative, it'll be under container's implied - working directory. + Argument format is "RemoteUri:LocalFileName" + The LocalFilePath is the local file or folder name. + You should access it with relative path to working directory. This option can be set mutiple times. Examples are - -localization "hdfs:///user/yarn/mydir2:/opt/data" - -localization "s3a:///a/b/myfile1:./" - -localization "https:///a/b/myfile2:./myfile" - -localization "/user/yarn/mydir3:/opt/mydir3" - -localization "./mydir1:." + -localization "hdfs:///user/yarn/mydir2:data" + -localization "s3a:///a/b/myfile1:file1" + -localization "https:///a/b/myfile2:myfile" + -localization "/user/yarn/mydir3:mydir3" + -localization "./mydir1:mydir1" -insecure Whether running in an insecure cluster -conf Override configurations via commandline ``` +> Note: all --localization files will be localized to working directory. You should access them use +relative path. 
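For example — assuming a job submitted with `-localization "hdfs:///user/yarn/mydir2:data"`, and a hypothetical file name `part-0.txt` inside that directory — a process running in the container could read the localized copy through a path relative to its working directory, as in this illustrative Java sketch (not part of TonY itself):

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ReadLocalizedFile {
  public static void main(String[] args) throws Exception {
    // "data" is the LocalFileName from -localization "hdfs:///user/yarn/mydir2:data".
    // It is placed in the container's working directory, so a relative path is enough.
    Path localized = Paths.get("data", "part-0.txt"); // part-0.txt is a made-up example file
    for (String line : Files.readAllLines(localized)) {
      System.out.println(line);
    }
  }
}
```

The existing examples in this guide rely on the same rule, e.g. the relative `venv.zip/venv/bin/python` path used in `--worker_launch_cmd`.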
Alternatively, you could use `--conf tony.containers.resources +=src_file::dest_file_name,src_file2::dest_file_name2`. It accepts a list of resources to be localized to all containers, +delimited by comma. If a resource has no scheme like `hdfs://` or `s3://`, the file is considered a local file. You +could add #archive annotation, if an entry has `#archive`, the file will be automatically unzipped when localized to the +containers, folder name is the same as the file name. For example: `/user/khu/abc.zip#archive` would be inferred as a +local file and will be unarchived in containers. You would anticipate an abc.zip/ folder in your container's working +directory. Annotation `::` is added since TonY 0.3.3. If you use `PATH/TO/abc.txt::def.txt`, the `abc.txt` file +would be localized as `def.txt` in the container working directory. +Details: [tony configurations](https://github.com/linkedin/TonY/wiki/TonY-Configurations) + ### Submarine Configuration For submarine internal configuration, please create a `submarine.xml` which should be placed under `$HADOOP_CONF_DIR`. @@ -144,7 +151,7 @@ CLASSPATH=$(hadoop classpath --glob): \ ./hadoop-submarine-core/target/hadoop-submarine-core-0.2.0-SNAPSHOT.jar: \ ./hadoop-submarine-yarnservice-runtime/target/hadoop-submarine-score-yarnservice-runtime-0.2.0-SNAPSHOT.jar: \ ./hadoop-submarine-tony-runtime/target/hadoop-submarine-tony-runtime-0.2.0-SNAPSHOT.jar: \ -/home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.2-all.jar \ +/home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.11-all.jar \ java org.apache.hadoop.yarn.submarine.client.cli.Cli job run --name tf-job-001 \ --framework tensorflow \ @@ -156,9 +163,10 @@ java org.apache.hadoop.yarn.submarine.client.cli.Cli job run --name tf-job-001 \ --ps_launch_cmd "venv.zip/venv/bin/python mnist_distributed.py --steps 1000 --data_dir /tmp/data --working_dir /tmp/mode" \ --insecure --conf tony.containers.resources=PATH_TO_VENV_YOU_CREATED/venv.zip#archive,PATH_TO_MNIST_EXAMPLE/mnist_distributed.py, \ -PATH_TO_TONY_CLI_JAR/tony-cli-0.3.2-all.jar +PATH_TO_TONY_CLI_JAR/tony-cli-0.3.11-all.jar ``` + You should then be able to see links and status of the jobs from command line: ``` @@ -181,7 +189,7 @@ CLASSPATH=$(hadoop classpath --glob): \ ./hadoop-submarine-core/target/hadoop-submarine-core-0.2.0-SNAPSHOT.jar: \ ./hadoop-submarine-yarnservice-runtime/target/hadoop-submarine-score-yarnservice-runtime-0.2.0-SNAPSHOT.jar: \ ./hadoop-submarine-tony-runtime/target/hadoop-submarine-tony-runtime-0.2.0-SNAPSHOT.jar: \ -/home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.2-all.jar \ +/home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.11-all.jar \ java org.apache.hadoop.yarn.submarine.client.cli.Cli job run --name tf-job-001 \ --framework tensorflow \ @@ -197,7 +205,7 @@ java org.apache.hadoop.yarn.submarine.client.cli.Cli job run --name tf-job-001 \ --env HADOOP_COMMON_HOME=/hadoop-3.1.0 \ --env HADOOP_HDFS_HOME=/hadoop-3.1.0 \ --env HADOOP_CONF_DIR=/hadoop-3.1.0/etc/hadoop \ - --conf tony.containers.resources=--conf tony.containers.resources=/home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.2-all.jar + --conf tony.containers.resources=/home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.11-all.jar ``` @@ -244,7 +252,7 @@ CLASSPATH=$(hadoop classpath --glob): \ ./hadoop-submarine-core/target/hadoop-submarine-core-0.2.0-SNAPSHOT.jar: \ ./hadoop-submarine-yarnservice-runtime/target/hadoop-submarine-score-yarnservice-runtime-0.2.0-SNAPSHOT.jar: \ 
./hadoop-submarine-tony-runtime/target/hadoop-submarine-tony-runtime-0.2.0-SNAPSHOT.jar: \ -/home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.2-all.jar \ +/home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.11-all.jar \ java org.apache.hadoop.yarn.submarine.client.cli.Cli job run --name tf-job-001 \ --num_workers 2 \ @@ -255,7 +263,7 @@ java org.apache.hadoop.yarn.submarine.client.cli.Cli job run --name tf-job-001 \ --ps_launch_cmd "venv.zip/venv/bin/python mnist_distributed.py" \ --insecure \ --conf tony.containers.resources=PATH_TO_VENV_YOU_CREATED/venv.zip#archive,PATH_TO_MNIST_EXAMPLE/mnist_distributed.py, \ -PATH_TO_TONY_CLI_JAR/tony-cli-0.3.2-all.jar \ +PATH_TO_TONY_CLI_JAR/tony-cli-0.3.11-all.jar \ --conf tony.application.framework=pytorch ``` @@ -281,7 +289,7 @@ CLASSPATH=$(hadoop classpath --glob): \ ./hadoop-submarine-core/target/hadoop-submarine-core-0.2.0-SNAPSHOT.jar: \ ./hadoop-submarine-yarnservice-runtime/target/hadoop-submarine-score-yarnservice-runtime-0.2.0-SNAPSHOT.jar: \ ./hadoop-submarine-tony-runtime/target/hadoop-submarine-tony-runtime-0.2.0-SNAPSHOT.jar: \ -/home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.2-all.jar \ +/home/pi/hadoop/TonY/tony-cli/build/libs/tony-cli-0.3.11-all.jar \ java org.apache.hadoop.yarn.submarine.client.cli.Cli job run --name tf-job-001 \ --docker_image hadoopsubmarine/tf-1.8.0-cpu:0.0.3 \ @@ -296,6 +304,6 @@ java org.apache.hadoop.yarn.submarine.client.cli.Cli job run --name tf-job-001 \ --env HADOOP_COMMON_HOME=/hadoop-3.1.0 \ --env HADOOP_HDFS_HOME=/hadoop-3.1.0 \ --env HADOOP_CONF_DIR=/hadoop-3.1.0/etc/hadoop \ - --conf tony.containers.resources=PATH_TO_TONY_CLI_JAR/tony-cli-0.3.2-all.jar \ + --conf tony.containers.resources=PATH_TO_TONY_CLI_JAR/tony-cli-0.3.11-all.jar \ --conf tony.application.framework=pytorch ``` diff --git a/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml b/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml index 6254538342982..cc2ebfc67743a 100644 --- a/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml +++ b/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml @@ -34,12 +34,16 @@ com.linkedin.tony tony-core - 0.3.3 + 0.3.11 com.linkedin.tony tony-mini + + com.linkedin.azkaban + azkaban-common + com.linkedin.azkaban az-hadoop-jobtype-plugin @@ -63,5 +67,4 @@ test - From 18c1eebc08f93055ffdef1812247b439c8404163 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Thu, 30 May 2019 16:17:45 +0530 Subject: [PATCH 0069/1308] HDDS-1502. Add metrics for Ozone Ratis performance.Contributed by Shashikant Banerjee(#833). 
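For context, the counters added by this patch follow the stock Hadoop metrics2 idiom: an annotated source class holds `MutableCounterLong` fields and is registered with (and later unregistered from) `DefaultMetricsSystem`, which instantiates the annotated fields on registration. A minimal, self-contained sketch of that idiom — with invented class and package names, not code from this patch — looks like:

```java
package org.example.metrics; // hypothetical package, for illustration only

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

@Metrics(about = "Example state machine metrics", context = "dfs")
public class ExampleStateMachineMetrics {
  public static final String SOURCE_NAME =
      ExampleStateMachineMetrics.class.getSimpleName();

  // Counters mutated by the component and published by the metrics system.
  private @Metric MutableCounterLong numWriteStateMachineOps;
  private @Metric MutableCounterLong numBytesWrittenCount;

  public static ExampleStateMachineMetrics create() {
    MetricsSystem ms = DefaultMetricsSystem.instance();
    // register() injects the @Metric fields and returns the registered source.
    return ms.register(SOURCE_NAME, "Example state machine metrics",
        new ExampleStateMachineMetrics());
  }

  public void incNumWriteStateMachineOps() {
    numWriteStateMachineOps.incr();
  }

  public void incNumBytesWrittenCount(long bytes) {
    numBytesWrittenCount.incr(bytes);
  }

  public void unRegister() {
    DefaultMetricsSystem.instance().unregisterSource(SOURCE_NAME);
  }
}
```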
--- .../transport/server/ratis/CSMMetrics.java | 58 +++++++++++++++++-- .../server/ratis/ContainerStateMachine.java | 16 ++++- .../server/ratis/TestCSMMetrics.java | 28 +++++---- 3 files changed, 82 insertions(+), 20 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java index 9ccf88ac77763..1ae3c53d909f4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java @@ -37,13 +37,18 @@ public class CSMMetrics { // ratis op metrics metrics private @Metric MutableCounterLong numWriteStateMachineOps; - private @Metric MutableCounterLong numReadStateMachineOps; + private @Metric MutableCounterLong numQueryStateMachineOps; private @Metric MutableCounterLong numApplyTransactionOps; + private @Metric MutableCounterLong numReadStateMachineOps; + private @Metric MutableCounterLong numBytesWrittenCount; + private @Metric MutableCounterLong numBytesCommittedCount; // Failure Metrics private @Metric MutableCounterLong numWriteStateMachineFails; - private @Metric MutableCounterLong numReadStateMachineFails; + private @Metric MutableCounterLong numQueryStateMachineFails; private @Metric MutableCounterLong numApplyTransactionFails; + private @Metric MutableCounterLong numReadStateMachineFails; + private @Metric MutableCounterLong numReadStateMachineMissCount; public CSMMetrics() { } @@ -59,6 +64,10 @@ public void incNumWriteStateMachineOps() { numWriteStateMachineOps.incr(); } + public void incNumQueryStateMachineOps() { + numQueryStateMachineOps.incr(); + } + public void incNumReadStateMachineOps() { numReadStateMachineOps.incr(); } @@ -71,10 +80,26 @@ public void incNumWriteStateMachineFails() { numWriteStateMachineFails.incr(); } + public void incNumQueryStateMachineFails() { + numQueryStateMachineFails.incr(); + } + + public void incNumBytesWrittenCount(long value) { + numBytesWrittenCount.incr(value); + } + + public void incNumBytesCommittedCount(long value) { + numBytesCommittedCount.incr(value); + } + public void incNumReadStateMachineFails() { numReadStateMachineFails.incr(); } + public void incNumReadStateMachineMissCount() { + numReadStateMachineMissCount.incr(); + } + public void incNumApplyTransactionsFails() { numApplyTransactionFails.incr(); } @@ -85,8 +110,8 @@ public long getNumWriteStateMachineOps() { } @VisibleForTesting - public long getNumReadStateMachineOps() { - return numReadStateMachineOps.value(); + public long getNumQueryStateMachineOps() { + return numQueryStateMachineOps.value(); } @VisibleForTesting @@ -100,8 +125,8 @@ public long getNumWriteStateMachineFails() { } @VisibleForTesting - public long getNumReadStateMachineFails() { - return numReadStateMachineFails.value(); + public long getNumQueryStateMachineFails() { + return numQueryStateMachineFails.value(); } @VisibleForTesting @@ -109,6 +134,27 @@ public long getNumApplyTransactionsFails() { return numApplyTransactionFails.value(); } + @VisibleForTesting + public long getNumReadStateMachineFails() { + return numReadStateMachineFails.value(); + } + + @VisibleForTesting + public long getNumReadStateMachineMissCount() { + return numReadStateMachineMissCount.value(); + } + + @VisibleForTesting + public long 
getNumBytesWrittenCount() { + return numBytesWrittenCount.value(); + } + + @VisibleForTesting + public long getNumBytesCommittedCount() { + return numBytesCommittedCount.value(); + } + + public void unRegister() { MetricsSystem ms = DefaultMetricsSystem.instance(); ms.unregisterSource(SOURCE_NAME); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 42a4d997a8f6c..7a7baec3001b2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -391,6 +391,8 @@ private CompletableFuture handleWriteChunk( // Remove the future once it finishes execution from the // writeChunkFutureMap. writeChunkFuture.thenApply(r -> { + metrics.incNumBytesWrittenCount( + requestProto.getWriteChunk().getChunkData().getLen()); writeChunkFutureMap.remove(entryIndex); LOG.debug("writeChunk writeStateMachineData completed: blockId " + write .getBlockID() + " logIndex " + entryIndex + " chunkName " + write @@ -438,12 +440,12 @@ public CompletableFuture writeStateMachineData(LogEntryProto entry) { @Override public CompletableFuture query(Message request) { try { - metrics.incNumReadStateMachineOps(); + metrics.incNumQueryStateMachineOps(); final ContainerCommandRequestProto requestProto = getContainerCommandRequestProto(request.getContent()); return CompletableFuture.completedFuture(runCommand(requestProto, null)); } catch (IOException e) { - metrics.incNumReadStateMachineFails(); + metrics.incNumQueryStateMachineFails(); return completeExceptionally(e); } } @@ -520,10 +522,14 @@ public CompletableFuture flushStateMachineData(long index) { public CompletableFuture readStateMachineData( LogEntryProto entry) { StateMachineLogEntryProto smLogEntryProto = entry.getStateMachineLogEntry(); + metrics.incNumReadStateMachineOps(); if (!getStateMachineData(smLogEntryProto).isEmpty()) { return CompletableFuture.completedFuture(ByteString.EMPTY); } try { + // the stateMachine data is not present in the stateMachine cache, + // increment the stateMachine cache miss count + metrics.incNumReadStateMachineMissCount(); final ContainerCommandRequestProto requestProto = getContainerCommandRequestProto( entry.getStateMachineLogEntry().getLogData()); @@ -537,6 +543,7 @@ public CompletableFuture readStateMachineData( getCachedStateMachineData(entry.getIndex(), entry.getTerm(), requestProto)); } catch (ExecutionException e) { + metrics.incNumReadStateMachineFails(); future.completeExceptionally(e); } return future; @@ -547,6 +554,7 @@ public CompletableFuture readStateMachineData( + " cannot have state machine data"); } } catch (Exception e) { + metrics.incNumReadStateMachineFails(); LOG.error("unable to read stateMachineData:" + e); return completeExceptionally(e); } @@ -618,6 +626,10 @@ public CompletableFuture applyTransaction(TransactionContext trx) { applyTransactionCompletionMap .put(index, trx.getLogEntry().getTerm()); Preconditions.checkState(previous == null); + if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile) { + metrics.incNumBytesCommittedCount( + requestProto.getWriteChunk().getChunkData().getLen()); + } updateLastApplied(); }); return future; diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index dc9e13375cb5b..21593242a2e28 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -14,8 +14,10 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. + = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; + */ -package org.apache.hadoop.ozone.container.common.transport.server.ratis; + package org.apache.hadoop.ozone.container.common.transport.server.ratis; import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; @@ -29,9 +31,9 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; + .ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; + .ContainerCommandResponseProto; import org.apache.hadoop.hdds.scm.*; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -42,7 +44,7 @@ import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.transport.server - .XceiverServerSpi; + .XceiverServerSpi; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -57,13 +59,11 @@ import org.junit.Test; import org.junit.Assert; -/** - * This class tests the metrics of ContainerStateMachine. - */ -public class TestCSMMetrics { - static final String TEST_DIR - = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; - + /** + * This class tests the metrics of ContainerStateMachine. + */ + public class TestCSMMetrics { + static final String TEST_DIR @FunctionalInterface interface CheckedBiFunction { OUT apply(LEFT left, RIGHT right) throws THROWABLE; @@ -112,6 +112,8 @@ static void runContainerStateMachineMetrics( assertCounter("NumWriteStateMachineOps", 0L, metric); assertCounter("NumReadStateMachineOps", 0L, metric); assertCounter("NumApplyTransactionOps", 0L, metric); + assertCounter("NumBytesWrittenCount", 0L, metric); + assertCounter("NumBytesCommittedCount", 0L, metric); // Write Chunk BlockID blockID = ContainerTestHelper.getTestBlockID(ContainerTestHelper. 
@@ -127,7 +129,9 @@ static void runContainerStateMachineMetrics( metric = getMetrics(CSMMetrics.SOURCE_NAME + RaftGroupId.valueOf(pipeline.getId().getId()).toString()); assertCounter("NumWriteStateMachineOps", 1L, metric); + assertCounter("NumBytesWrittenCount", 1024L, metric); assertCounter("NumApplyTransactionOps", 1L, metric); + assertCounter("NumBytesCommittedCount", 1024L, metric); //Read Chunk ContainerProtos.ContainerCommandRequestProto readChunkRequest = @@ -139,7 +143,7 @@ static void runContainerStateMachineMetrics( metric = getMetrics(CSMMetrics.SOURCE_NAME + RaftGroupId.valueOf(pipeline.getId().getId()).toString()); - assertCounter("NumReadStateMachineOps", 1L, metric); + assertCounter("NumQueryStateMachineOps", 1L, metric); assertCounter("NumApplyTransactionOps", 1L, metric); } finally { if (client != null) { From 8c8cb2d6aabba0179e346a55b63596bfd7ade540 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Thu, 30 May 2019 18:44:22 +0530 Subject: [PATCH 0070/1308] HDDS-1502. Fix the compilation issue with commit 18c1eebc08f93055ffdef1812247b439c8404163. --- .../common/transport/server/ratis/TestCSMMetrics.java | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index 21593242a2e28..fa740eacc8e49 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -13,11 +13,10 @@ * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and - * limitations under the License. - = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; - + * limitations under the License */ - package org.apache.hadoop.ozone.container.common.transport.server.ratis; + +package org.apache.hadoop.ozone.container.common.transport.server.ratis; import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; @@ -63,7 +62,9 @@ * This class tests the metrics of ContainerStateMachine. */ public class TestCSMMetrics { - static final String TEST_DIR + static final String TEST_DIR = + GenericTestUtils.getTestDir("dfs").getAbsolutePath() + + File.separator; @FunctionalInterface interface CheckedBiFunction { OUT apply(LEFT left, RIGHT right) throws THROWABLE; From 1b041d4fd4ec0c8c4cfdcd6fa28711cf7fcd56fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Thu, 30 May 2019 16:46:06 +0200 Subject: [PATCH 0071/1308] HDDS-1497. Refactor blockade Tests. Contributed by Nilotpal Nandi. 
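
This refactor replaces the module-level ClusterUtils/Blockade helpers used by
the blockade tests with an object-oriented API: ozone/cluster.py introduces a
Cluster class that wraps docker-compose and blockade handling, and util.py
provides generic helpers such as wait_until() and run_cmd(). A rough sketch of
how a refactored test is expected to look (method and attribute names are taken
from the new modules below; the test name and flow are illustrative only):

  import os
  import util
  from ozone.cluster import Cluster

  def setup_function(function):
      global cluster
      cluster = Cluster.create()   # Configuration() defaults to 3 datanodes
      cluster.start()              # docker-compose up + blockade add

  def teardown_function(function):
      cluster.stop()

  def test_partition_and_recover():
      cluster.run_freon(1, 1, 1, 10240)
      # Isolate the third datanode from the other two; OM and SCM stay
      # reachable from every node.
      cluster.partition_network(
          [cluster.om, cluster.scm,
           cluster.datanodes[0], cluster.datanodes[1]],
          [cluster.om, cluster.scm, cluster.datanodes[2]])
      cluster.restore_network()
      util.wait_until(
          lambda: cluster.container_state_predicate_all_closed(
              cluster.datanodes),
          int(os.environ["CONTAINER_STATUS_SLEEP"]), 10)
      assert cluster.container_state_predicate_all_closed(cluster.datanodes)

The module-level setup/teardown hooks are replaced by per-test setup_function
and teardown_function, so each test runs against a freshly started cluster.
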
--- .../main/blockade/blockadeUtils/blockade.py | 30 +- .../main/blockade/clusterUtils/__init__.py | 2 +- .../blockade/clusterUtils/cluster_utils.py | 11 +- .../dist/src/main/blockade/ozone/__init__.py | 14 + .../dist/src/main/blockade/ozone/cluster.py | 295 ++++++++++++++++++ .../test_blockade_datanode_isolation.py | 219 +++++++------ .../src/main/blockade/test_blockade_flaky.py | 48 +-- hadoop-ozone/dist/src/main/blockade/util.py | 52 +++ .../main/compose/ozoneblockade/docker-config | 6 + 9 files changed, 521 insertions(+), 156 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/blockade/ozone/__init__.py create mode 100644 hadoop-ozone/dist/src/main/blockade/ozone/cluster.py create mode 100644 hadoop-ozone/dist/src/main/blockade/util.py diff --git a/hadoop-ozone/dist/src/main/blockade/blockadeUtils/blockade.py b/hadoop-ozone/dist/src/main/blockade/blockadeUtils/blockade.py index f371865a0500e..7809c70a3b14d 100644 --- a/hadoop-ozone/dist/src/main/blockade/blockadeUtils/blockade.py +++ b/hadoop-ozone/dist/src/main/blockade/blockadeUtils/blockade.py @@ -18,9 +18,8 @@ """This module has apis to create and remove a blockade cluster""" from subprocess import call -import subprocess import logging -import random +import util from clusterUtils.cluster_utils import ClusterUtils logger = logging.getLogger(__name__) @@ -39,23 +38,13 @@ def blockade_up(cls): @classmethod def blockade_status(cls): - exit_code, output = ClusterUtils.run_cmd("blockade status") + exit_code, output = util.run_cmd("blockade status") return exit_code, output @classmethod - def make_flaky(cls, flaky_node, container_list): - # make the network flaky - om, scm, _, datanodes = \ - ClusterUtils.find_om_scm_client_datanodes(container_list) - node_dict = { - "all": "--all", - "scm" : scm[0], - "om" : om[0], - "datanode": random.choice(datanodes) - }[flaky_node] - logger.info("flaky node: %s", node_dict) - - output = call(["blockade", "flaky", node_dict]) + def make_flaky(cls, flaky_node): + logger.info("flaky node: %s", flaky_node) + output = call(["blockade", "flaky", flaky_node]) assert output == 0, "flaky command failed with exit code=[%s]" % output @classmethod @@ -69,7 +58,7 @@ def blockade_create_partition(cls, *args): for node_list in args: nodes = nodes + ','.join(node_list) + " " exit_code, output = \ - ClusterUtils.run_cmd("blockade partition %s" % nodes) + util.run_cmd("blockade partition %s" % nodes) assert exit_code == 0, \ "blockade partition command failed with exit code=[%s]" % output @@ -95,4 +84,9 @@ def blockade_start(cls, node, all_nodes=False): else: output = call(["blockade", "start", node]) assert output == 0, "blockade start command failed with " \ - "exit code=[%s]" % output \ No newline at end of file + "exit code=[%s]" % output + + @classmethod + def blockade_add(cls, node): + output = call(["blockade", "add", node]) + assert output == 0, "blockade add command failed" \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/blockade/clusterUtils/__init__.py b/hadoop-ozone/dist/src/main/blockade/clusterUtils/__init__.py index ae1e83eeb3d49..13878a13a7f86 100644 --- a/hadoop-ozone/dist/src/main/blockade/clusterUtils/__init__.py +++ b/hadoop-ozone/dist/src/main/blockade/clusterUtils/__init__.py @@ -11,4 +11,4 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -# limitations under the License. 
+# limitations under the License. \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py b/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py index 3a04103d95ba8..cf67380255c37 100644 --- a/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py +++ b/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py @@ -17,6 +17,7 @@ from subprocess import call + import subprocess import logging import time @@ -292,9 +293,15 @@ def get_key(cls, docker_compose_file, bucket_name, volume_name, assert exit_code == 0, "Ozone get Key failed with output=[%s]" % output @classmethod - def find_checksum(cls, docker_compose_file, filepath): + def find_checksum(cls, docker_compose_file, filepath, client="ozone_client"): + """ + This function finds the checksum of a file present in a docker container. + Before running any 'putKey' operation, this function is called to store + the original checksum of the file. The file is then uploaded as a key. + """ command = "docker-compose -f %s " \ - "exec ozone_client md5sum %s" % (docker_compose_file, filepath) + "exec %s md5sum %s" % \ + (docker_compose_file, client, filepath) exit_code, output = cls.run_cmd(command) assert exit_code == 0, "Cant find checksum" myoutput = output.split("\n") diff --git a/hadoop-ozone/dist/src/main/blockade/ozone/__init__.py b/hadoop-ozone/dist/src/main/blockade/ozone/__init__.py new file mode 100644 index 0000000000000..13878a13a7f86 --- /dev/null +++ b/hadoop-ozone/dist/src/main/blockade/ozone/__init__.py @@ -0,0 +1,14 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/blockade/ozone/cluster.py b/hadoop-ozone/dist/src/main/blockade/ozone/cluster.py new file mode 100644 index 0000000000000..4347f86c0d371 --- /dev/null +++ b/hadoop-ozone/dist/src/main/blockade/ozone/cluster.py @@ -0,0 +1,295 @@ +#!/usr/bin/python + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import os +import re +import subprocess +import yaml +import util +from subprocess import call +from blockadeUtils.blockade import Blockade + + +class Command(object): + docker = "docker" + blockade = "blockade" + docker_compose = "docker-compose" + ozone = "/opt/hadoop/bin/ozone" + freon = "/opt/hadoop/bin/ozone freon" + + +class Configuration: + """ + Configurations to be used while starting Ozone Cluster. + Here @property decorators is used to achieve getters, setters and delete + behaviour for 'datanode_count' attribute. + @datanode_count.setter will set the value for 'datanode_count' attribute. + @datanode_count.deleter will delete the current value of 'datanode_count' + attribute. + """ + + def __init__(self): + __parent_dir__ = os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__)))) + self.docker_compose_file = os.path.join(__parent_dir__, + "compose", "ozoneblockade", + "docker-compose.yaml") + self._datanode_count = 3 + os.environ["DOCKER_COMPOSE_FILE"] = self.docker_compose_file + + @property + def datanode_count(self): + return self._datanode_count + + @datanode_count.setter + def datanode_count(self, datanode_count): + self._datanode_count = datanode_count + + @datanode_count.deleter + def datanode_count(self): + del self._datanode_count + + +class Cluster(object): + """ + This represents Ozone Cluster. + Here @property decorators is used to achieve getters, setters and delete + behaviour for 'om', 'scm', 'datanodes' and 'clients' attributes. + """ + + __logger__ = logging.getLogger(__name__) + + def __init__(self, conf): + self.conf = conf + self.docker_compose_file = conf.docker_compose_file + self._om = None + self._scm = None + self._datanodes = None + self._clients = None + self.scm_uuid = None + self.datanode_dir = None + + @property + def om(self): + return self._om + + @om.setter + def om(self, om): + self._om = om + + @om.deleter + def om(self): + del self._om + + @property + def scm(self): + return self._scm + + @scm.setter + def scm(self, scm): + self._scm = scm + + @scm.deleter + def scm(self): + del self._scm + + @property + def datanodes(self): + return self._datanodes + + @datanodes.setter + def datanodes(self, datanodes): + self._datanodes = datanodes + + @datanodes.deleter + def datanodes(self): + del self._datanodes + + @property + def clients(self): + return self._clients + + @clients.setter + def clients(self, clients): + self._clients = clients + + @clients.deleter + def clients(self): + del self._clients + + @classmethod + def create(cls, config=Configuration()): + return Cluster(config) + + def start(self): + """ + Start Ozone Cluster in docker containers. 
+ """ + Cluster.__logger__.info("Starting Ozone Cluster") + Blockade.blockade_destroy() + call([Command.docker_compose, "-f", self.docker_compose_file, + "up", "-d", "--scale", + "datanode=" + str(self.conf.datanode_count)]) + Cluster.__logger__.info("Waiting 10s for cluster start up...") + # Remove the sleep and wait only till the cluster is out of safemode + # time.sleep(10) + output = subprocess.check_output([Command.docker_compose, "-f", + self.docker_compose_file, "ps"]) + node_list = [] + for out in output.split("\n")[2:-1]: + node = out.split(" ")[0] + node_list.append(node) + Blockade.blockade_add(node) + + Blockade.blockade_status() + self.om = filter(lambda x: 'om' in x, node_list)[0] + self.scm = filter(lambda x: 'scm' in x, node_list)[0] + self.datanodes = sorted(list(filter(lambda x: 'datanode' in x, node_list))) + self.clients = filter(lambda x: 'ozone_client' in x, node_list) + self.scm_uuid = self.__get_scm_uuid__() + self.datanode_dir = self.get_conf_value("hdds.datanode.dir") + + assert node_list, "no node found in the cluster!" + Cluster.__logger__.info("blockade created with nodes %s", + ' '.join(node_list)) + + def get_conf_value(self, key): + """ + Returns the value of given configuration key. + """ + command = [Command.ozone, "getconf -confKey " + key] + exit_code, output = self.__run_docker_command__(command, self.om) + return str(output).strip() + + def scale_datanode(self, datanode_count): + """ + Commission new datanodes to the running cluster. + """ + call([Command.docker_compose, "-f", self.docker_compose_file, + "up", "-d", "--scale", "datanode=" + datanode_count]) + + def partition_network(self, *args): + """ + Partition the network which is used by the cluster. + """ + Blockade.blockade_create_partition(*args) + + + def restore_network(self): + """ + Restores the network partition. + """ + Blockade.blockade_join() + + + def __get_scm_uuid__(self): + """ + Returns SCM's UUID. + """ + ozone_metadata_dir = self.get_conf_value("ozone.metadata.dirs") + command = "cat %s/scm/current/VERSION" % ozone_metadata_dir + exit_code, output = self.__run_docker_command__(command, self.scm) + output_list = output.split("\n") + key_value = [x for x in output_list if re.search(r"\w+=\w+", x)] + uuid = [token for token in key_value if 'scmUuid' in token] + return uuid.pop().split("=")[1].strip() + + def get_container_states(self, datanode): + """ + Returns the state of all the containers in the given datanode. + """ + container_parent_path = "%s/hdds/%s/current/containerDir0" % \ + (self.datanode_dir, self.scm_uuid) + command = "find %s -type f -name '*.container'" % container_parent_path + exit_code, output = self.__run_docker_command__(command, datanode) + container_state = {} + + container_list = map(str.strip, output.split("\n")) + for container_path in container_list: + # Reading the container file. + exit_code, output = self.__run_docker_command__( + "cat " + container_path, datanode) + if exit_code is not 0: + continue + data = output.split("\n") + # Reading key value pairs from container file. + key_value = [x for x in data if re.search(r"\w+:\s\w+", x)] + content = "\n".join(key_value) + content_yaml = yaml.load(content) + if content_yaml is None: + continue + for key, value in content_yaml.items(): + content_yaml[key] = str(value).lstrip() + # Stores the container state in a dictionary. 
+ container_state[content_yaml['containerID']] = content_yaml['state'] + return container_state + + def run_freon(self, num_volumes, num_buckets, num_keys, key_size, + replication_type="RATIS", replication_factor="THREE", + run_on=None): + """ + Runs freon on the cluster. + """ + if run_on is None: + run_on = self.om + command = [Command.freon, + " rk", + " --numOfVolumes " + str(num_volumes), + " --numOfBuckets " + str(num_buckets), + " --numOfKeys " + str(num_keys), + " --keySize " + str(key_size), + " --replicationType " + replication_type, + " --factor " + replication_factor] + return self.__run_docker_command__(command, run_on) + + def __run_docker_command__(self, command, run_on): + if isinstance(command, list): + command = ' '.join(command) + command = [Command.docker, + "exec " + run_on, + command] + return util.run_cmd(command) + + def stop(self): + """ + Stops the Ozone Cluster. + """ + Cluster.__logger__.info("Stopping Ozone Cluster") + call([Command.docker_compose, "-f", self.docker_compose_file, "down"]) + Blockade.blockade_destroy() + + def container_state_predicate_all_closed(self, datanodes): + for datanode in datanodes: + container_states_dn = self.get_container_states(datanode) + if not container_states_dn \ + or container_states_dn.popitem()[1] != 'CLOSED': + return False + return True + + def container_state_predicate_one_closed(self, datanodes): + for datanode in datanodes: + container_states_dn = self.get_container_states(datanode) + if container_states_dn and container_states_dn.popitem()[1] == 'CLOSED': + return True + return False + + def container_state_predicate(self, datanode, state): + container_states_dn = self.get_container_states(datanode) + if container_states_dn and container_states_dn.popitem()[1] == state: + return True + return False \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/blockade/test_blockade_datanode_isolation.py b/hadoop-ozone/dist/src/main/blockade/test_blockade_datanode_isolation.py index 1e53a32a5fd45..dfa1b703ba814 100644 --- a/hadoop-ozone/dist/src/main/blockade/test_blockade_datanode_isolation.py +++ b/hadoop-ozone/dist/src/main/blockade/test_blockade_datanode_isolation.py @@ -16,132 +16,123 @@ # limitations under the License. 
import os -import time -import re import logging -from blockadeUtils.blockade import Blockade -from clusterUtils.cluster_utils import ClusterUtils +import util +from ozone.cluster import Cluster logger = logging.getLogger(__name__) -parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -FILE = os.path.join(parent_dir, "compose", "ozoneblockade", - "docker-compose.yaml") -os.environ["DOCKER_COMPOSE_FILE"] = FILE -SCALE = 3 -INCREASED_SCALE = 5 -CONTAINER_LIST = [] -OM = [] -SCM = [] -DATANODES = [] -def setup(): - global CONTAINER_LIST, OM, SCM, DATANODES - Blockade.blockade_destroy() - CONTAINER_LIST = ClusterUtils.cluster_setup(FILE, SCALE) - exit_code, output = Blockade.blockade_status() - assert exit_code == 0, "blockade status command failed with output=[%s]" % \ - output - OM, SCM, _, DATANODES = \ - ClusterUtils.find_om_scm_client_datanodes(CONTAINER_LIST) +def setup_function(function): + global cluster + cluster = Cluster.create() + cluster.start() - exit_code, output = ClusterUtils.run_freon(FILE, 1, 1, 1, 10240, "RATIS", - "THREE") - assert exit_code == 0, "freon run failed with output=[%s]" % output +def teardown_function(function): + cluster.stop() -def teardown(): - logger.info("Inside teardown") - Blockade.blockade_destroy() +def test_isolate_single_datanode(): + """ + In this test case we will create a network partition in such a way that + one of the datanode will not be able to communicate with other datanodes + but it will be able to communicate with SCM. -def teardown_module(): - ClusterUtils.cluster_destroy(FILE) + Once the network partition happens, SCM detects it and closes the pipeline, + which in-turn closes the containers. + The container on the first two datanode will get CLOSED as they have quorum. + The container replica on the third node will be QUASI_CLOSED as it is not + able to connect with the other datanodes and it doesn't have latest BCSID. + + Once we restore the network, the stale replica on the third datanode will be + deleted and a latest replica will be copied from any one of the other + datanodes. -def test_isolatedatanode_singlenode(run_second_phase): - """ - In this test, one of the datanodes (first datanode) cannot communicate - with other two datanodes. - All datanodes can communicate with SCM. - Expectation : - The container replica state in first datanode should be quasi-closed. - The container replica state in other datanodes should be closed. """ - first_set = [OM[0], SCM[0], DATANODES[0]] - second_set = [OM[0], SCM[0], DATANODES[1], DATANODES[2]] - Blockade.blockade_create_partition(first_set, second_set) - Blockade.blockade_status() - ClusterUtils.run_freon(FILE, 1, 1, 1, 10240, "RATIS", "THREE") - logger.info("Waiting for %s seconds before checking container status", - os.environ["CONTAINER_STATUS_SLEEP"]) - time.sleep(int(os.environ["CONTAINER_STATUS_SLEEP"])) - all_datanodes_container_status = \ - ClusterUtils.findall_container_status(FILE, SCALE) - first_datanode_status = all_datanodes_container_status[0] - closed_container_datanodes = [x for x in all_datanodes_container_status - if x == 'CLOSED'] - assert first_datanode_status == 'QUASI_CLOSED' - assert len(closed_container_datanodes) == 2, \ - "The container should have two closed replicas." 
- - if str(run_second_phase).lower() == "true": - ClusterUtils.cluster_setup(FILE, INCREASED_SCALE, False) - Blockade.blockade_status() - logger.info("Waiting for %s seconds before checking container status", - os.environ["CONTAINER_STATUS_SLEEP"]) - time.sleep(int(os.environ["CONTAINER_STATUS_SLEEP"])) - all_datanodes_container_status = \ - ClusterUtils.findall_container_status(FILE, INCREASED_SCALE) - closed_container_datanodes = [x for x in all_datanodes_container_status - if x == 'CLOSED'] - assert len(closed_container_datanodes) >= 3, \ - "The container should have at least three closed replicas." - Blockade.blockade_join() - Blockade.blockade_status() - _, output = \ - ClusterUtils.run_freon(FILE, 1, 1, 1, 10240, "RATIS", "THREE") - assert re.search("Status: Success", output) is not None - - -def test_datanode_isolation_all(run_second_phase): + cluster.run_freon(1, 1, 1, 10240) + first_set = [cluster.om, cluster.scm, + cluster.datanodes[0], cluster.datanodes[1]] + second_set = [cluster.om, cluster.scm, cluster.datanodes[2]] + logger.info("Partitioning the network") + cluster.partition_network(first_set, second_set) + cluster.run_freon(1, 1, 1, 10240) + logger.info("Waiting for container to be QUASI_CLOSED") + + util.wait_until(lambda: cluster.get_container_states(cluster.datanodes[2]) + .popitem()[1] == 'QUASI_CLOSED', + int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) + container_states_dn_0 = cluster.get_container_states(cluster.datanodes[0]) + container_states_dn_1 = cluster.get_container_states(cluster.datanodes[1]) + container_states_dn_2 = cluster.get_container_states(cluster.datanodes[2]) + assert len(container_states_dn_0) != 0 + assert len(container_states_dn_1) != 0 + assert len(container_states_dn_2) != 0 + for key in container_states_dn_0: + assert container_states_dn_0.get(key) == 'CLOSED' + for key in container_states_dn_1: + assert container_states_dn_1.get(key) == 'CLOSED' + for key in container_states_dn_2: + assert container_states_dn_2.get(key) == 'QUASI_CLOSED' + + # Since the replica in datanode[2] doesn't have the latest BCSID, + # ReplicationManager will delete it and copy a closed replica. + # We will now restore the network and datanode[2] should get a + # closed replica of the container + logger.info("Restoring the network") + cluster.restore_network() + + logger.info("Waiting for the replica to be CLOSED") + util.wait_until( + lambda: cluster.container_state_predicate(cluster.datanodes[2], 'CLOSED'), + int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) + container_states_dn_2 = cluster.get_container_states(cluster.datanodes[2]) + assert len(container_states_dn_2) != 0 + for key in container_states_dn_2: + assert container_states_dn_2.get(key) == 'CLOSED' + + +def test_datanode_isolation_all(): """ - In this test, none of the datanodes can communicate with other two - datanodes. - All datanodes can communicate with SCM. - Expectation : The container should eventually have at least two closed - replicas. + In this test case we will create a network partition in such a way that + all datanodes cannot communicate with each other. + All datanodes will be able to communicate with SCM. + + Once the network partition happens, SCM detects it and closes the pipeline, + which in-turn tries to close the containers. + At least one of the replica should be in closed state + + Once we restore the network, there will be three closed replicas. 
+ """ - first_set = [OM[0], SCM[0], DATANODES[0]] - second_set = [OM[0], SCM[0], DATANODES[1]] - third_set = [OM[0], SCM[0], DATANODES[2]] - Blockade.blockade_create_partition(first_set, second_set, third_set) - Blockade.blockade_status() - ClusterUtils.run_freon(FILE, 1, 1, 1, 10240, "RATIS", "THREE") - logger.info("Waiting for %s seconds before checking container status", - os.environ["CONTAINER_STATUS_SLEEP"]) - time.sleep(int(os.environ["CONTAINER_STATUS_SLEEP"])) - all_datanodes_container_status = \ - ClusterUtils.findall_container_status(FILE, SCALE) - closed_container_datanodes = [x for x in all_datanodes_container_status - if x == 'CLOSED'] - assert len(closed_container_datanodes) >= 2, \ - "The container should have at least two closed replicas." - - if str(run_second_phase).lower() == "true": - ClusterUtils.cluster_setup(FILE, INCREASED_SCALE, False) - Blockade.blockade_status() - logger.info("Waiting for %s seconds before checking container status", - os.environ["CONTAINER_STATUS_SLEEP"]) - time.sleep(int(os.environ["CONTAINER_STATUS_SLEEP"])) - all_datanodes_container_status = \ - ClusterUtils.findall_container_status(FILE, INCREASED_SCALE) - closed_container_datanodes = [x for x in all_datanodes_container_status - if x == 'CLOSED'] - assert len(closed_container_datanodes) >= 3, \ - "The container should have at least three closed replicas." - Blockade.blockade_join() - Blockade.blockade_status() - _, output = \ - ClusterUtils.run_freon(FILE, 1, 1, 1, 10240, "RATIS", "THREE") - assert re.search("Status: Success", output) is not None \ No newline at end of file + cluster.run_freon(1, 1, 1, 10240) + + assert len(cluster.get_container_states(cluster.datanodes[0])) != 0 + assert len(cluster.get_container_states(cluster.datanodes[1])) != 0 + assert len(cluster.get_container_states(cluster.datanodes[2])) != 0 + + logger.info("Partitioning the network") + first_set = [cluster.om, cluster.scm, cluster.datanodes[0]] + second_set = [cluster.om, cluster.scm, cluster.datanodes[1]] + third_set = [cluster.om, cluster.scm, cluster.datanodes[2]] + cluster.partition_network(first_set, second_set, third_set) + + logger.info("Waiting for the replica to be CLOSED") + util.wait_until( + lambda: cluster.container_state_predicate_one_closed(cluster.datanodes), + int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) + + # At least one of the replica should be in closed state + assert cluster.container_state_predicate_one_closed(cluster.datanodes) + + # After restoring the network all the replicas should be in + # CLOSED state + logger.info("Restoring the network") + cluster.restore_network() + + logger.info("Waiting for the container to be replicated") + util.wait_until( + lambda: cluster.container_state_predicate_all_closed(cluster.datanodes), + int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) + assert cluster.container_state_predicate_all_closed(cluster.datanodes) \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/blockade/test_blockade_flaky.py b/hadoop-ozone/dist/src/main/blockade/test_blockade_flaky.py index 312960027dbf9..a79bd4fcc2afc 100644 --- a/hadoop-ozone/dist/src/main/blockade/test_blockade_flaky.py +++ b/hadoop-ozone/dist/src/main/blockade/test_blockade_flaky.py @@ -16,11 +16,11 @@ # limitations under the License. 
import os -import time import logging +import random import pytest from blockadeUtils.blockade import Blockade -from clusterUtils.cluster_utils import ClusterUtils +from ozone.cluster import Cluster logger = logging.getLogger(__name__) @@ -32,30 +32,36 @@ CONTAINER_LIST = [] -def setup_module(): - global CONTAINER_LIST - Blockade.blockade_destroy() - CONTAINER_LIST = ClusterUtils.cluster_setup(FILE, SCALE) - exit_code, output = Blockade.blockade_status() - assert exit_code == 0, "blockade status command failed with output=[%s]" % \ - output +def setup_function(function): + global cluster + cluster = Cluster.create() + cluster.start() -def teardown_module(): - Blockade.blockade_destroy() - ClusterUtils.cluster_destroy(FILE) +def teardown_function(function): + cluster.stop() -def teardown(): - logger.info("Inside teardown") - Blockade.blockade_fast_all() - time.sleep(5) +@pytest.mark.parametrize("flaky_node", ["datanode", "scm", "om", "all"]) +def test_flaky(flaky_node): + """ + In these tests, we make the network of the nodes as flaky using blockade. + There are 4 tests : + 1) one of the datanodes selected randomly and network of the datanode is + made flaky. + 2) scm network is made flaky. + 3) om network is made flaky. + 4) Network of all the nodes are made flaky. + """ + flaky_container_name = { + "scm": cluster.scm, + "om": cluster.om, + "datanode": random.choice(cluster.datanodes), + "all": "--all" + }[flaky_node] -@pytest.mark.parametrize("flaky_nodes", ["datanode", "scm", "om", "all"]) -def test_flaky(flaky_nodes): - Blockade.make_flaky(flaky_nodes, CONTAINER_LIST) + Blockade.make_flaky(flaky_container_name) Blockade.blockade_status() - exit_code, output = ClusterUtils.run_freon(FILE, 1, 1, 1, 10240, "RATIS", - "THREE") + exit_code, output = cluster.run_freon(1, 1, 1, 10240) assert exit_code == 0, "freon run failed with output=[%s]" % output \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/blockade/util.py b/hadoop-ozone/dist/src/main/blockade/util.py new file mode 100644 index 0000000000000..84f7fdaca6891 --- /dev/null +++ b/hadoop-ozone/dist/src/main/blockade/util.py @@ -0,0 +1,52 @@ +#!/usr/bin/python + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
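+# Shared helpers for the blockade tests. wait_until() polls a predicate until
+# it returns True or the timeout expires; it returns silently in either case,
+# so callers re-assert the condition afterwards, e.g. (as in the datanode
+# isolation tests):
+#
+#   util.wait_until(
+#       lambda: cluster.container_state_predicate(datanode, 'CLOSED'),
+#       int(os.environ["CONTAINER_STATUS_SLEEP"]), 10)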
+ +import time +import re +import logging +import subprocess + +logger = logging.getLogger(__name__) + +def wait_until(predicate, timeout, check_frequency=1): + deadline = time.time() + timeout + while time.time() < deadline: + if predicate(): + return + time.sleep(check_frequency) + + +def run_cmd(cmd): + command = cmd + if isinstance(cmd, list): + command = ' '.join(cmd) + logger.info(" RUNNING: %s", command) + all_output = "" + my_process = subprocess.Popen(command, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, shell=True) + while my_process.poll() is None: + op = my_process.stdout.readline() + if op: + all_output += op + logger.info(op) + other_output = my_process.communicate() + other_output = other_output[0].strip() + if other_output != "": + all_output += other_output + reg = re.compile(r"(\r\n|\n)$") + all_output = reg.sub("", all_output, 1) + return my_process.returncode, all_output diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config index dae9ddb44c6b6..f5e6a9225367c 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config @@ -26,6 +26,12 @@ OZONE-SITE.XML_ozone.scm.client.address=scm OZONE-SITE.XML_ozone.scm.dead.node.interval=5m OZONE-SITE.XML_ozone.replication=1 OZONE-SITE.XML_hdds.datanode.dir=/data/hdds +OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 +OZONE-SITE.XML_ozone.scm.pipeline.destroy.timeout=15s +OZONE-SITE.XML_hdds.heartbeat.interval=2s +OZONE-SITE.XML_hdds.scm.replication.thread.interval=5s +OZONE-SITE.XML_hdds.scm.replication.event.timeout=7s +OZONE-SITE.XML_dfs.ratis.server.failure.duration=25s HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout From 0b907bc0e85df899904e2aa3cd9b05758c30cdac Mon Sep 17 00:00:00 2001 From: dineshchitlangia Date: Thu, 30 May 2019 11:39:19 -0400 Subject: [PATCH 0072/1308] HDDS-1581. Atleast one of the metadata dir config property must be tagged as REQUIRED. --- .../src/main/resources/ozone-default.xml | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 9b941a00cc5dc..d6379795c79fa 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -642,17 +642,18 @@ exist then the OM will attempt to create it. If undefined, then the OM will log a warning and fallback to - ozone.metadata.dirs. + ozone.metadata.dirs. This fallback approach is not recommended for + production environments. ozone.metadata.dirs - OZONE, OM, SCM, CONTAINER, STORAGE + OZONE, OM, SCM, CONTAINER, STORAGE, REQUIRED This setting is the fallback location for SCM, OM and DataNodes - to store their metadata. This setting may be used in test/PoC clusters - to simplify configuration. + to store their metadata. This setting may be used only in test/PoC + clusters to simplify configuration. For production clusters or any time you care about performance, it is recommended that ozone.om.db.dirs, ozone.scm.db.dirs and @@ -694,7 +695,8 @@ does not exist then the SCM will attempt to create it. If undefined, then the SCM will log a warning and fallback to - ozone.metadata.dirs. + ozone.metadata.dirs. 
This fallback approach is not recommended for + production environments. @@ -2275,8 +2277,10 @@ Directory where the Recon Server stores its metadata. This should be specified as a single directory. If the directory does not exist then the Recon will attempt to create it. + If undefined, then the Recon will log a warning and fallback to - ozone.metadata.dirs. + ozone.metadata.dirs. This fallback approach is not recommended for + production environments. @@ -2306,8 +2310,10 @@ Directory where the Recon Server stores its OM snapshot DB. This should be specified as a single directory. If the directory does not exist then the Recon will attempt to create it. + If undefined, then the Recon will log a warning and fallback to - ozone.metadata.dirs. + ozone.metadata.dirs. This fallback approach is not recommended for + production environments. From 33419a980a8fdbab85991d130c463ab2dd79c40e Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Thu, 30 May 2019 08:48:14 -0700 Subject: [PATCH 0073/1308] HDDS-1568 : Add RocksDB metrics to OM. Contributed by Aravindan Vijayan --- .../org/apache/hadoop/utils/RocksDBStore.java | 3 +- .../hadoop/utils/RocksDBStoreMBean.java | 79 ++++++++- .../hadoop/utils/db/DBStoreBuilder.java | 17 +- .../org/apache/hadoop/utils/db/RDBStore.java | 3 +- .../hadoop/utils/TestRocksDBStoreMBean.java | 164 ++++++++++++++++-- .../hdds/server/PrometheusMetricsSink.java | 9 + .../server/TestPrometheusMetricsSink.java | 5 + 7 files changed, 262 insertions(+), 18 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java index 0ca99b68d394f..2038d840dd473 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java @@ -75,7 +75,8 @@ public RocksDBStore(File dbFile, Options options) jmxProperties.put("dbName", dbFile.getName()); statMBeanName = HddsUtils.registerWithJmxProperties( "Ozone", "RocksDbStore", jmxProperties, - new RocksDBStoreMBean(dbOptions.statistics())); + RocksDBStoreMBean.create(dbOptions.statistics(), + dbFile.getName())); if (statMBeanName == null) { LOG.warn("jmx registration failed during RocksDB init, db path :{}", dbFile.getAbsolutePath()); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java index 88c093e62bfc7..62b9b2166adfd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java @@ -18,10 +18,18 @@ package org.apache.hadoop.utils; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.Interns; import org.rocksdb.HistogramData; import org.rocksdb.HistogramType; import org.rocksdb.Statistics; import org.rocksdb.TickerType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import javax.management.Attribute; import javax.management.AttributeList; @@ -41,13 +49,21 @@ /** * Adapter JMX bean to publish all the Rocksdb metrics. 
*/ -public class RocksDBStoreMBean implements DynamicMBean { +public class RocksDBStoreMBean implements DynamicMBean, MetricsSource { private Statistics statistics; private Set histogramAttributes = new HashSet<>(); - public RocksDBStoreMBean(Statistics statistics) { + private String contextName; + + private static final Logger LOG = + LoggerFactory.getLogger(RocksDBStoreMBean.class); + + public final static String ROCKSDB_CONTEXT_PREFIX = "Rocksdb_"; + + public RocksDBStoreMBean(Statistics statistics, String dbName) { + this.contextName = ROCKSDB_CONTEXT_PREFIX + dbName; this.statistics = statistics; histogramAttributes.add("Average"); histogramAttributes.add("Median"); @@ -56,6 +72,22 @@ public RocksDBStoreMBean(Statistics statistics) { histogramAttributes.add("StandardDeviation"); } + public static RocksDBStoreMBean create(Statistics statistics, + String contextName) { + + RocksDBStoreMBean rocksDBStoreMBean = new RocksDBStoreMBean( + statistics, contextName); + MetricsSystem ms = DefaultMetricsSystem.instance(); + MetricsSource metricsSource = ms.getSource(rocksDBStoreMBean.contextName); + if (metricsSource != null) { + return (RocksDBStoreMBean)metricsSource; + } else { + return ms.register(rocksDBStoreMBean.contextName, + "RocksDB Metrics", + rocksDBStoreMBean); + } + } + @Override public Object getAttribute(String attribute) throws AttributeNotFoundException, MBeanException, ReflectionException { @@ -141,4 +173,47 @@ public MBeanInfo getMBeanInfo() { attributes.toArray(new MBeanAttributeInfo[0]), null, null, null); } + + @Override + public void getMetrics(MetricsCollector metricsCollector, boolean b) { + MetricsRecordBuilder rb = metricsCollector.addRecord(contextName); + getHistogramData(rb); + getTickerTypeData(rb); + } + + /** + * Collect all histogram metrics from RocksDB statistics. + * @param rb Metrics Record Builder. + */ + private void getHistogramData(MetricsRecordBuilder rb) { + for (HistogramType histogramType : HistogramType.values()) { + HistogramData histogram = + statistics.getHistogramData( + HistogramType.valueOf(histogramType.name())); + for (String histogramAttribute : histogramAttributes) { + try { + Method method = + HistogramData.class.getMethod("get" + histogramAttribute); + double metricValue = (double) method.invoke(histogram); + rb.addGauge(Interns.info(histogramType.name() + "_" + + histogramAttribute.toUpperCase(), "RocksDBStat"), + metricValue); + } catch (Exception e) { + LOG.error("Error reading histogram data {} ", e); + } + } + } + } + + /** + * Collect all Counter metrics from RocksDB statistics. + * @param rb Metrics Record Builder. 
+ */ + private void getTickerTypeData(MetricsRecordBuilder rb) { + for (TickerType tickerType : TickerType.values()) { + rb.addCounter(Interns.info(tickerType.name(), "RocksDBStat"), + statistics.getTickerCount(tickerType)); + } + } + } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java index 3459b2032edea..fe5787cc7c32d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java @@ -28,6 +28,8 @@ import org.rocksdb.ColumnFamilyOptions; import org.rocksdb.DBOptions; import org.rocksdb.RocksDB; +import org.rocksdb.Statistics; +import org.rocksdb.StatsLevel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,6 +44,9 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_DB_PROFILE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF; /** * DBStore Builder. @@ -57,12 +62,16 @@ public final class DBStoreBuilder { private List tableNames; private Configuration configuration; private CodecRegistry registry; + private String rocksDbStat; private DBStoreBuilder(Configuration configuration) { tables = new HashSet<>(); tableNames = new LinkedList<>(); this.configuration = configuration; this.registry = new CodecRegistry(); + this.rocksDbStat = configuration.getTrimmed( + OZONE_METADATA_STORE_ROCKSDB_STATISTICS, + OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT); } public static DBStoreBuilder newBuilder(Configuration configuration) { @@ -187,7 +196,13 @@ private DBOptions getDbProfile() { if (option == null) { LOG.info("Using default options. 
{}", dbProfile.toString()); - return dbProfile.getDBOptions(); + option = dbProfile.getDBOptions(); + } + + if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) { + Statistics statistics = new Statistics(); + statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat)); + option = option.setStatistics(statistics); } return option; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java index d293c1d215dc6..27862c7847653 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java @@ -108,7 +108,8 @@ public RDBStore(File dbFile, DBOptions options, Set families, jmxProperties.put("dbName", dbFile.getName()); statMBeanName = HddsUtils.registerWithJmxProperties( "Ozone", "RocksDbStore", jmxProperties, - new RocksDBStoreMBean(dbOptions.statistics())); + RocksDBStoreMBean.create(dbOptions.statistics(), + dbFile.getName())); if (statMBeanName == null) { LOG.warn("jmx registration failed during RocksDB init, db path :{}", dbFile.getAbsolutePath()); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java index ccf19b0e1656d..7aef559ad4c1d 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java @@ -19,6 +19,14 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.metrics2.AbstractMetric; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.MetricsTag; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assert; @@ -27,9 +35,14 @@ import javax.management.MBeanServer; import java.io.File; +import java.io.IOException; import java.lang.management.ManagementFactory; +import java.util.HashMap; +import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** * Test the JMX interface for the rocksdb metastore implementation. 
@@ -49,18 +62,8 @@ public void init() throws Exception { @Test public void testJmxBeans() throws Exception { - File testDir = - GenericTestUtils.getTestDir(getClass().getSimpleName() + "-withstat"); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL"); - - RocksDBStore metadataStore = - (RocksDBStore) MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(testDir).build(); - - for (int i = 0; i < 10; i++) { - metadataStore.put("key".getBytes(UTF_8), "value".getBytes(UTF_8)); - } + RocksDBStore metadataStore = getTestRocksDBStoreWithData(); MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer(); @@ -69,11 +72,11 @@ public void testJmxBeans() throws Exception { Object keysWritten = platformMBeanServer .getAttribute(metadataStore.getStatMBeanName(), "NUMBER_KEYS_WRITTEN"); - Assert.assertEquals(10L, keysWritten); + assertEquals(10L, keysWritten); Object dbWriteAverage = platformMBeanServer .getAttribute(metadataStore.getStatMBeanName(), "DB_WRITE_AVERAGE"); - Assert.assertTrue((double) dbWriteAverage > 0); + assertTrue((double) dbWriteAverage > 0); metadataStore.close(); @@ -93,4 +96,139 @@ public void testDisabledStat() throws Exception { Assert.assertNull(metadataStore.getStatMBeanName()); } + + @Test + public void testMetricsSystemIntegration() throws Exception { + + RocksDBStore metadataStore = getTestRocksDBStoreWithData(); + Thread.sleep(2000); + + MetricsSystem ms = DefaultMetricsSystem.instance(); + MetricsSource rdbSource = + ms.getSource("Rocksdb_TestRocksDBStoreMBean-withstat"); + + BufferedMetricsCollector metricsCollector = new BufferedMetricsCollector(); + rdbSource.getMetrics(metricsCollector, true); + + Map metrics = metricsCollector.getMetricsRecordBuilder() + .getMetrics(); + assertTrue(10.0 == metrics.get("NUMBER_KEYS_WRITTEN")); + assertTrue(metrics.get("DB_WRITE_AVERAGE") > 0); + metadataStore.close(); + } + + private RocksDBStore getTestRocksDBStoreWithData() throws IOException { + File testDir = + GenericTestUtils.getTestDir(getClass().getSimpleName() + "-withstat"); + + conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL"); + + RocksDBStore metadataStore = + (RocksDBStore) MetadataStoreBuilder.newBuilder().setConf(conf) + .setCreateIfMissing(true).setDbFile(testDir).build(); + + for (int i = 0; i < 10; i++) { + metadataStore.put("key".getBytes(UTF_8), "value".getBytes(UTF_8)); + } + + return metadataStore; + } +} + +/** + * Test class to buffer a single MetricsRecordBuilder instance. + */ +class BufferedMetricsCollector implements MetricsCollector { + + private BufferedMetricsRecordBuilderImpl metricsRecordBuilder; + + BufferedMetricsCollector() { + metricsRecordBuilder = new BufferedMetricsRecordBuilderImpl(); + } + + public BufferedMetricsRecordBuilderImpl getMetricsRecordBuilder() { + return metricsRecordBuilder; + } + + @Override + public MetricsRecordBuilder addRecord(String s) { + metricsRecordBuilder.setContext(s); + return metricsRecordBuilder; + } + + @Override + public MetricsRecordBuilder addRecord(MetricsInfo metricsInfo) { + return metricsRecordBuilder; + } + + /** + * Test class to buffer a single snapshot of metrics. 
+ */ + class BufferedMetricsRecordBuilderImpl extends MetricsRecordBuilder { + + private Map metrics = new HashMap<>(); + private String contextName; + + public Map getMetrics() { + return metrics; + } + + @Override + public MetricsRecordBuilder tag(MetricsInfo metricsInfo, String s) { + return null; + } + + @Override + public MetricsRecordBuilder add(MetricsTag metricsTag) { + return null; + } + + @Override + public MetricsRecordBuilder add(AbstractMetric abstractMetric) { + return null; + } + + @Override + public MetricsRecordBuilder setContext(String s) { + this.contextName = s; + return this; + } + + @Override + public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, int i) { + return null; + } + + @Override + public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, long l) { + metrics.put(metricsInfo.name(), (double)l); + return this; + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, int i) { + return null; + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, long l) { + return null; + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, float v) { + return null; + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, double v) { + metrics.put(metricsInfo.name(), v); + return this; + } + + @Override + public MetricsCollector parent() { + return null; + } + } } \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java index 5a3dd48d1ddfc..52532f1723928 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdds.server; +import static org.apache.hadoop.utils.RocksDBStoreMBean.ROCKSDB_CONTEXT_PREFIX; + import java.io.IOException; import java.io.Writer; import java.util.HashMap; @@ -24,6 +26,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricType; import org.apache.hadoop.metrics2.MetricsRecord; @@ -90,6 +93,12 @@ public void putMetrics(MetricsRecord metricsRecord) { */ public String prometheusName(String recordName, String metricName) { + + //RocksDB metric names already have underscores as delimiters. 
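+    // For example (mirroring the assertion added to TestPrometheusMetricsSink
+    // below), record "Rocksdb_om.db" with metric "num_open_connections" is
+    // exported as "rocksdb_om.db_num_open_connections" rather than being
+    // re-split on camel-case boundaries like other records.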
+ if (StringUtils.isNotEmpty(recordName) && + recordName.startsWith(ROCKSDB_CONTEXT_PREFIX)) { + return recordName.toLowerCase() + "_" + metricName.toLowerCase(); + } String baseName = upperFirst(recordName) + upperFirst(metricName); Matcher m = UPPER_CASE_SEQ.matcher(baseName); StringBuffer sb = new StringBuffer(); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java index 9ec8bcde1564b..0a8eb676d8351 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java @@ -82,6 +82,11 @@ public void testNaming() throws IOException { Assert.assertEquals("rpc_time_small", sink.prometheusName("RpcTime", "small")); + + //RocksDB metrics are handled differently. + + Assert.assertEquals("rocksdb_om.db_num_open_connections", + sink.prometheusName("Rocksdb_om.db", "num_open_connections")); } /** From baee71551d5a9c39760631de463684d810fa96fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Thu, 30 May 2019 18:20:16 +0200 Subject: [PATCH 0074/1308] HDDS-1597. Remove hdds-server-scm dependency from ozone-common. (#860) --- .../hdds/scm/exceptions/SCMException.java | 0 .../hdds/scm/exceptions/package-info.java | 0 .../hadoop/hdds/server/ServerUtils.java | 21 +++++++++++++++++++ .../org/apache/hadoop/hdds/scm/ScmUtils.java | 21 ++----------------- hadoop-ozone/common/pom.xml | 4 ---- .../java/org/apache/hadoop/ozone/OmUtils.java | 4 ++-- hadoop-ozone/integration-test/pom.xml | 9 ++++++++ .../hadoop/ozone/om/TestKeyManagerImpl.java | 0 hadoop-ozone/pom.xml | 5 +++++ hadoop-ozone/tools/pom.xml | 5 +++++ 10 files changed, 44 insertions(+), 25 deletions(-) rename hadoop-hdds/{server-scm => common}/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java (100%) rename hadoop-hdds/{server-scm => common}/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java (100%) rename hadoop-ozone/{ozone-manager => integration-test}/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java (100%) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java similarity index 100% rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java similarity index 100% rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index f775ca104b326..33a1ca9558b04 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -203,4 +203,25 @@ public static void 
setOzoneMetaDirPath(OzoneConfiguration conf, conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, path); } + /** + * Returns with the service specific metadata directory. + *

    + * If the directory is missing the method tries to create it. + * + * @param conf The ozone configuration object + * @param key The configuration key which specify the directory. + * @return The path of the directory. + */ + public static File getDBPath(Configuration conf, String key) { + final File dbDirPath = + getDirectoryFromConfig(conf, key, "OM"); + if (dbDirPath != null) { + return dbDirPath; + } + + LOG.warn("{} is not configured. We recommend adding this setting. " + + "Falling back to {} instead.", key, + HddsConfigKeys.OZONE_METADATA_DIRS); + return ServerUtils.getOzoneMetaDirPath(conf); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java index 37702532ac760..426341a32f40d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java @@ -18,18 +18,13 @@ package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; -import org.apache.hadoop.hdds.scm.safemode.Precheck; import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.hadoop.hdds.scm.safemode.Precheck; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; - /** * SCM utility class. */ @@ -53,16 +48,4 @@ public static void preCheck(ScmOps operation, Precheck... preChecks) } } - public static File getDBPath(Configuration conf, String dbDirectory) { - final File dbDirPath = - ServerUtils.getDirectoryFromConfig(conf, dbDirectory, "OM"); - if (dbDirPath != null) { - return dbDirPath; - } - - LOG.warn("{} is not configured. We recommend adding this setting. " - + "Falling back to {} instead.", dbDirectory, - HddsConfigKeys.OZONE_METADATA_DIRS); - return ServerUtils.getOzoneMetaDirPath(conf); - } } diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index 06973b397533a..050022c949546 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -60,10 +60,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-server-framework - - org.apache.hadoop - hadoop-hdds-server-scm - org.apache.hadoop hadoop-hdds-container-service diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 5cd51421cb72d..f4c33d3a7eaf2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -39,7 +39,7 @@ import org.apache.commons.compress.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.ScmUtils; +import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -164,7 +164,7 @@ public static int getOmRestPort(Configuration conf) { * @return File path, after creating all the required Directories. 
*/ public static File getOmDbDir(Configuration conf) { - return ScmUtils.getDBPath(conf, OMConfigKeys.OZONE_OM_DB_DIRS); + return ServerUtils.getDBPath(conf, OMConfigKeys.OZONE_OM_DB_DIRS); } /** diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 26387f83617d9..821a2c43e0a90 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -34,6 +34,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone-common + + org.apache.hadoop + hadoop-hdds-server-scm + org.apache.hadoop hadoop-ozone-ozone-manager @@ -60,6 +64,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone-client + + commons-lang + commons-lang + test + org.apache.hadoop hadoop-ozone-ozone-manager diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java similarity index 100% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 614c6d95ebf55..52fd608b1deb0 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -209,6 +209,11 @@ bcprov-jdk15on ${bouncycastle.version} + + commons-lang + commons-lang + 2.6 + diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index f8ed807990636..7ce6f6b2a53b5 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -37,6 +37,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone-common + + + org.apache.hadoop + hadoop-hdds-server-scm + org.apache.hadoop hadoop-ozone-client From 30c6dd92e1d4075d143adc891dc8ec536dddc0d9 Mon Sep 17 00:00:00 2001 From: Sunil G Date: Thu, 30 May 2019 22:32:36 +0530 Subject: [PATCH 0075/1308] YARN-9452. Fix TestDistributedShell and TestTimelineAuthFilterForV2 failures. Contributed by Prabhu Joseph. 
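The distributed shell fix below threads the job submitter's home directory from the Client to the ApplicationMaster instead of hard-coding "/user/<user>". A rough sketch of the resulting flow, pieced together from the hunks that follow (surrounding wiring elided):

    // Client side: forward the submitter's home directory to the AM.
    vargs.add("--homedir " + fs.getHomeDirectory());

    // ApplicationMaster side: prefer the supplied value, otherwise fall back
    // to the previous hard-coded /user/<user> location.
    homeDirectory = cliParser.hasOption("homedir")
        ? new Path(cliParser.getOptionValue("homedir"))
        : new Path("/user/" + System.getenv(
            ApplicationConstants.Environment.USER.name()));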
--- .../distributedshell/ApplicationMaster.java | 17 +- .../applications/distributedshell/Client.java | 2 + .../timelineservice/NMTimelinePublisher.java | 12 -- .../security/TestTimelineAuthFilterForV2.java | 177 ++++++++++-------- 4 files changed, 107 insertions(+), 101 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index c30dc4dc01c7c..bb300db26d25a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -226,6 +226,8 @@ public enum DSEntity { @VisibleForTesting UserGroupInformation appSubmitterUgi; + private Path homeDirectory; + // Handle to communicate with the Node Manager private NMClientAsync nmClientAsync; // Listen to process the response from the Node Manager @@ -513,6 +515,7 @@ public boolean init(String[] args) throws ParseException, IOException { + "retrieved by" + " the new application attempt "); opts.addOption("localized_files", true, "List of localized files"); + opts.addOption("homedir", true, "Home Directory of Job Owner"); opts.addOption("help", false, "Print usage"); CommandLine cliParser = new GnuParser().parse(opts, args); @@ -544,6 +547,11 @@ public boolean init(String[] args) throws ParseException, IOException { dumpOutDebugInfo(); } + homeDirectory = cliParser.hasOption("homedir") ? + new Path(cliParser.getOptionValue("homedir")) : + new Path("/user/" + System.getenv(ApplicationConstants. 
+ Environment.USER.name())); + if (cliParser.hasOption("placement_spec")) { String placementSpec = cliParser.getOptionValue("placement_spec"); String decodedSpec = getDecodedPlacementSpec(placementSpec); @@ -779,7 +787,7 @@ private void cleanup() { @Override public Void run() throws IOException { FileSystem fs = FileSystem.get(conf); - Path dst = new Path(getAppSubmitterHomeDir(), + Path dst = new Path(homeDirectory, getRelativePath(appName, appId.toString(), "")); fs.delete(dst, true); return null; @@ -790,11 +798,6 @@ public Void run() throws IOException { } } - private Path getAppSubmitterHomeDir() { - return new Path("/user/" + - System.getenv(ApplicationConstants.Environment.USER.name())); - } - /** * Main run function for the application master * @@ -1495,7 +1498,7 @@ public void run() { String relativePath = getRelativePath(appName, appId.toString(), fileName); Path dst = - new Path(getAppSubmitterHomeDir(), relativePath); + new Path(homeDirectory, relativePath); FileStatus fileStatus = fs.getFileStatus(dst); LocalResource localRes = LocalResource.newInstance( URL.fromURI(dst.toUri()), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 08c6b83797f0d..4bd57dd27f6cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -986,6 +986,8 @@ public boolean run() throws IOException, YarnException { } vargs.add("--appname " + appName); + vargs.add("--homedir " + fs.getHomeDirectory()); + vargs.addAll(containerRetryOptions); vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java index ba574952f7b38..5a4de1f4b6a38 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java @@ -273,11 +273,7 @@ private void publishContainerResumedEvent( TimelineEvent tEvent = new TimelineEvent(); tEvent.setId(ContainerMetricsConstants.RESUMED_EVENT_TYPE); tEvent.setTimestamp(event.getTimestamp()); - - long containerStartTime = container.getContainerStartTime(); entity.addEvent(tEvent); - entity - .setIdPrefix(TimelineServiceHelper.invertLong(containerStartTime)); dispatcher.getEventHandler().handle(new TimelinePublishEvent(entity, containerId.getApplicationAttemptId().getApplicationId())); } @@ -302,11 +298,7 @@ private void publishContainerPausedEvent( TimelineEvent 
tEvent = new TimelineEvent(); tEvent.setId(ContainerMetricsConstants.PAUSED_EVENT_TYPE); tEvent.setTimestamp(event.getTimestamp()); - - long containerStartTime = container.getContainerStartTime(); entity.addEvent(tEvent); - entity - .setIdPrefix(TimelineServiceHelper.invertLong(containerStartTime)); dispatcher.getEventHandler().handle(new TimelinePublishEvent(entity, containerId.getApplicationAttemptId().getApplicationId())); } @@ -333,11 +325,7 @@ private void publishContainerKilledEvent( TimelineEvent tEvent = new TimelineEvent(); tEvent.setId(ContainerMetricsConstants.KILLED_EVENT_TYPE); tEvent.setTimestamp(event.getTimestamp()); - - long containerStartTime = container.getContainerStartTime(); entity.addEvent(tEvent); - entity - .setIdPrefix(TimelineServiceHelper.invertLong(containerStartTime)); dispatcher.getEventHandler().handle(new TimelinePublishEvent(entity, containerId.getApplicationAttemptId().getApplicationId())); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java index 95a008a875387..0c70a5afdab02 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java @@ -34,14 +34,15 @@ import java.io.BufferedReader; import java.io.File; +import java.io.FileFilter; import java.io.FileReader; import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.Callable; -import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; @@ -100,6 +101,8 @@ public class TestTimelineAuthFilterForV2 { getKeytabFile()); private static String httpSpnegoPrincipal = KerberosTestUtils. getServerPrincipal(); + private static final String ENTITY_TYPE = "dummy_type"; + private static final AtomicInteger ENTITY_TYPE_SUFFIX = new AtomicInteger(0); // First param indicates whether HTTPS access or HTTP access and second param // indicates whether it is kerberos access or token based access. 
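Because entities are now written asynchronously, the test hunks below replace single-shot file reads with polling. A minimal sketch of the wait-and-read pattern they introduce; the helper name is hypothetical, while the 50 x 50 ms budget matches the diff:

    // Hypothetical helper capturing the retry loop added to verifyEntity():
    // poll for up to ~2.5 seconds for the entity file to appear and parse.
    private static TimelineEntity waitForEntity(File entityFile)
        throws IOException, InterruptedException {
      TimelineEntity entity = null;
      for (int i = 0; i < 50; i++) {
        if (entityFile.exists()) {
          entity = readEntityFile(entityFile);
          if (entity != null) {
            break;
          }
        }
        Thread.sleep(50);
      }
      return entity;
    }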
@@ -274,11 +277,20 @@ private static TimelineEntity createEntity(String id, String type) { } private static void verifyEntity(File entityTypeDir, String id, String type) - throws IOException { + throws InterruptedException, IOException { File entityFile = new File(entityTypeDir, id + FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_EXTENSION); + TimelineEntity entity = null; + for (int i = 0; i < 50; i++) { + if (entityFile.exists()) { + entity = readEntityFile(entityFile); + if (entity != null) { + break; + } + } + Thread.sleep(50); + } assertTrue(entityFile.exists()); - TimelineEntity entity = readEntityFile(entityFile); assertNotNull(entity); assertEquals(id, entity.getId()); assertEquals(type, entity.getType()); @@ -333,7 +345,8 @@ private boolean publishWithRetries(ApplicationId appId, File entityTypeDir, @Test public void testPutTimelineEntities() throws Exception { - final String entityType = "dummy_type"; + final String entityType = ENTITY_TYPE + + ENTITY_TYPE_SUFFIX.getAndIncrement(); ApplicationId appId = ApplicationId.newInstance(0, 1); File entityTypeDir = new File(TEST_ROOT_DIR.getAbsolutePath() + File.separator + "entities" + File.separator + @@ -342,92 +355,92 @@ public void testPutTimelineEntities() throws Exception { File.separator + "test_flow_name" + File.separator + "test_flow_version" + File.separator + "1" + File.separator + appId.toString() + File.separator + entityType); - try { - if (withKerberosLogin) { - KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable() { - @Override - public Void call() throws Exception { - publishAndVerifyEntity(appId, entityTypeDir, entityType, 1); - return null; - } - }); - } else { - assertTrue("Entities should have been published successfully.", - publishWithRetries(appId, entityTypeDir, entityType, 1)); - - AppLevelTimelineCollector collector = - (AppLevelTimelineCollector) collectorManager.get(appId); - Token token = - collector.getDelegationTokenForApp(); - assertNotNull(token); + if (withKerberosLogin) { + KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable() { + @Override + public Void call() throws Exception { + publishAndVerifyEntity(appId, entityTypeDir, entityType, 1); + return null; + } + }); + } else { + assertTrue("Entities should have been published successfully.", + publishWithRetries(appId, entityTypeDir, entityType, 1)); - // Verify if token is renewed automatically and entities can still be - // published. - Thread.sleep(1000); - // Entities should publish successfully after renewal. - assertTrue("Entities should have been published successfully.", - publishWithRetries(appId, entityTypeDir, entityType, 2)); - assertNotNull(collector); - verify(collectorManager.getTokenManagerService(), atLeastOnce()). - renewToken(eq(collector.getDelegationTokenForApp()), - any(String.class)); + AppLevelTimelineCollector collector = + (AppLevelTimelineCollector) collectorManager.get(appId); + Token token = + collector.getDelegationTokenForApp(); + assertNotNull(token); - // Wait to ensure lifetime of token expires and ensure its regenerated - // automatically. - Thread.sleep(3000); - for (int i = 0; i < 40; i++) { - if (!token.equals(collector.getDelegationTokenForApp())) { - break; - } - Thread.sleep(50); - } - assertNotEquals("Token should have been regenerated.", token, - collector.getDelegationTokenForApp()); - Thread.sleep(1000); - // Try publishing with the old token in UGI. Publishing should fail due - // to invalid token. 
- try { - publishAndVerifyEntity(appId, entityTypeDir, entityType, 2); - fail("Exception should have been thrown due to Invalid Token."); - } catch (YarnException e) { - assertTrue("Exception thrown should have been due to Invalid Token.", - e.getCause().getMessage().contains("InvalidToken")); - } + // Verify if token is renewed automatically and entities can still be + // published. + Thread.sleep(1000); + // Entities should publish successfully after renewal. + assertTrue("Entities should have been published successfully.", + publishWithRetries(appId, entityTypeDir, entityType, 2)); + assertNotNull(collector); + verify(collectorManager.getTokenManagerService(), atLeastOnce()). + renewToken(eq(collector.getDelegationTokenForApp()), + any(String.class)); - // Update the regenerated token in UGI and retry publishing entities. - Token regeneratedToken = - collector.getDelegationTokenForApp(); - regeneratedToken.setService(new Text("localhost" + - regeneratedToken.getService().toString().substring( - regeneratedToken.getService().toString().indexOf(":")))); - UserGroupInformation.getCurrentUser().addToken(regeneratedToken); - assertTrue("Entities should have been published successfully.", - publishWithRetries(appId, entityTypeDir, entityType, 2)); - // Token was generated twice, once when app collector was created and - // later after token lifetime expiry. - verify(collectorManager.getTokenManagerService(), times(2)). - generateToken(any(UserGroupInformation.class), any(String.class)); - assertEquals(1, ((DummyNodeTimelineCollectorManager) collectorManager). - getTokenExpiredCnt()); - } - // Wait for async entity to be published. - for (int i = 0; i < 50; i++) { - if (entityTypeDir.listFiles().length == 2) { + // Wait to ensure lifetime of token expires and ensure its regenerated + // automatically. + Thread.sleep(3000); + for (int i = 0; i < 40; i++) { + if (!token.equals(collector.getDelegationTokenForApp())) { break; } Thread.sleep(50); } - assertEquals(2, entityTypeDir.listFiles().length); - verifyEntity(entityTypeDir, "entity2", entityType); - AppLevelTimelineCollector collector = - (AppLevelTimelineCollector)collectorManager.get(appId); - assertNotNull(collector); - auxService.removeApplication(appId); - verify(collectorManager.getTokenManagerService()).cancelToken( - eq(collector.getDelegationTokenForApp()), any(String.class)); - } finally { - FileUtils.deleteQuietly(entityTypeDir); + assertNotEquals("Token should have been regenerated.", token, + collector.getDelegationTokenForApp()); + Thread.sleep(1000); + // Try publishing with the old token in UGI. Publishing should fail due + // to invalid token. + try { + publishAndVerifyEntity(appId, entityTypeDir, entityType, 2); + fail("Exception should have been thrown due to Invalid Token."); + } catch (YarnException e) { + assertTrue("Exception thrown should have been due to Invalid Token.", + e.getCause().getMessage().contains("InvalidToken")); + } + + // Update the regenerated token in UGI and retry publishing entities. 
+ Token regeneratedToken = + collector.getDelegationTokenForApp(); + regeneratedToken.setService(new Text("localhost" + + regeneratedToken.getService().toString().substring( + regeneratedToken.getService().toString().indexOf(":")))); + UserGroupInformation.getCurrentUser().addToken(regeneratedToken); + assertTrue("Entities should have been published successfully.", + publishWithRetries(appId, entityTypeDir, entityType, 2)); + // Token was generated twice, once when app collector was created and + // later after token lifetime expiry. + verify(collectorManager.getTokenManagerService(), times(2)). + generateToken(any(UserGroupInformation.class), any(String.class)); + assertEquals(1, ((DummyNodeTimelineCollectorManager) collectorManager). + getTokenExpiredCnt()); + } + // Wait for async entity to be published. + FileFilter tmpFilter = (pathname -> !pathname.getName().endsWith(".tmp")); + File[] entities = null; + for (int i = 0; i < 50; i++) { + entities = entityTypeDir.listFiles(tmpFilter); + if (entities != null && entities.length == 2) { + break; + } + Thread.sleep(50); } + assertNotNull("Error reading entityTypeDir", entities); + assertEquals(2, entities.length); + verifyEntity(entityTypeDir, "entity2", entityType); + AppLevelTimelineCollector collector = + (AppLevelTimelineCollector)collectorManager.get(appId); + assertNotNull(collector); + auxService.removeApplication(appId); + verify(collectorManager.getTokenManagerService()).cancelToken( + eq(collector.getDelegationTokenForApp()), any(String.class)); } private static class DummyNodeTimelineCollectorManager extends From f1552f6edb8fe152003fd71944851b2b46a6677d Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Thu, 30 May 2019 11:42:27 -0700 Subject: [PATCH 0076/1308] YARN-9553. Fix NPE in EntityGroupFSTimelineStore#getEntityTimelines. Contributed by Prabhu Joseph. 
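The change below adds a null/empty guard to EntityGroupFSTimelineStore#getEntityTimelines. An illustrative call, mirroring the new test (the store variable is assumed to be an initialized EntityGroupFSTimelineStore):

    // With the guard in place this returns an empty TimelineEvents object
    // instead of throwing a NullPointerException on the null entity-id set.
    TimelineEvents events = store.getEntityTimelines(
        "YARN_APPLICATION", null, null, null, null, null);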
--- .../server/timeline/EntityGroupFSTimelineStore.java | 5 +++++ .../timeline/TestEntityGroupFSTimelineStore.java | 11 +++++++++++ 2 files changed, 16 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java index 498230ae7586a..a5e5b419d50d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java @@ -1115,6 +1115,11 @@ public TimelineEvents getEntityTimelines(String entityType, LOG.debug("getEntityTimelines type={} ids={}", entityType, entityIds); TimelineEvents returnEvents = new TimelineEvents(); List relatedCacheItems = new ArrayList<>(); + + if (entityIds == null || entityIds.isEmpty()) { + return returnEvents; + } + for (String entityId : entityIds) { LOG.debug("getEntityTimeline type={} id={}", entityType, entityId); List stores diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java index dc10912586aac..8fcc696aad42b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java @@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field; import org.apache.hadoop.yarn.util.ConverterUtils; import org.junit.After; +import org.junit.Assert; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -379,6 +380,16 @@ public void testCleanBuckets() throws Exception { assertFalse(fs.exists(clusterTimeStampDir1)); } + @Test + public void testNullCheckGetEntityTimelines() throws Exception { + try { + store.getEntityTimelines("YARN_APPLICATION", null, null, null, null, + null); + } catch (NullPointerException e) { + Assert.fail("NPE when getEntityTimelines called with Null EntityIds"); + } + } + @Test public void testPluginRead() throws Exception { // Verify precondition From 6f5a36c13c4d2eb24e207aa99a9781c0be11660b Mon Sep 17 00:00:00 2001 From: Shweta Yakkali Date: Thu, 30 May 2019 13:20:44 -0700 Subject: [PATCH 0077/1308] HADOOP-13656. fs -expunge to take a filesystem. Contributed by Shweta. 
Signed-off-by: Wei-Chiu Chuang --- .../java/org/apache/hadoop/fs/FileSystem.java | 6 + .../java/org/apache/hadoop/fs/FsShell.java | 4 +- .../org/apache/hadoop/fs/shell/Delete.java | 16 +- .../src/site/markdown/FileSystemShell.md | 11 +- .../java/org/apache/hadoop/fs/TestTrash.java | 326 ++++++++++-------- .../hadoop/fs/viewfs/TestViewFsTrash.java | 2 +- .../apache/hadoop/hdfs/tools/DFSAdmin.java | 13 +- .../org/apache/hadoop/hdfs/TestDFSShell.java | 3 - .../org/apache/hadoop/hdfs/TestHDFSTrash.java | 2 +- .../hdfs/server/namenode/TestCheckpoint.java | 2 - 10 files changed, 215 insertions(+), 170 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 19f38af69998f..4580451bc0532 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -206,6 +206,12 @@ static void addFileSystemForTesting(URI uri, Configuration conf, CACHE.map.put(new Cache.Key(uri, conf), fs); } + @VisibleForTesting + static void removeFileSystemForTesting(URI uri, Configuration conf, + FileSystem fs) throws IOException { + CACHE.map.remove(new Cache.Key(uri, conf), fs); + } + /** * Get a FileSystem instance based on the uri, the passed in * configuration and the user. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java index 5be6e5f829b8c..680e742a36059 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java @@ -97,7 +97,7 @@ protected Help getHelp() throws IOException { return this.help; } - protected void init() throws IOException { + protected void init() { getConf().setQuietMode(true); UserGroupInformation.setConfiguration(getConf()); if (commandFactory == null) { @@ -298,7 +298,7 @@ private TableListing createOptionTableListing() { * run */ @Override - public int run(String argv[]) throws Exception { + public int run(String[] argv) { // initialize FsShell init(); Tracer tracer = new Tracer.Builder("FsShell"). diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java index 57b543acc21e6..3c9368ca2ed9b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java @@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.PathIOException; @@ -218,26 +219,35 @@ protected void processPath(PathData item) throws IOException { // delete files from the trash that are older // than the retention threshold. 
static class Expunge extends FsCommand { + private static final String OPTION_FILESYSTEM = "fs"; + public static final String NAME = "expunge"; public static final String USAGE = - "[-immediate]"; + "[-immediate] [-" + OPTION_FILESYSTEM + " ]"; public static final String DESCRIPTION = "Delete files from the trash that are older " + "than the retention threshold"; private boolean emptyImmediately = false; + private String fsArgument; - // TODO: should probably allow path arguments for the filesystems @Override protected void processOptions(LinkedList args) throws IOException { - CommandFormat cf = new CommandFormat(0, 1, "immediate"); + CommandFormat cf = new CommandFormat(0, 2, "immediate"); + cf.addOptionWithValue(OPTION_FILESYSTEM); cf.parse(args); emptyImmediately = cf.getOpt("immediate"); + fsArgument = cf.getOptValue(OPTION_FILESYSTEM); } @Override protected void processArguments(LinkedList args) throws IOException { + if (fsArgument != null && fsArgument.length() != 0) { + getConf().set( + CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsArgument); + } + FileSystem[] childFileSystems = FileSystem.get(getConf()).getChildFileSystems(); if (null != childFileSystems) { diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md index f050e30832c75..44927f2464489 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md @@ -268,7 +268,7 @@ Displays a summary of file lengths. expunge ------- -Usage: `hadoop fs -expunge [-immediate]` +Usage: `hadoop fs -expunge [-immediate] [-fs ]` Permanently delete files in checkpoints older than the retention threshold from trash directory, and create new checkpoint. @@ -286,6 +286,15 @@ This value should be smaller or equal to `fs.trash.interval`. If the `-immediate` option is passed, all files in the trash for the current user are immediately deleted, ignoring the `fs.trash.interval` setting. +If the `-fs` option is passed, the supplied filesystem will be expunged, +rather than the default filesystem and checkpoint is created. + +For example + +``` +hadoop fs -expunge --immediate -fs s3a://landsat-pds/ +``` + Refer to the [HDFS Architecture guide](../hadoop-hdfs/HdfsDesign.html#File_Deletes_and_Undeletes) for more information about trash feature of HDFS. 
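The new -fs option can also be exercised programmatically through FsShell. A minimal sketch, assuming a placeholder s3a URI; the class name and bucket below are illustrative, not taken from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class ExpungeExample {
      public static void main(String[] args) throws Exception {
        // Equivalent of: hadoop fs -expunge -immediate -fs s3a://example-bucket/
        Configuration conf = new Configuration();
        int exitCode = ToolRunner.run(conf, new FsShell(),
            new String[] {"-expunge", "-immediate", "-fs",
                "s3a://example-bucket/"});
        System.exit(exitCode);
      }
    }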
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java index cf22f3b10b58d..e8e028732b2a8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java @@ -38,7 +38,10 @@ import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.junit.Assert.*; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.TrashPolicyDefault.Emptier; @@ -112,7 +115,7 @@ static void checkNotInTrash(FileSystem fs, Path trashRoot, String pathname) * @throws IOException */ public static void trashShell(final FileSystem fs, final Path base) - throws IOException { + throws Exception { Configuration conf = new Configuration(); conf.set("fs.defaultFS", fs.getUri().toString()); trashShell(conf, base, null, null); @@ -127,8 +130,7 @@ public static void trashShell(final FileSystem fs, final Path base) * @throws IOException */ public static void trashShell(final Configuration conf, final Path base, - FileSystem trashRootFs, Path trashRoot) - throws IOException { + FileSystem trashRootFs, Path trashRoot) throws Exception { FileSystem fs = FileSystem.get(conf); conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled @@ -163,13 +165,9 @@ public static void trashShell(final Configuration conf, final Path base, String[] args = new String[1]; args[0] = "-expunge"; int val = -1; - try { - val = shell.run(args); - } catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } - assertTrue(val == 0); + val = shell.run(args); + + assertEquals("Expunge should return zero", 0, val); } // Verify that we succeed in removing the file we created. 
@@ -179,15 +177,10 @@ public static void trashShell(final Configuration conf, final Path base, args[0] = "-rm"; args[1] = myFile.toString(); int val = -1; - try { - val = shell.run(args); - } catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } - assertTrue(val == 0); + val = shell.run(args); + + assertEquals("Remove should return zero", 0, val); - checkTrash(trashRootFs, trashRoot, fs.makeQualified(myFile)); } @@ -200,13 +193,9 @@ public static void trashShell(final Configuration conf, final Path base, args[0] = "-rm"; args[1] = new Path(base, "test/mkdirs/myFile").toString(); int val = -1; - try { - val = shell.run(args); - } catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } - assertTrue(val == 0); + val = shell.run(args); + + assertEquals("Remove should return zero", 0, val); } // Verify that we can recreate the file @@ -219,13 +208,9 @@ public static void trashShell(final Configuration conf, final Path base, args[0] = "-rmr"; args[1] = new Path(base, "test/mkdirs").toString(); int val = -1; - try { - val = shell.run(args); - } catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } - assertTrue(val == 0); + val = shell.run(args); + + assertEquals("Recursive Remove should return zero", 0, val); } // recreate directory @@ -237,29 +222,22 @@ public static void trashShell(final Configuration conf, final Path base, args[0] = "-rmr"; args[1] = new Path(base, "test/mkdirs").toString(); int val = -1; - try { - val = shell.run(args); - } catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } - assertTrue(val == 0); + val = shell.run(args); + + assertEquals("Recursive Remove should return zero", 0, val); } // Check that we can delete a file from the trash { - Path toErase = new Path(trashRoot, "toErase"); - int retVal = -1; - writeFile(trashRootFs, toErase, 10); - try { - retVal = shell.run(new String[] {"-rm", toErase.toString()}); - } catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } - assertTrue(retVal == 0); - checkNotInTrash (trashRootFs, trashRoot, toErase.toString()); - checkNotInTrash (trashRootFs, trashRoot, toErase.toString()+".1"); + Path toErase = new Path(trashRoot, "toErase"); + int val = -1; + writeFile(trashRootFs, toErase, 10); + + val = shell.run(new String[] {"-rm", toErase.toString()}); + + assertEquals("Recursive Remove should return zero", 0, val); + checkNotInTrash(trashRootFs, trashRoot, toErase.toString()); + checkNotInTrash(trashRootFs, trashRoot, toErase.toString()+".1"); } // simulate Trash removal @@ -267,17 +245,14 @@ public static void trashShell(final Configuration conf, final Path base, String[] args = new String[1]; args[0] = "-expunge"; int val = -1; - try { - val = shell.run(args); - } catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } - assertTrue(val == 0); + val = shell.run(args); + + assertEquals("Expunge should return zero", 0, val); } // verify that after expunging the Trash, it really goes away - checkNotInTrash(trashRootFs, trashRoot, new Path(base, "test/mkdirs/myFile").toString()); + checkNotInTrash(trashRootFs, trashRoot, new Path( + base, "test/mkdirs/myFile").toString()); // recreate directory and file mkdir(fs, myPath); @@ -289,26 +264,18 @@ public static void trashShell(final Configuration conf, 
final Path base, args[0] = "-rm"; args[1] = myFile.toString(); int val = -1; - try { - val = shell.run(args); - } catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } - assertTrue(val == 0); + val = shell.run(args); + + assertEquals("Remove should return zero", 0, val); checkTrash(trashRootFs, trashRoot, myFile); args = new String[2]; args[0] = "-rmr"; args[1] = myPath.toString(); val = -1; - try { - val = shell.run(args); - } catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } - assertTrue(val == 0); + val = shell.run(args); + + assertEquals("Recursive Remove should return zero", 0, val); checkTrash(trashRootFs, trashRoot, myPath); } @@ -318,13 +285,9 @@ public static void trashShell(final Configuration conf, final Path base, args[0] = "-rmr"; args[1] = trashRoot.getParent().getParent().toString(); int val = -1; - try { - val = shell.run(args); - } catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } - assertEquals("exit code", 1, val); + val = shell.run(args); + + assertEquals("Recursive Remove should return exit code 1", 1, val); assertTrue(trashRootFs.exists(trashRoot)); } @@ -341,23 +304,18 @@ public static void trashShell(final Configuration conf, final Path base, args[1] = "-skipTrash"; args[2] = myFile.toString(); int val = -1; - try { - // Clear out trash - assertEquals("-expunge failed", - 0, shell.run(new String [] { "-expunge" } )); - - val = shell.run(args); - - }catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } + // Clear out trash + assertEquals("-expunge failed", + 0, shell.run(new String[] {"-expunge" })); + + val = shell.run(args); + assertFalse("Expected TrashRoot (" + trashRoot + ") to exist in file system:" + trashRootFs.getUri(), trashRootFs.exists(trashRoot)); // No new Current should be created assertFalse(fs.exists(myFile)); - assertTrue(val == 0); + assertEquals("Remove with skipTrash should return zero", 0, val); } // recreate directory and file @@ -372,64 +330,52 @@ public static void trashShell(final Configuration conf, final Path base, args[2] = myPath.toString(); int val = -1; - try { - // Clear out trash - assertEquals(0, shell.run(new String [] { "-expunge" } )); - - val = shell.run(args); + // Clear out trash + assertEquals(0, shell.run(new String[] {"-expunge" })); - }catch (Exception e) { - System.err.println("Exception raised from Trash.run " + - e.getLocalizedMessage()); - } + val = shell.run(args); assertFalse(trashRootFs.exists(trashRoot)); // No new Current should be created assertFalse(fs.exists(myPath)); assertFalse(fs.exists(myFile)); - assertTrue(val == 0); + assertEquals("Remove with skipTrash should return zero", 0, val); } // deleting same file multiple times { int val = -1; mkdir(fs, myPath); - - try { - assertEquals(0, shell.run(new String [] { "-expunge" } )); - } catch (Exception e) { - System.err.println("Exception raised from fs expunge " + - e.getLocalizedMessage()); - } + assertEquals("Expunge should return zero", + 0, shell.run(new String[] {"-expunge" })); + // create a file in that directory. myFile = new Path(base, "test/mkdirs/myFile"); - String [] args = new String[] {"-rm", myFile.toString()}; + String[] args = new String[] {"-rm", myFile.toString()}; int num_runs = 10; - for(int i=0;i Date: Thu, 30 May 2019 13:27:48 -0700 Subject: [PATCH 0078/1308] HDFS-14497. 
Write lock held by metasave impact following RPC processing. Contributed by He Xiaoqiao. Signed-off-by: Wei-Chiu Chuang --- .../server/blockmanagement/BlockManager.java | 2 +- .../hdfs/server/namenode/FSNamesystem.java | 28 +++++---- .../hdfs/server/namenode/TestMetaSave.java | 60 +++++++++++++++++++ 3 files changed, 79 insertions(+), 11 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 4e351c021bf74..9cfa18098e1b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -733,7 +733,7 @@ public BlockPlacementPolicy getBlockPlacementPolicy() { /** Dump meta data to out. */ public void metaSave(PrintWriter out) { - assert namesystem.hasWriteLock(); // TODO: block manager read lock and NS write lock + assert namesystem.hasReadLock(); // TODO: block manager read lock and NS write lock final List live = new ArrayList(); final List dead = new ArrayList(); datanodeManager.fetchDatanodes(live, dead, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 5c9341f82b78a..70b65f3982cb3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -592,6 +592,12 @@ private void logAuditEvent(boolean succeeded, private boolean resourceLowSafeMode = false; private String nameNodeHostName = null; + /** + * HDFS-14497: Concurrency control when many metaSave request to write + * meta to same out stream after switch to read lock. 
+ */ + private Object metaSaveLock = new Object(); + /** * Notify that loading of this FSDirectory is complete, and * it is imageLoaded for use @@ -1769,24 +1775,26 @@ void metaSave(String filename) throws IOException { String operationName = "metaSave"; checkSuperuserPrivilege(operationName); checkOperation(OperationCategory.READ); - writeLock(); + readLock(); try { checkOperation(OperationCategory.READ); - File file = new File(System.getProperty("hadoop.log.dir"), filename); - PrintWriter out = new PrintWriter(new BufferedWriter( - new OutputStreamWriter(Files.newOutputStream(file.toPath()), - Charsets.UTF_8))); - metaSave(out); - out.flush(); - out.close(); + synchronized(metaSaveLock) { + File file = new File(System.getProperty("hadoop.log.dir"), filename); + PrintWriter out = new PrintWriter(new BufferedWriter( + new OutputStreamWriter(Files.newOutputStream(file.toPath()), + Charsets.UTF_8))); + metaSave(out); + out.flush(); + out.close(); + } } finally { - writeUnlock(operationName); + readUnlock(operationName); } logAuditEvent(true, operationName, null); } private void metaSave(PrintWriter out) { - assert hasWriteLock(); + assert hasReadLock(); long totalInodes = this.dir.totalInodes(); long totalBlocks = this.getBlocksTotal(); out.println(totalInodes + " files and directories, " + totalBlocks diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java index 8cc1433cef5f6..d4748f3d60140 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -27,6 +27,7 @@ import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; +import java.util.ArrayList; import java.util.concurrent.TimeoutException; import com.google.common.base.Supplier; @@ -215,6 +216,65 @@ public void testMetaSaveOverwrite() throws Exception { } } + class MetaSaveThread extends Thread { + NamenodeProtocols nnRpc; + String filename; + public MetaSaveThread(NamenodeProtocols nnRpc, String filename) { + this.nnRpc = nnRpc; + this.filename = filename; + } + + @Override + public void run() { + try { + nnRpc.metaSave(filename); + } catch (IOException e) { + } + } + } + + /** + * Tests that metasave concurrent output file (not append). + */ + @Test + public void testConcurrentMetaSave() throws Exception { + ArrayList threads = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + threads.add(new MetaSaveThread(nnRpc, "metaSaveConcurrent.out.txt")); + } + for (int i = 0; i < 10; i++) { + threads.get(i).start(); + } + for (int i = 0; i < 10; i++) { + threads.get(i).join(); + } + // Read output file. + FileInputStream fis = null; + InputStreamReader isr = null; + BufferedReader rdr = null; + try { + fis = new FileInputStream(getLogFile("metaSaveConcurrent.out.txt")); + isr = new InputStreamReader(fis); + rdr = new BufferedReader(isr); + + // Validate that file was overwritten (not appended) by checking for + // presence of only one "Live Datanodes" line. 
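// Context, not part of the patch: the overwrite is guaranteed because
// FSNamesystem#metaSave now holds only the read lock and serializes the
// actual file write on the private metaSaveLock object added above, so
// each of the ten concurrent calls rewrites the output file in full.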
+ boolean foundLiveDatanodesLine = false; + String line = rdr.readLine(); + while (line != null) { + if (line.startsWith("Live Datanodes")) { + if (foundLiveDatanodesLine) { + fail("multiple Live Datanodes lines, output file not overwritten"); + } + foundLiveDatanodesLine = true; + } + line = rdr.readLine(); + } + } finally { + IOUtils.cleanup(null, rdr, isr, fis); + } + } + @After public void tearDown() throws IOException { if (fileSys != null) From c965f7f499011d4497547304d754b1085fdf79d4 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Thu, 30 May 2019 15:28:15 -0700 Subject: [PATCH 0079/1308] HDDS-1551. Implement Bucket Write Requests to use Cache and DoubleBuffer. (#850) --- .../utils/db/cache/PartialTableCache.java | 17 +- .../om/protocol/OzoneManagerHAProtocol.java | 52 ---- .../src/main/proto/OzoneManagerProtocol.proto | 6 +- .../apache/hadoop/ozone/om/TestOmMetrics.java | 6 +- .../hadoop/ozone/om/TestOzoneManagerHA.java | 4 +- .../apache/hadoop/ozone/om/BucketManager.java | 27 +- .../hadoop/ozone/om/BucketManagerImpl.java | 65 +---- .../apache/hadoop/ozone/om/OzoneManager.java | 69 +---- .../hadoop/ozone/om/S3BucketManagerImpl.java | 14 +- .../om/ratis/OzoneManagerDoubleBuffer.java | 17 +- .../om/ratis/OzoneManagerRatisServer.java | 122 +++++++- .../om/ratis/OzoneManagerStateMachine.java | 19 +- .../ratis/utils/OzoneManagerRatisUtils.java | 74 +++++ .../ozone/om/ratis/utils/package-info.java | 21 ++ .../ozone/om/request/OMClientRequest.java | 73 +++++ .../request/bucket/OMBucketCreateRequest.java | 204 +++++++++++++ .../request/bucket/OMBucketDeleteRequest.java | 117 ++++++++ .../bucket/OMBucketSetPropertyRequest.java | 189 ++++++++++++ .../ozone/om/request/bucket/package-info.java | 23 ++ .../hadoop/ozone/om/request/package-info.java | 21 ++ .../ozone/om/response/OMClientResponse.java | 28 +- .../om/response/OMVolumeCreateResponse.java | 16 +- .../om/response/OMVolumeDeleteResponse.java | 16 +- .../{ => bucket}/OMBucketCreateResponse.java | 12 +- .../{ => bucket}/OMBucketDeleteResponse.java | 11 +- .../bucket/OMBucketSetPropertyResponse.java | 51 ++++ .../om/response/bucket/package-info.java | 23 ++ .../OzoneManagerHARequestHandler.java | 4 +- .../OzoneManagerHARequestHandlerImpl.java | 170 +++-------- ...ManagerProtocolServerSideTranslatorPB.java | 64 ++++- .../OzoneManagerRequestHandler.java | 14 +- ...eManagerDoubleBufferWithDummyResponse.java | 18 +- ...zoneManagerDoubleBufferWithOMResponse.java | 36 ++- .../om/ratis/TestOzoneManagerRatisServer.java | 25 +- .../ratis/TestOzoneManagerStateMachine.java | 13 +- .../ozone/om/request/TestOMRequestUtils.java | 73 +++++ .../bucket/TestOMBucketCreateRequest.java | 270 ++++++++++++++++++ .../bucket/TestOMBucketDeleteRequest.java | 152 ++++++++++ .../TestOMBucketSetPropertyRequest.java | 162 +++++++++++ .../ozone/om/request/bucket/package-info.java | 23 ++ .../om/response/TestOMResponseUtils.java | 40 +++ .../bucket/TestOMBucketCreateResponse.java | 87 ++++++ .../bucket/TestOMBucketDeleteResponse.java | 97 +++++++ .../TestOMBucketSetPropertyResponse.java | 88 ++++++ .../om/response/bucket/package-info.java | 23 ++ .../org.mockito.plugins.MockMaker | 16 ++ 46 files changed, 2247 insertions(+), 425 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/package-info.java create mode 100644 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java rename hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/{ => bucket}/OMBucketCreateResponse.java (80%) rename hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/{ => bucket}/OMBucketDeleteResponse.java (80%) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketCreateResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketDeleteResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketSetPropertyResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java index 4d3711269a168..fc3009605b4f1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java @@ -83,13 +83,16 @@ private void evictCache(long epoch) { iterator.hasNext();) { currentEntry = iterator.next(); CACHEKEY cachekey = currentEntry.getCachekey(); - CacheValue cacheValue = cache.get(cachekey); - if (cacheValue.getEpoch() <= epoch) { - cache.remove(cachekey); - iterator.remove(); - } else { - // If currentEntry epoch is greater than epoch, we have deleted all - // entries less than specified epoch. So, we can break. 
+ CacheValue cacheValue = cache.computeIfPresent(cachekey, ((k, v) -> { + if (v.getEpoch() <= epoch) { + iterator.remove(); + return null; + } + return v; + })); + // If currentEntry epoch is greater than epoch, we have deleted all + // entries less than specified epoch. So, we can break. + if (cacheValue != null && cacheValue.getEpoch() >= epoch) { break; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java index eb514d0961317..ad2bc316f5e19 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java @@ -18,8 +18,6 @@ package org.apache.hadoop.ozone.om.protocol; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDeleteVolumeResponse; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -168,54 +166,4 @@ void applySetOwner(String oldOwner, VolumeList oldOwnerVolumeList, */ void applyDeleteVolume(String volume, String owner, VolumeList newVolumeList) throws IOException; - - /** - * Start Create Bucket Transaction. - * @param omBucketInfo - * @return OmBucketInfo - * @throws IOException - */ - OmBucketInfo startCreateBucket(OmBucketInfo omBucketInfo) throws IOException; - - /** - * Apply Create Bucket Changes to OM DB. - * @param omBucketInfo - * @throws IOException - */ - void applyCreateBucket(OmBucketInfo omBucketInfo) throws IOException; - - /** - * Start Delete Bucket Transaction. - * @param volumeName - * @param bucketName - * @throws IOException - */ - void startDeleteBucket(String volumeName, String bucketName) - throws IOException; - - /** - * Apply Delete Bucket changes to OM DB. - * @param volumeName - * @param bucketName - * @throws IOException - */ - void applyDeleteBucket(String volumeName, String bucketName) - throws IOException; - - /** - * Start SetBucket Property Transaction. - * @param omBucketArgs - * @return OmBucketInfo - * @throws IOException - */ - OmBucketInfo startSetBucketProperty(OmBucketArgs omBucketArgs) - throws IOException; - - /** - * Apply SetBucket Property changes to OM DB. - * @param omBucketInfo - * @throws IOException - */ - void applySetBucketProperty(OmBucketInfo omBucketInfo) throws IOException; - } diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index 316acbcbbc40c..6577de7368ee8 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -383,7 +383,7 @@ message BucketInfo { repeated OzoneAclInfo acls = 3; required bool isVersionEnabled = 4 [default = false]; required StorageTypeProto storageType = 5 [default = DISK]; - required uint64 creationTime = 6; + optional uint64 creationTime = 6; repeated hadoop.hdds.KeyValue metadata = 7; optional BucketEncryptionInfoProto beinfo = 8; } @@ -553,11 +553,7 @@ message InfoBucketResponse { } message SetBucketPropertyRequest { - //TODO: See if we can merge bucketArgs and bucketInfo optional BucketArgs bucketArgs = 1; - // This will be set during startTransaction, and used to apply to OM DB - // during applyTransaction. 
- optional BucketInfo bucketInfo = 2; } message SetBucketPropertyResponse { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 39fada8959d14..5d739c2988a62 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -171,11 +171,11 @@ public void testBucketOps() throws IOException { Mockito.doNothing().when(mockS3Bm).deleteS3Bucket("random"); Mockito.doReturn(true).when(mockS3Bm).createOzoneVolumeIfNeeded(null); - Mockito.doReturn(null).when(mockBm).createBucket(null); - Mockito.doReturn(null).when(mockBm).createBucket(null); + Mockito.doNothing().when(mockBm).createBucket(null); + Mockito.doNothing().when(mockBm).createBucket(null); Mockito.doNothing().when(mockBm).deleteBucket(null, null); Mockito.doReturn(null).when(mockBm).getBucketInfo(null, null); - Mockito.doReturn(null).when(mockBm).setBucketProperty(null); + Mockito.doNothing().when(mockBm).setBucketProperty(null); Mockito.doReturn(null).when(mockBm).listBuckets(null, null, null, 0); HddsWhiteboxTestUtils.setInternalState( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index 422a02c0f4cc7..3c168b3241a07 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -405,7 +405,7 @@ private void createKeyTest(boolean checkSuccess) throws Exception { // last running OM as it would fail to get a quorum. if (e instanceof RemoteException) { GenericTestUtils.assertExceptionContains( - "RaftRetryFailureException", e); + "NotLeaderException", e); } } else { throw e; @@ -446,7 +446,7 @@ private void createVolumeTest(boolean checkSuccess) throws Exception { // last running OM as it would fail to get a quorum. if (e instanceof RemoteException) { GenericTestUtils.assertExceptionContains( - "RaftRetryFailureException", e); + "NotLeaderException", e); } } else { throw e; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java index 460ac1157c34d..4417567d9b6db 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java @@ -30,14 +30,7 @@ public interface BucketManager { * Creates a bucket. * @param bucketInfo - OmBucketInfo for creating bucket. */ - OmBucketInfo createBucket(OmBucketInfo bucketInfo) throws IOException; - - /** - * Apply Create Bucket changes to OM DB. - * @param omBucketInfo - * @throws IOException - */ - void applyCreateBucket(OmBucketInfo omBucketInfo) throws IOException; + void createBucket(OmBucketInfo bucketInfo) throws IOException; /** @@ -53,14 +46,7 @@ OmBucketInfo getBucketInfo(String volumeName, String bucketName) * @param args - BucketArgs. * @throws IOException */ - OmBucketInfo setBucketProperty(OmBucketArgs args) throws IOException; - - /** - * Apply SetBucket Property changes to OM DB. 
- * @param omBucketInfo - * @throws IOException - */ - void applySetBucketProperty(OmBucketInfo omBucketInfo) throws IOException; + void setBucketProperty(OmBucketArgs args) throws IOException; /** * Deletes an existing empty bucket from volume. @@ -70,15 +56,6 @@ OmBucketInfo getBucketInfo(String volumeName, String bucketName) */ void deleteBucket(String volumeName, String bucketName) throws IOException; - /** - * Apply Delete Bucket changes to OM DB. - * @param volumeName - * @param bucketName - * @throws IOException - */ - void applyDeleteBucket(String volumeName, String bucketName) - throws IOException; - /** * Returns a list of buckets represented by {@link OmBucketInfo} * in the given volume. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 68cd2a9dd6232..1a6c628ae0385 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -102,7 +102,7 @@ KeyProviderCryptoExtension getKMSProvider() { * @param bucketInfo - OmBucketInfo. */ @Override - public OmBucketInfo createBucket(OmBucketInfo bucketInfo) throws IOException { + public void createBucket(OmBucketInfo bucketInfo) throws IOException { Preconditions.checkNotNull(bucketInfo); String volumeName = bucketInfo.getVolumeName(); String bucketName = bucketInfo.getBucketName(); @@ -165,11 +165,8 @@ public OmBucketInfo createBucket(OmBucketInfo bucketInfo) throws IOException { } OmBucketInfo omBucketInfo = omBucketInfoBuilder.build(); - if (!isRatisEnabled) { - commitCreateBucketInfoToDB(omBucketInfo); - } + commitBucketInfoToDB(omBucketInfo); LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName); - return omBucketInfo; } catch (IOException | DBException ex) { if (!(ex instanceof OMException)) { LOG.error("Bucket creation failed for bucket:{} in volume:{}", @@ -182,19 +179,7 @@ public OmBucketInfo createBucket(OmBucketInfo bucketInfo) throws IOException { } } - - public void applyCreateBucket(OmBucketInfo omBucketInfo) throws IOException { - Preconditions.checkNotNull(omBucketInfo); - try { - commitCreateBucketInfoToDB(omBucketInfo); - } catch (IOException ex) { - LOG.error("Apply CreateBucket Failed for bucket: {}, volume: {}", - omBucketInfo.getBucketName(), omBucketInfo.getVolumeName(), ex); - throw ex; - } - } - - private void commitCreateBucketInfoToDB(OmBucketInfo omBucketInfo) + private void commitBucketInfoToDB(OmBucketInfo omBucketInfo) throws IOException { String dbBucketKey = metadataManager.getBucketKey(omBucketInfo.getVolumeName(), @@ -243,7 +228,7 @@ public OmBucketInfo getBucketInfo(String volumeName, String bucketName) * @throws IOException - On Failure. 
*/ @Override - public OmBucketInfo setBucketProperty(OmBucketArgs args) throws IOException { + public void setBucketProperty(OmBucketArgs args) throws IOException { Preconditions.checkNotNull(args); String volumeName = args.getVolumeName(); String bucketName = args.getBucketName(); @@ -296,11 +281,7 @@ public OmBucketInfo setBucketProperty(OmBucketArgs args) throws IOException { bucketInfoBuilder.setCreationTime(oldBucketInfo.getCreationTime()); OmBucketInfo omBucketInfo = bucketInfoBuilder.build(); - - if (!isRatisEnabled) { - commitSetBucketPropertyInfoToDB(omBucketInfo); - } - return omBucketInfo; + commitBucketInfoToDB(omBucketInfo); } catch (IOException | DBException ex) { if (!(ex instanceof OMException)) { LOG.error("Setting bucket property failed for bucket:{} in volume:{}", @@ -312,23 +293,6 @@ public OmBucketInfo setBucketProperty(OmBucketArgs args) throws IOException { } } - public void applySetBucketProperty(OmBucketInfo omBucketInfo) - throws IOException { - try { - commitSetBucketPropertyInfoToDB(omBucketInfo); - } catch (IOException ex) { - LOG.error("Apply SetBucket property failed for bucket:{} in " + - "volume:{}", omBucketInfo.getBucketName(), - omBucketInfo.getVolumeName(), ex); - throw ex; - } - } - - private void commitSetBucketPropertyInfoToDB(OmBucketInfo omBucketInfo) - throws IOException { - commitCreateBucketInfoToDB(omBucketInfo); - } - /** * Updates the existing ACL list with remove and add ACLs that are passed. * Remove is done before Add. @@ -377,10 +341,7 @@ public void deleteBucket(String volumeName, String bucketName) throw new OMException("Bucket is not empty", OMException.ResultCodes.BUCKET_NOT_EMPTY); } - - if (!isRatisEnabled) { - commitDeleteBucketInfoToOMDB(bucketKey); - } + commitDeleteBucketInfoToOMDB(bucketKey); } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName, @@ -392,20 +353,6 @@ public void deleteBucket(String volumeName, String bucketName) } } - public void applyDeleteBucket(String volumeName, String bucketName) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - try { - commitDeleteBucketInfoToOMDB(metadataManager.getBucketKey(volumeName, - bucketName)); - } catch (IOException ex) { - LOG.error("Apply DeleteBucket Failed for bucket: {}, volume: {}", - bucketName, volumeName, ex); - throw ex; - } - } - private void commitDeleteBucketInfoToOMDB(String dbBucketKey) throws IOException { metadataManager.getBucketTable().delete(dbBucketKey); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 531287915f2b4..845e53a757f52 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -316,6 +316,7 @@ private OzoneManager(OzoneConfiguration conf) throws IOException, RPC.setProtocolEngine(configuration, OzoneManagerProtocolPB.class, ProtobufRpcEngine.class); + metadataManager = new OmMetadataManagerImpl(configuration); startRatisServer(); startRatisClient(); @@ -328,7 +329,6 @@ private OzoneManager(OzoneConfiguration conf) throws IOException, secConfig = new SecurityConfig(configuration); - metadataManager = new OmMetadataManagerImpl(configuration); volumeManager = new VolumeManagerImpl(metadataManager, configuration); // Create the KMS Key 
Provider @@ -1273,7 +1273,7 @@ private RPC.Server getRpcServer(OzoneConfiguration conf) throws IOException { BlockingService omService = newReflectiveBlockingService( new OzoneManagerProtocolServerSideTranslatorPB(this, omRatisServer, - omRatisClient, isRatisEnabled)); + isRatisEnabled)); return startRpcServer(configuration, omNodeRpcAddr, OzoneManagerProtocolPB.class, omService, handlerCount); @@ -1724,67 +1724,6 @@ public void applyDeleteVolume(String volume, String owner, volumeManager.applyDeleteVolume(volume, owner, newVolumeList); } - - @Override - public OmBucketInfo startCreateBucket(OmBucketInfo omBucketInfo) - throws IOException { - Preconditions.checkNotNull(omBucketInfo); - if(isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.CREATE, - omBucketInfo.getVolumeName(), omBucketInfo.getBucketName(), null); - } - - return bucketManager.createBucket(omBucketInfo); - } - - @Override - public void applyCreateBucket(OmBucketInfo omBucketInfo) throws IOException { - // TODO: Need to add metrics and Audit log for HA requests - bucketManager.applyCreateBucket(omBucketInfo); - } - - - @Override - public void startDeleteBucket(String volumeName, String bucketName) - throws IOException { - // TODO: Need to add metrics and Audit log for HA requests - if(isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.CREATE, - volumeName, bucketName, null); - } - - bucketManager.deleteBucket(volumeName, bucketName); - } - - - @Override - public void applyDeleteBucket(String volumeName, String bucketName) - throws IOException { - // TODO: Need to add metrics and Audit log for HA requests - bucketManager.applyDeleteBucket(volumeName, bucketName); - } - - - @Override - public OmBucketInfo startSetBucketProperty(OmBucketArgs omBucketArgs) - throws IOException { - Preconditions.checkNotNull(omBucketArgs); - // TODO: Need to add metrics and Audit log for HA requests - if(isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.CREATE, - omBucketArgs.getVolumeName(), omBucketArgs.getBucketName(), null); - } - return bucketManager.setBucketProperty(omBucketArgs); - } - - - @Override - public void applySetBucketProperty(OmBucketInfo omBucketInfo) - throws IOException { - // TODO: Need to add metrics and Audit log for HA requests - bucketManager.applySetBucketProperty(omBucketInfo); - } - /** * Checks if current caller has acl permissions. 
* @@ -3135,4 +3074,8 @@ public String getComponent() { public OMFailoverProxyProvider getOMFailoverProxyProvider() { return null; } + + public OMMetrics getOmMetrics() { + return metrics; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java index 87fc8cd99b040..c234266f887e2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java @@ -137,13 +137,7 @@ public void deleteS3Bucket(String bucketName) throws IOException { OMException.ResultCodes.S3_BUCKET_NOT_FOUND); } - if (isRatisEnabled) { - bucketManager.deleteBucket(getOzoneVolumeName(bucketName), bucketName); - bucketManager.applyDeleteBucket(getOzoneVolumeName(bucketName), - bucketName); - } else { - bucketManager.deleteBucket(getOzoneVolumeName(bucketName), bucketName); - } + bucketManager.deleteBucket(getOzoneVolumeName(bucketName), bucketName); omMetadataManager.getS3Table().delete(bucketName); } catch(IOException ex) { throw ex; @@ -202,11 +196,7 @@ private void createOzoneBucket(String volumeName, String bucketName) .setIsVersionEnabled(Boolean.FALSE) .setStorageType(StorageType.DEFAULT) .build(); - if (isRatisEnabled) { - bucketManager.applyCreateBucket(bucketManager.createBucket(bucketInfo)); - } else { - bucketManager.createBucket(bucketInfo); - } + bucketManager.createBucket(bucketInfo); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index a68b94a9813c1..4927b4c2e7bdb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -46,7 +46,7 @@ public class OzoneManagerDoubleBuffer { private static final Logger LOG = - LoggerFactory.getLogger(OzoneManagerDoubleBuffer.class.getName()); + LoggerFactory.getLogger(OzoneManagerDoubleBuffer.class); // Taken unbounded queue, if sync thread is taking too long time, we // might end up taking huge memory to add entries to the buffer. @@ -109,7 +109,14 @@ private void flushTransactions() { LOG.debug("Sync Iteration {} flushed transactions in this " + "iteration{}", flushIterations.get(), flushedTransactionsSize); + + long lastRatisTransactionIndex = + readyBuffer.stream().map(DoubleBufferEntry::getTrxLogIndex) + .max(Long::compareTo).get(); + readyBuffer.clear(); + // cleanup cache. + cleanupCache(lastRatisTransactionIndex); // TODO: update the last updated index in OzoneManagerStateMachine. } } catch (InterruptedException ex) { @@ -134,6 +141,14 @@ private void flushTransactions() { } } + private void cleanupCache(long lastRatisTransactionIndex) { + // As now only bucket transactions are handled only called cleanupCache + // on bucketTable. + // TODO: After supporting all write operations we need to call + // cleanupCache on the tables only when buffer has entries for that table. + omMetadataManager.getBucketTable().cleanupCache(lastRatisTransactionIndex); + } + /** * Stop OM DoubleBuffer flush thread. 
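The flush path above drains the ready buffer, takes the highest trxLogIndex in the drained batch, and then calls cleanupCache with that index so the bucket table cache can drop entries that are now durable. A self-contained sketch of the double-buffer pattern follows, with simplified names; the real OzoneManagerDoubleBuffer runs a dedicated flush daemon and writes through a RocksDB batch operation.

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.function.LongConsumer;

/** Sketch of the double-buffer flush pattern; names are simplified and illustrative. */
final class DoubleBufferSketch {

  /** One applied transaction waiting to be flushed. */
  static final class Entry {
    final long trxLogIndex;
    final Runnable dbWrite;   // stands in for OMClientResponse#addToDBBatch
    Entry(long trxLogIndex, Runnable dbWrite) {
      this.trxLogIndex = trxLogIndex;
      this.dbWrite = dbWrite;
    }
  }

  private Queue<Entry> currentBuffer = new ArrayDeque<>();
  private Queue<Entry> readyBuffer = new ArrayDeque<>();
  private final LongConsumer cleanupCache;   // e.g. bucket table cache cleanup

  DoubleBufferSketch(LongConsumer cleanupCache) {
    this.cleanupCache = cleanupCache;
  }

  /** Called from applyTransaction with the response of one request. */
  synchronized void add(Entry entry) {
    currentBuffer.add(entry);
  }

  /** One flush iteration: swap buffers, write the batch, then clean the cache. */
  void flushOnce() {
    synchronized (this) {
      Queue<Entry> tmp = currentBuffer;   // swap so appliers keep filling the other buffer
      currentBuffer = readyBuffer;
      readyBuffer = tmp;
    }
    if (readyBuffer.isEmpty()) {
      return;
    }
    long lastRatisTransactionIndex = Long.MIN_VALUE;
    for (Entry entry : readyBuffer) {
      entry.dbWrite.run();                // batched DB writes in the real code
      lastRatisTransactionIndex =
          Math.max(lastRatisTransactionIndex, entry.trxLogIndex);
    }
    readyBuffer.clear();
    cleanupCache.accept(lastRatisTransactionIndex);   // everything <= index is durable now
  }

  public static void main(String[] args) {
    DoubleBufferSketch buffer = new DoubleBufferSketch(
        index -> System.out.println("cleanupCache up to index " + index));
    buffer.add(new Entry(1L, () -> System.out.println("write createBucket(b1)")));
    buffer.add(new Entry(2L, () -> System.out.println("write deleteBucket(b2)")));
    buffer.flushOnce();
  }
}

The swap is what keeps applyTransaction non-blocking: appliers only ever append to the current buffer while the flush side drains the other one.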
*/ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index b16f9f23ca313..d78cc66e37aa2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -35,12 +35,20 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.scm.HddsServerUtil; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMNodeDetails; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; import org.apache.ratis.RaftConfigKeys; import org.apache.ratis.client.RaftClientConfigKeys; import org.apache.ratis.conf.RaftProperties; @@ -51,10 +59,15 @@ import org.apache.ratis.protocol.ClientId; import org.apache.ratis.protocol.GroupInfoReply; import org.apache.ratis.protocol.GroupInfoRequest; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.RaftClientReply; +import org.apache.ratis.protocol.RaftClientRequest; import org.apache.ratis.protocol.RaftGroup; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.protocol.StateMachineException; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.server.RaftServer; @@ -66,6 +79,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.om.exceptions.OMException.STATUS_CODE; + /** * Creates a Ratis server endpoint for OM. */ @@ -80,7 +95,7 @@ public final class OzoneManagerRatisServer { private final RaftGroup raftGroup; private final RaftPeerId raftPeerId; - private final OzoneManagerServerProtocol ozoneManager; + private final OzoneManager ozoneManager; private final OzoneManagerStateMachine omStateMachine; private final ClientId clientId = ClientId.randomId(); @@ -97,6 +112,102 @@ private static long nextCallId() { return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE; } + /** + * Submit request to Ratis server. + * @param omRequest + * @return OMResponse - response returned to the client. + * @throws ServiceException + */ + public OMResponse submitRequest(OMRequest omRequest) throws ServiceException { + RaftClientRequest raftClientRequest = + createWriteRaftClientRequest(omRequest); + RaftClientReply raftClientReply; + try { + raftClientReply = server.submitClientRequestAsync(raftClientRequest) + .get(); + } catch (Exception ex) { + throw new ServiceException(ex.getMessage(), ex); + } + + return processReply(omRequest, raftClientReply); + } + + /** + * Create Write RaftClient request from OMRequest. 
+ * @param omRequest + * @return RaftClientRequest - Raft Client request which is submitted to + * ratis server. + */ + private RaftClientRequest createWriteRaftClientRequest(OMRequest omRequest) { + return new RaftClientRequest(clientId, server.getId(), raftGroupId, + nextCallId(), + Message.valueOf(OMRatisHelper.convertRequestToByteString(omRequest)), + RaftClientRequest.writeRequestType(), null); + } + + /** + * Process the raftClientReply and return OMResponse. + * @param omRequest + * @param reply + * @return OMResponse - response which is returned to client. + * @throws ServiceException + */ + private OMResponse processReply(OMRequest omRequest, RaftClientReply reply) + throws ServiceException { + // NotLeader exception is thrown only when the raft server to which the + // request is submitted is not the leader. This can happen first time + // when client is submitting request to OM. + NotLeaderException notLeaderException = reply.getNotLeaderException(); + if (notLeaderException != null) { + throw new ServiceException(notLeaderException); + } + StateMachineException stateMachineException = + reply.getStateMachineException(); + if (stateMachineException != null) { + OMResponse.Builder omResponse = OMResponse.newBuilder(); + omResponse.setCmdType(omRequest.getCmdType()); + omResponse.setSuccess(false); + omResponse.setMessage(stateMachineException.getCause().getMessage()); + omResponse.setStatus(parseErrorStatus( + stateMachineException.getCause().getMessage())); + return omResponse.build(); + } + + try { + return OMRatisHelper.getOMResponseFromRaftClientReply(reply); + } catch (InvalidProtocolBufferException ex) { + if (ex.getMessage() != null) { + throw new ServiceException(ex.getMessage(), ex); + } else { + throw new ServiceException(ex); + } + } + + // TODO: Still need to handle RaftRetry failure exception and + // NotReplicated exception. + } + + /** + * Parse errorMessage received from the exception and convert to + * {@link OzoneManagerProtocolProtos.Status}. + * @param errorMessage + * @return OzoneManagerProtocolProtos.Status + */ + private OzoneManagerProtocolProtos.Status parseErrorStatus( + String errorMessage) { + if (errorMessage.contains(STATUS_CODE)) { + String errorCode = errorMessage.substring( + errorMessage.indexOf(STATUS_CODE) + STATUS_CODE.length()); + LOG.debug("Parsing error message for error code " + + errorCode); + return OzoneManagerProtocolProtos.Status.valueOf(errorCode.trim()); + } else { + return OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; + } + + } + + /** * Returns an OM Ratis server. * @param conf configuration @@ -108,7 +219,7 @@ private static long nextCallId() { * @throws IOException */ private OzoneManagerRatisServer(Configuration conf, - OzoneManagerServerProtocol om, + OzoneManager om, String raftGroupIdStr, RaftPeerId localRaftPeerId, InetSocketAddress addr, List raftPeers) throws IOException { @@ -157,7 +268,7 @@ public void run() { * Creates an instance of OzoneManagerRatisServer. 
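processReply above turns a StateMachineException from Ratis back into an OMResponse by parsing the OM status code that the state machine embedded in the exception message. The sketch below shows that round trip in isolation; the Status names mirror the patch, but the actual value of OMException.STATUS_CODE is not shown in this hunk, so "STATUS_CODE=" is only an assumed placeholder.

/** Round trip of an OM status code through an exception message; illustrative only. */
final class StatusRoundTripSketch {

  enum Status { OK, BUCKET_NOT_FOUND, BUCKET_ALREADY_EXISTS, BUCKET_NOT_EMPTY, INTERNAL_ERROR }

  static final String STATUS_CODE = "STATUS_CODE=";   // assumed marker value

  /** What the state machine side would embed when an OMException fails a transaction. */
  static String encode(Status status, String message) {
    return message + " " + STATUS_CODE + status.name();
  }

  /** What processReply() does with the cause message of a StateMachineException. */
  static Status parseErrorStatus(String errorMessage) {
    if (errorMessage != null && errorMessage.contains(STATUS_CODE)) {
      String errorCode = errorMessage.substring(
          errorMessage.indexOf(STATUS_CODE) + STATUS_CODE.length());
      return Status.valueOf(errorCode.trim());
    }
    return Status.INTERNAL_ERROR;   // anything unrecognised maps to an internal error
  }

  public static void main(String[] args) {
    String wireMessage = encode(Status.BUCKET_NOT_FOUND, "Bucket doesn't exist");
    System.out.println(parseErrorStatus(wireMessage));   // BUCKET_NOT_FOUND
    System.out.println(parseErrorStatus("boom"));        // INTERNAL_ERROR
  }
}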
*/ public static OzoneManagerRatisServer newOMRatisServer( - Configuration ozoneConf, OzoneManagerServerProtocol omProtocol, + Configuration ozoneConf, OzoneManager omProtocol, OMNodeDetails omNodeDetails, List peerNodes) throws IOException { @@ -202,7 +313,7 @@ private OzoneManagerStateMachine getStateMachine() { return new OzoneManagerStateMachine(this); } - public OzoneManagerServerProtocol getOzoneManager() { + public OzoneManager getOzoneManager() { return ozoneManager; } @@ -219,6 +330,7 @@ public void start() throws IOException { public void stop() { try { server.close(); + omStateMachine.stop(); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 02b8b88e10800..7160b49adcd45 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .MultipartInfoApplyInitiateRequest; @@ -67,15 +66,19 @@ public class OzoneManagerStateMachine extends BaseStateMachine { private final SimpleStateMachineStorage storage = new SimpleStateMachineStorage(); private final OzoneManagerRatisServer omRatisServer; - private final OzoneManagerServerProtocol ozoneManager; + private final OzoneManager ozoneManager; private OzoneManagerHARequestHandler handler; private RaftGroupId raftGroupId; private long lastAppliedIndex = 0; + private final OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; public OzoneManagerStateMachine(OzoneManagerRatisServer ratisServer) { this.omRatisServer = ratisServer; this.ozoneManager = omRatisServer.getOzoneManager(); - this.handler = new OzoneManagerHARequestHandlerImpl(ozoneManager); + this.ozoneManagerDoubleBuffer = + new OzoneManagerDoubleBuffer(ozoneManager.getMetadataManager()); + this.handler = new OzoneManagerHARequestHandlerImpl(ozoneManager, + ozoneManagerDoubleBuffer); } /** @@ -192,9 +195,6 @@ private TransactionContext handleStartTransactionRequests( case CreateVolume: case SetVolumeProperty: case DeleteVolume: - case CreateBucket: - case SetBucketProperty: - case DeleteBucket: newOmRequest = handler.handleStartTransaction(omRequest); break; case AllocateBlock: @@ -403,7 +403,7 @@ private IOException constructExceptionForFailedRequest( * @throws ServiceException */ private Message runCommand(OMRequest request, long trxLogIndex) { - OMResponse response = handler.handleApplyTransaction(request); + OMResponse response = handler.handleApplyTransaction(request, trxLogIndex); lastAppliedIndex = trxLogIndex; return OMRatisHelper.convertResponseToMessage(response); } @@ -439,4 +439,9 @@ public void setRaftGroupId(RaftGroupId raftGroupId) { this.raftGroupId = raftGroupId; } + + public void stop() { + ozoneManagerDoubleBuffer.stop(); + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java new file mode 100644 index 0000000000000..696c015db20ed --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.ratis.utils; + +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest; +import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest; +import org.apache.hadoop.ozone.om.request.bucket.OMBucketSetPropertyRequest; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; + +import java.io.IOException; + +/** + * Utility class used by OzoneManager HA. + */ +public final class OzoneManagerRatisUtils { + + private OzoneManagerRatisUtils() { + } + /** + * Create OMClientRequest which enacpsulates the OMRequest. + * @param omRequest + * @return OMClientRequest + * @throws IOException + */ + public static OMClientRequest createClientRequest(OMRequest omRequest) + throws IOException { + Type cmdType = omRequest.getCmdType(); + switch (cmdType) { + case CreateBucket: + return new OMBucketCreateRequest(omRequest); + case DeleteBucket: + return new OMBucketDeleteRequest(omRequest); + case SetBucketProperty: + return new OMBucketSetPropertyRequest(omRequest); + default: + // TODO: will update once all request types are implemented. + return null; + } + } + + /** + * Convert exception result to {@link OzoneManagerProtocolProtos.Status}. + * @param exception + * @return OzoneManagerProtocolProtos.Status + */ + public static Status exceptionToResponseStatus(IOException exception) { + if (exception instanceof OMException) { + return Status.values()[((OMException) exception).getResult().ordinal()]; + } else { + return Status.INTERNAL_ERROR; + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/package-info.java new file mode 100644 index 0000000000000..94fd0c89565fb --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/package-info.java @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om.ratis.utils; + +/** + * Utility class used by OzoneManager HA. + */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java new file mode 100644 index 0000000000000..51f7c8de827a7 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request; + +import java.io.IOException; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; + +/** + * OMClientRequest provides methods which every write OM request should + * implement. + */ +public abstract class OMClientRequest { + + private final OMRequest omRequest; + + public OMClientRequest(OMRequest omRequest) { + Preconditions.checkNotNull(omRequest); + this.omRequest = omRequest; + } + /** + * Perform pre-execute steps on a OMRequest. + * + * Called from the RPC context, and generates a OMRequest object which has + * all the information that will be either persisted + * in RocksDB or returned to the caller once this operation + * is executed. + * + * @return OMRequest that will be serialized and handed off to Ratis for + * consensus. + */ + public abstract OMRequest preExecute(OzoneManager ozoneManager) + throws IOException; + + /** + * Validate the OMRequest and update the cache. + * This step should verify that the request can be executed, perform + * any authorization steps and update the in-memory cache. + + * This step does not persist the changes to the database. + * + * @return the response that will be returned to the client. + */ + public abstract OMClientResponse validateAndUpdateCache( + OzoneManager ozoneManager, long transactionLogIndex); + + @VisibleForTesting + public OMRequest getOmRequest() { + return omRequest; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java new file mode 100644 index 0000000000000..83dd9552e2f0c --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -0,0 +1,204 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.bucket; + +import java.io.IOException; + +import com.google.common.base.Optional; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.crypto.CipherSuite; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .BucketEncryptionInfoProto; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateBucketRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateBucketResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .BucketInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; + +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CryptoProtocolVersionProto.ENCRYPTION_ZONES; + +/** + * Handles CreateBucket Request. + */ +public class OMBucketCreateRequest extends OMClientRequest { + private static final Logger LOG = + LoggerFactory.getLogger(OMBucketCreateRequest.class); + + public OMBucketCreateRequest(OMRequest omRequest) { + super(omRequest); + } + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + + // Get original request. + CreateBucketRequest createBucketRequest = + getOmRequest().getCreateBucketRequest(); + BucketInfo bucketInfo = createBucketRequest.getBucketInfo(); + + // Get KMS provider. + KeyProviderCryptoExtension kmsProvider = + ozoneManager.getKmsProvider(); + + // Create new Bucket request with new bucket info. + CreateBucketRequest.Builder newCreateBucketRequest = + createBucketRequest.toBuilder(); + + BucketInfo.Builder newBucketInfo = bucketInfo.toBuilder(); + + // Set creation time. 
+ newBucketInfo.setCreationTime(Time.now()); + + if (bucketInfo.hasBeinfo()) { + newBucketInfo.setBeinfo(getBeinfo(kmsProvider, bucketInfo)); + } + + newCreateBucketRequest.setBucketInfo(newBucketInfo.build()); + return getOmRequest().toBuilder().setCreateBucketRequest( + newCreateBucketRequest.build()).build(); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long transactionLogIndex) { + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumBucketCreates(); + + OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); + + BucketInfo bucketInfo = getBucketInfoFromRequest(); + + String volumeName = bucketInfo.getVolumeName(); + String bucketName = bucketInfo.getBucketName(); + + OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( + OzoneManagerProtocolProtos.Type.CreateBucket).setStatus( + OzoneManagerProtocolProtos.Status.OK); + OmBucketInfo omBucketInfo = null; + + + metadataManager.getLock().acquireVolumeLock(volumeName); + metadataManager.getLock().acquireBucketLock(volumeName, bucketName); + + try { + String volumeKey = metadataManager.getVolumeKey(volumeName); + String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); + + //Check if the volume exists + if (metadataManager.getVolumeTable().get(volumeKey) == null) { + LOG.debug("volume: {} not found ", volumeName); + throw new OMException("Volume doesn't exist", + OMException.ResultCodes.VOLUME_NOT_FOUND); + } + //Check if bucket already exists + if (metadataManager.getBucketTable().get(bucketKey) != null) { + LOG.debug("bucket: {} already exists ", bucketName); + throw new OMException("Bucket already exist", + OMException.ResultCodes.BUCKET_ALREADY_EXISTS); + } + + omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo); + LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName); + omMetrics.incNumBuckets(); + + // Update table cache. + metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), + new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex)); + + // TODO: check acls. 
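The hunk above is the validate-and-cache half of the new two-phase write path: preExecute runs once on the leader (for example to fix the creation time), the request is replicated through Ratis, validateAndUpdateCache applies it to the table cache at a transaction log index, and the returned response carries what the double buffer later writes to RocksDB in a batch. A compact, self-contained model of that flow follows; every class name in it is an illustrative stand-in, not the real OMClientRequest/OMClientResponse API.

import java.util.ArrayList;
import java.util.List;

/** Compact model of the two-phase write path; all names are illustrative stand-ins. */
final class WritePathSketch {

  /** preExecute runs once on the leader; validateAndUpdateCache runs on every apply. */
  abstract static class Request {
    abstract Request preExecute();
    abstract Response validateAndUpdateCache(long trxLogIndex);
  }

  /** Carries what must eventually reach RocksDB via the double buffer. */
  abstract static class Response {
    abstract void addToDBBatch(List<String> batch);
  }

  static final class CreateBucket extends Request {
    private final String bucketKey;
    private long creationTime;

    CreateBucket(String bucketKey) {
      this.bucketKey = bucketKey;
    }

    @Override
    Request preExecute() {
      creationTime = System.currentTimeMillis();   // the leader fixes the timestamp
      return this;
    }

    @Override
    Response validateAndUpdateCache(long trxLogIndex) {
      // Validation plus the table-cache update would happen here, as in the hunk above.
      String dbValue = bucketKey + "@" + creationTime + " (index " + trxLogIndex + ")";
      return new Response() {
        @Override
        void addToDBBatch(List<String> batch) {
          batch.add(dbValue);
        }
      };
    }
  }

  public static void main(String[] args) {
    Request request = new CreateBucket("/vol1/bucket1").preExecute();
    // ... the serialized request is replicated through Ratis here ...
    Response response = request.validateAndUpdateCache(7L);
    List<String> rocksDbBatch = new ArrayList<>();
    response.addToDBBatch(rocksDbBatch);   // done later by the double-buffer flush
    System.out.println(rocksDbBatch);
  }
}

Because the cache entry is added at validateAndUpdateCache time, reads issued before the flush already see the new bucket, and the cache entry can be dropped once its transaction index has been flushed.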
+ } catch (IOException ex) { + omMetrics.incNumBucketCreateFails(); + LOG.error("Bucket creation failed for bucket:{} in volume:{}", + bucketName, volumeName, ex); + omResponse.setStatus( + OzoneManagerRatisUtils.exceptionToResponseStatus(ex)); + omResponse.setMessage(ex.getMessage()); + omResponse.setSuccess(false); + } finally { + metadataManager.getLock().releaseBucketLock(volumeName, bucketName); + metadataManager.getLock().releaseVolumeLock(volumeName); + } + omResponse.setCreateBucketResponse( + CreateBucketResponse.newBuilder().build()); + return new OMBucketCreateResponse(omBucketInfo, omResponse.build()); + } + + + private BucketInfo getBucketInfoFromRequest() { + CreateBucketRequest createBucketRequest = + getOmRequest().getCreateBucketRequest(); + return createBucketRequest.getBucketInfo(); + } + + private BucketEncryptionInfoProto getBeinfo( + KeyProviderCryptoExtension kmsProvider, BucketInfo bucketInfo) + throws IOException { + BucketEncryptionInfoProto bek = bucketInfo.getBeinfo(); + BucketEncryptionInfoProto.Builder bekb = null; + if (kmsProvider == null) { + throw new OMException("Invalid KMS provider, check configuration " + + CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH, + OMException.ResultCodes.INVALID_KMS_PROVIDER); + } + if (bek.getKeyName() == null) { + throw new OMException("Bucket encryption key needed.", OMException + .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); + } + // Talk to KMS to retrieve the bucket encryption key info. + KeyProvider.Metadata metadata = kmsProvider.getMetadata( + bek.getKeyName()); + if (metadata == null) { + throw new OMException("Bucket encryption key " + bek.getKeyName() + + " doesn't exist.", + OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); + } + // If the provider supports pool for EDEKs, this will fill in the pool + kmsProvider.warmUpEncryptedKeys(bek.getKeyName()); + bekb = BucketEncryptionInfoProto.newBuilder() + .setKeyName(bek.getKeyName()) + .setCryptoProtocolVersion(ENCRYPTION_ZONES) + .setSuite(OMPBHelper.convert( + CipherSuite.convert(metadata.getCipher()))); + return bekb.build(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java new file mode 100644 index 0000000000000..9f1ceb643ee99 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.bucket; + +import java.io.IOException; + +import com.google.common.base.Optional; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; + +/** + * Handles DeleteBucket Request. + */ +public class OMBucketDeleteRequest extends OMClientRequest { + private static final Logger LOG = + LoggerFactory.getLogger(OMBucketDeleteRequest.class); + + public OMBucketDeleteRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + // For Delete Bucket there are no preExecute steps + return getOmRequest(); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long transactionLogIndex) { + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumBucketDeletes(); + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + + OMRequest omRequest = getOmRequest(); + String volumeName = omRequest.getDeleteBucketRequest().getVolumeName(); + String bucketName = omRequest.getDeleteBucketRequest().getBucketName(); + + // acquire lock + omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName); + + // Generate end user response + OMResponse.Builder omResponse = OMResponse.newBuilder() + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCmdType(omRequest.getCmdType()); + + try { + // No need to check volume exists here, as bucket cannot be created + // with out volume creation. + //Check if bucket exists + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo omBucketInfo = + omMetadataManager.getBucketTable().get(bucketKey); + if (omBucketInfo == null) { + LOG.debug("bucket: {} not found ", bucketName); + throw new OMException("Bucket doesn't exist", + OMException.ResultCodes.BUCKET_NOT_FOUND); + } + //Check if bucket is empty + if (!omMetadataManager.isBucketEmpty(volumeName, bucketName)) { + LOG.debug("bucket: {} is not empty ", bucketName); + throw new OMException("Bucket is not empty", + OMException.ResultCodes.BUCKET_NOT_EMPTY); + } + omMetrics.decNumBuckets(); + + // Update table cache. + omMetadataManager.getBucketTable().addCacheEntry( + new CacheKey<>(bucketKey), + new CacheValue<>(Optional.absent(), transactionLogIndex)); + // TODO: check acls. 
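The emptiness check above (isBucketEmpty) has to prove that no key exists under the bucket before the delete is allowed and the tombstone is cached. The patch does not show how OMMetadataManager implements it, but a typical approach over a sorted key table is a single seek to the bucket prefix, as in the sketch below; the key layout ("/volume/bucket/key") and names are illustrative.

import java.util.TreeMap;

/** Sketch of an isBucketEmpty-style check over a sorted key table; illustrative only. */
final class BucketEmptinessSketch {

  // Key table ordered by key name, e.g. "/vol1/bucket1/key1" -> key info.
  private final TreeMap<String, String> keyTable = new TreeMap<>();

  void putKey(String volumeName, String bucketName, String keyName, String info) {
    keyTable.put("/" + volumeName + "/" + bucketName + "/" + keyName, info);
  }

  /** A single seek to the bucket prefix decides emptiness in O(log n). */
  boolean isBucketEmpty(String volumeName, String bucketName) {
    String prefix = "/" + volumeName + "/" + bucketName + "/";
    String firstKeyAtOrAfterPrefix = keyTable.ceilingKey(prefix);
    return firstKeyAtOrAfterPrefix == null
        || !firstKeyAtOrAfterPrefix.startsWith(prefix);
  }

  public static void main(String[] args) {
    BucketEmptinessSketch sketch = new BucketEmptinessSketch();
    System.out.println(sketch.isBucketEmpty("vol1", "bucket1"));   // true
    sketch.putKey("vol1", "bucket1", "key1", "keyInfo");
    System.out.println(sketch.isBucketEmpty("vol1", "bucket1"));   // false
  }
}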
+ } catch (IOException ex) { + omMetrics.incNumBucketDeleteFails(); + LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName, + volumeName, ex); + omResponse.setSuccess(false).setMessage(ex.getMessage()) + .setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex)); + } finally { + omMetadataManager.getLock().releaseBucketLock(volumeName, bucketName); + } + return new OMBucketDeleteResponse(volumeName, bucketName, + omResponse.build()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java new file mode 100644 index 0000000000000..6039867bf24fc --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -0,0 +1,189 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.bucket; + +import java.io.IOException; +import java.util.List; + +import com.google.common.base.Optional; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.response.bucket.OMBucketSetPropertyResponse; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .BucketArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; + +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; + +/** + * Handle SetBucketProperty Request. + */ +public class OMBucketSetPropertyRequest extends OMClientRequest { + private static final Logger LOG = + LoggerFactory.getLogger(OMBucketSetPropertyRequest.class); + + public OMBucketSetPropertyRequest(OMRequest omRequest) { + super(omRequest); + } + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + return getOmRequest(); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long transactionLogIndex) { + + OMMetrics omMetrics = ozoneManager.getOmMetrics(); + + // This will never be null, on a real Ozone cluster. For tests this might + // be null. using mockito, to set omMetrics object, but still getting + // null. For now added this not null check. + //TODO: Removed not null check from here, once tests got fixed. 
+ if (omMetrics != null) { + omMetrics.incNumBucketUpdates(); + } + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + + BucketArgs bucketArgs = + getOmRequest().getSetBucketPropertyRequest().getBucketArgs(); + OmBucketArgs omBucketArgs = OmBucketArgs.getFromProtobuf(bucketArgs); + + String volumeName = bucketArgs.getVolumeName(); + String bucketName = bucketArgs.getBucketName(); + + // acquire lock + omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName); + + OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( + OzoneManagerProtocolProtos.Type.CreateBucket).setStatus( + OzoneManagerProtocolProtos.Status.OK); + OmBucketInfo omBucketInfo = null; + + try { + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo oldBucketInfo = + omMetadataManager.getBucketTable().get(bucketKey); + //Check if bucket exist + if (oldBucketInfo == null) { + LOG.debug("bucket: {} not found ", bucketName); + throw new OMException("Bucket doesn't exist", + OMException.ResultCodes.BUCKET_NOT_FOUND); + } + OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder(); + bucketInfoBuilder.setVolumeName(oldBucketInfo.getVolumeName()) + .setBucketName(oldBucketInfo.getBucketName()); + bucketInfoBuilder.addAllMetadata(KeyValueUtil + .getFromProtobuf(bucketArgs.getMetadataList())); + + //Check ACLs to update + if (omBucketArgs.getAddAcls() != null || + omBucketArgs.getRemoveAcls() != null) { + bucketInfoBuilder.setAcls(getUpdatedAclList(oldBucketInfo.getAcls(), + omBucketArgs.getRemoveAcls(), omBucketArgs.getAddAcls())); + LOG.debug("Updating ACLs for bucket: {} in volume: {}", + bucketName, volumeName); + } else { + bucketInfoBuilder.setAcls(oldBucketInfo.getAcls()); + } + + //Check StorageType to update + StorageType storageType = omBucketArgs.getStorageType(); + if (storageType != null) { + bucketInfoBuilder.setStorageType(storageType); + LOG.debug("Updating bucket storage type for bucket: {} in volume: {}", + bucketName, volumeName); + } else { + bucketInfoBuilder.setStorageType(oldBucketInfo.getStorageType()); + } + + //Check Versioning to update + Boolean versioning = omBucketArgs.getIsVersionEnabled(); + if (versioning != null) { + bucketInfoBuilder.setIsVersionEnabled(versioning); + LOG.debug("Updating bucket versioning for bucket: {} in volume: {}", + bucketName, volumeName); + } else { + bucketInfoBuilder + .setIsVersionEnabled(oldBucketInfo.getIsVersionEnabled()); + } + bucketInfoBuilder.setCreationTime(oldBucketInfo.getCreationTime()); + + omBucketInfo = bucketInfoBuilder.build(); + + // Update table cache. + omMetadataManager.getBucketTable().addCacheEntry( + new CacheKey<>(bucketKey), + new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex)); + + // TODO: check acls. + } catch (IOException ex) { + if (omMetrics != null) { + omMetrics.incNumBucketUpdateFails(); + } + LOG.error("Setting bucket property failed for bucket:{} in volume:{}", + bucketName, volumeName, ex); + omResponse.setSuccess(false).setMessage(ex.getMessage()) + .setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex)); + } finally { + omMetadataManager.getLock().releaseBucketLock(volumeName, bucketName); + } + return new OMBucketSetPropertyResponse(omBucketInfo, omResponse.build()); + } + + /** + * Updates the existing ACL list with remove and add ACLs that are passed. + * Remove is done before Add. + * + * @param existingAcls - old ACL list. + * @param removeAcls - ACLs to be removed. + * @param addAcls - ACLs to be added. 
+ * @return updated ACL list. + */ + private List< OzoneAcl > getUpdatedAclList(List existingAcls, + List removeAcls, List addAcls) { + if (removeAcls != null && !removeAcls.isEmpty()) { + existingAcls.removeAll(removeAcls); + } + if (addAcls != null && !addAcls.isEmpty()) { + addAcls.stream().filter(acl -> !existingAcls.contains(acl)).forEach( + existingAcls::add); + } + return existingAcls; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java new file mode 100644 index 0000000000000..f0ca3b4d23aac --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/** + * Package contains classes related to bucket requests. + */ +package org.apache.hadoop.ozone.om.request.bucket; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java new file mode 100644 index 0000000000000..ee324cf7df9bd --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains classes for handling OMRequests. + */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java index 2603421a59a7f..dfde25e2698ee 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java @@ -18,16 +18,23 @@ package org.apache.hadoop.ozone.om.response; -import org.apache.commons.lang.NotImplementedException; +import java.io.IOException; + import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; import org.apache.hadoop.utils.db.BatchOperation; -import java.io.IOException; - /** * Interface for OM Responses, each OM response should implement this interface. */ -public interface OMClientResponse { +public abstract class OMClientResponse { + + private OMResponse omResponse; + + public OMClientResponse(OMResponse omResponse) { + this.omResponse = omResponse; + } /** * Implement logic to add the response to batch. @@ -35,10 +42,15 @@ public interface OMClientResponse { * @param batchOperation * @throws IOException */ - default void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - throw new NotImplementedException("Not implemented, Each OM Response " + - "should implement this method"); + public abstract void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException; + + /** + * Return OMResponse. + * @return OMResponse + */ + public OMResponse getOMResponse() { + return omResponse; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeCreateResponse.java index 857f03aa01abf..4f7c11586e60f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeCreateResponse.java @@ -22,19 +22,24 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .VolumeList; + import org.apache.hadoop.utils.db.BatchOperation; /** * Response for CreateBucket request. 
*/ -public class OMVolumeCreateResponse implements OMClientResponse { +public class OMVolumeCreateResponse extends OMClientResponse { - private OzoneManagerProtocolProtos.VolumeList volumeList; + private VolumeList volumeList; private OmVolumeArgs omVolumeArgs; public OMVolumeCreateResponse(OmVolumeArgs omVolumeArgs, - OzoneManagerProtocolProtos.VolumeList volumeList) { + VolumeList volumeList, OMResponse omResponse) { + super(omResponse); this.omVolumeArgs = omVolumeArgs; this.volumeList = volumeList; } @@ -53,12 +58,13 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, volumeList); } - public OzoneManagerProtocolProtos.VolumeList getVolumeList() { + public VolumeList getVolumeList() { return volumeList; } public OmVolumeArgs getOmVolumeArgs() { return omVolumeArgs; } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeDeleteResponse.java index 02663cb8887fc..44963a36f48e8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeDeleteResponse.java @@ -21,19 +21,23 @@ import java.io.IOException; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .VolumeList; import org.apache.hadoop.utils.db.BatchOperation; /** * Response for CreateVolume request. */ -public class OMVolumeDeleteResponse implements OMClientResponse { +public class OMVolumeDeleteResponse extends OMClientResponse { private String volume; private String owner; - private OzoneManagerProtocolProtos.VolumeList updatedVolumeList; + private VolumeList updatedVolumeList; public OMVolumeDeleteResponse(String volume, String owner, - OzoneManagerProtocolProtos.VolumeList updatedVolumeList) { + VolumeList updatedVolumeList, OMResponse omResponse) { + super(omResponse); this.volume = volume; this.owner = owner; this.updatedVolumeList = updatedVolumeList; @@ -43,8 +47,7 @@ public OMVolumeDeleteResponse(String volume, String owner, public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { String dbUserKey = omMetadataManager.getUserKey(owner); - OzoneManagerProtocolProtos.VolumeList volumeList = - updatedVolumeList; + VolumeList volumeList = updatedVolumeList; if (updatedVolumeList.getVolumeNamesList().size() == 0) { omMetadataManager.getUserTable().deleteWithBatch(batchOperation, dbUserKey); @@ -55,5 +58,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getVolumeTable().deleteWithBatch(batchOperation, omMetadataManager.getVolumeKey(volume)); } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java similarity index 80% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketCreateResponse.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java index 7e222edb8ff74..bb079ee278f1e 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java @@ -16,22 +16,27 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om.response; +package org.apache.hadoop.ozone.om.response.bucket; import java.io.IOException; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; import org.apache.hadoop.utils.db.BatchOperation; /** * Response for CreateBucket request. */ -public final class OMBucketCreateResponse implements OMClientResponse { +public final class OMBucketCreateResponse extends OMClientResponse { private final OmBucketInfo omBucketInfo; - public OMBucketCreateResponse(OmBucketInfo omBucketInfo) { + public OMBucketCreateResponse(OmBucketInfo omBucketInfo, + OMResponse omResponse) { + super(omResponse); this.omBucketInfo = omBucketInfo; } @@ -48,5 +53,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, public OmBucketInfo getOmBucketInfo() { return omBucketInfo; } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java similarity index 80% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketDeleteResponse.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java index fd3842db7e6ba..0477014514d91 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMBucketDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java @@ -16,23 +16,27 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om.response; +package org.apache.hadoop.ozone.om.response.bucket; import java.io.IOException; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.utils.db.BatchOperation; /** * Response for DeleteBucket request. 
*/ -public final class OMBucketDeleteResponse implements OMClientResponse { +public final class OMBucketDeleteResponse extends OMClientResponse { private String volumeName; private String bucketName; public OMBucketDeleteResponse( - String volumeName, String bucketName) { + String volumeName, String bucketName, + OzoneManagerProtocolProtos.OMResponse omResponse) { + super(omResponse); this.volumeName = volumeName; this.bucketName = bucketName; } @@ -53,5 +57,6 @@ public String getVolumeName() { public String getBucketName() { return bucketName; } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java new file mode 100644 index 0000000000000..f95c46e752678 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.bucket; + +import java.io.IOException; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.utils.db.BatchOperation; + +/** + * Response for SetBucketProperty request. + */ +public class OMBucketSetPropertyResponse extends OMClientResponse { + private OmBucketInfo omBucketInfo; + + public OMBucketSetPropertyResponse(OmBucketInfo omBucketInfo, + OMResponse omResponse) { + super(omResponse); + this.omBucketInfo = omBucketInfo; + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String dbBucketKey = + omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), + omBucketInfo.getBucketName()); + omMetadataManager.getBucketTable().putWithBatch(batchOperation, dbBucketKey, + omBucketInfo); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java new file mode 100644 index 0000000000000..e70c1c33f7c9a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/** + * Package contains classes related to bucket responses. + */ +package org.apache.hadoop.ozone.om.response.bucket; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandler.java index 1ccac3bedc34b..6a992050e266a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandler.java @@ -41,8 +41,10 @@ public interface OzoneManagerHARequestHandler extends RequestHandler { /** * Handle Apply Transaction Requests from OzoneManager StateMachine. 
* @param omRequest + * @param transactionLogIndex - ratis transaction log index * @return OMResponse */ - OMResponse handleApplyTransaction(OMRequest omRequest); + OMResponse handleApplyTransaction(OMRequest omRequest, + long transactionLogIndex); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java index 9dd27b83d183c..3a6d0df4ed150 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java @@ -19,25 +19,19 @@ import java.io.IOException; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmDeleteVolumeResponse; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeOwnerChangeResponse; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .CreateVolumeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .CreateVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .DeleteVolumeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -46,10 +40,6 @@ .OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetBucketPropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetBucketPropertyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .SetVolumePropertyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -70,8 +60,12 @@ public class OzoneManagerHARequestHandlerImpl extends OzoneManagerRequestHandler implements OzoneManagerHARequestHandler { - public OzoneManagerHARequestHandlerImpl(OzoneManagerServerProtocol om) { + private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; + + public OzoneManagerHARequestHandlerImpl(OzoneManager om, + OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer) { super(om); + this.ozoneManagerDoubleBuffer = ozoneManagerDoubleBuffer; } @Override @@ -90,15 +84,6 @@ public OMRequest handleStartTransaction(OMRequest omRequest) case DeleteVolume: newOmRequest = handleDeleteVolumeStart(omRequest); break; - case CreateBucket: - newOmRequest = handleCreateBucketStart(omRequest); - break; - 
case SetBucketProperty: - newOmRequest = handleSetBucketPropertyStart(omRequest); - break; - case DeleteBucket: - newOmRequest = handleDeleteBucketRequestStart(omRequest); - break; default: throw new IOException("Unrecognized Command Type:" + cmdType); } @@ -107,7 +92,8 @@ public OMRequest handleStartTransaction(OMRequest omRequest) @Override - public OMResponse handleApplyTransaction(OMRequest omRequest) { + public OMResponse handleApplyTransaction(OMRequest omRequest, + long transactionLogIndex) { LOG.debug("Received OMRequest: {}, ", omRequest); Type cmdType = omRequest.getCmdType(); OMResponse.Builder responseBuilder = @@ -128,17 +114,26 @@ public OMResponse handleApplyTransaction(OMRequest omRequest) { handleDeleteVolumeApply(omRequest)); break; case CreateBucket: - responseBuilder.setCreateBucketResponse( - handleCreateBucketApply(omRequest)); - break; - case SetBucketProperty: - responseBuilder.setSetBucketPropertyResponse( - handleSetBucketPropertyApply(omRequest)); - break; case DeleteBucket: - responseBuilder.setDeleteBucketResponse( - handleDeleteBucketApply(omRequest)); - break; + case SetBucketProperty: + //TODO: We don't need to pass transactionID, this will be removed when + // complete write requests is changed to new model. And also we can + // return OMClientResponse, then adding to doubleBuffer can be taken + // care by stateMachine. And also integrate both HA and NON HA code + // paths. + OMClientRequest omClientRequest = + OzoneManagerRatisUtils.createClientRequest(omRequest); + OMClientResponse omClientResponse = + omClientRequest.validateAndUpdateCache(getOzoneManager(), + transactionLogIndex); + + // If any error we have got when validateAndUpdateCache, OMResponse + // Status is set with Error Code other than OK, in that case don't + // add this to double buffer. + if (omClientResponse.getOMResponse().getStatus() == Status.OK) { + ozoneManagerDoubleBuffer.add(omClientResponse, transactionLogIndex); + } + return omClientResponse.getOMResponse(); default: // As all request types are not changed so we need to call handle // here. 
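
For the three bucket commands that now fall through to the new model above, the write path has two phases: preExecute runs once on the leader before the request enters the Ratis log, and validateAndUpdateCache runs on every OM when the log entry is applied. A compact sketch of that flow, assuming the ozoneManager and double buffer are wired as in this handler (null handling for unsupported command types is omitted):

import java.io.IOException;

import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;

final class BucketWritePathSketch {

  /** Leader side: rewrite the request before it is replicated through Ratis. */
  static OMRequest onLeader(OzoneManager om, OMRequest received)
      throws IOException {
    OMClientRequest clientRequest =
        OzoneManagerRatisUtils.createClientRequest(received);
    // e.g. for CreateBucket the creation time is stamped here.
    return clientRequest.preExecute(om);
  }

  /** Apply side: executed on every OM when the Ratis log entry is applied. */
  static OMResponse onApply(OzoneManager om, OzoneManagerDoubleBuffer buffer,
      OMRequest replicated, long transactionLogIndex) {
    OMClientRequest clientRequest =
        OzoneManagerRatisUtils.createClientRequest(replicated);
    OMClientResponse response =
        clientRequest.validateAndUpdateCache(om, transactionLogIndex);
    // Failed requests never reach the DB; only OK responses are queued for
    // the double buffer's batched flush.
    if (response.getOMResponse().getStatus() == Status.OK) {
      buffer.add(response, transactionLogIndex);
    }
    return response.getOMResponse();
  }

  private BucketWritePathSketch() { }
}

The same sequence is what the new TestOMBucketCreateRequest below exercises: doPreExecute builds the modified request, and doValidateAndUpdateCache checks that the cache contents and the returned OMResponse agree.
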
@@ -160,7 +155,7 @@ private OMRequest handleCreateVolumeStart(OMRequest omRequest) throws IOException { VolumeInfo volumeInfo = omRequest.getCreateVolumeRequest().getVolumeInfo(); OzoneManagerProtocolProtos.VolumeList volumeList = - getOzoneManagerServerProtocol().startCreateVolume( + getOzoneManager().startCreateVolume( OmVolumeArgs.getFromProtobuf(volumeInfo)); CreateVolumeRequest createVolumeRequest = @@ -176,7 +171,7 @@ private CreateVolumeResponse handleCreateVolumeApply(OMRequest omRequest) omRequest.getCreateVolumeRequest().getVolumeInfo(); VolumeList volumeList = omRequest.getCreateVolumeRequest().getVolumeList(); - getOzoneManagerServerProtocol().applyCreateVolume( + getOzoneManager().applyCreateVolume( OmVolumeArgs.getFromProtobuf(volumeInfo), volumeList); return CreateVolumeResponse.newBuilder().build(); @@ -191,7 +186,7 @@ private OMRequest handleSetVolumePropertyStart(OMRequest omRequest) if (setVolumePropertyRequest.hasQuotaInBytes()) { long quota = setVolumePropertyRequest.getQuotaInBytes(); OmVolumeArgs omVolumeArgs = - getOzoneManagerServerProtocol().startSetQuota(volume, quota); + getOzoneManager().startSetQuota(volume, quota); SetVolumePropertyRequest newSetVolumePropertyRequest = SetVolumePropertyRequest.newBuilder().setVolumeName(volume) .setVolumeInfo(omVolumeArgs.getProtobuf()).build(); @@ -201,7 +196,7 @@ private OMRequest handleSetVolumePropertyStart(OMRequest omRequest) } else { String owner = setVolumePropertyRequest.getOwnerName(); OmVolumeOwnerChangeResponse omVolumeOwnerChangeResponse = - getOzoneManagerServerProtocol().startSetOwner(volume, owner); + getOzoneManager().startSetOwner(volume, owner); // If volumeLists become large and as ratis writes the request to disk we // might take more space if the lists become very big in size. 
We might // need to revisit this if it becomes problem @@ -230,11 +225,11 @@ private SetVolumePropertyResponse handleSetVolumePropertyApply( omRequest.getSetVolumePropertyRequest(); if (setVolumePropertyRequest.hasQuotaInBytes()) { - getOzoneManagerServerProtocol().applySetQuota( + getOzoneManager().applySetQuota( OmVolumeArgs.getFromProtobuf( setVolumePropertyRequest.getVolumeInfo())); } else { - getOzoneManagerServerProtocol().applySetOwner( + getOzoneManager().applySetOwner( setVolumePropertyRequest.getOriginalOwner(), setVolumePropertyRequest.getOldOwnerVolumeList(), setVolumePropertyRequest.getNewOwnerVolumeList(), @@ -252,7 +247,7 @@ private OMRequest handleDeleteVolumeStart(OMRequest omRequest) String volume = deleteVolumeRequest.getVolumeName(); OmDeleteVolumeResponse omDeleteVolumeResponse = - getOzoneManagerServerProtocol().startDeleteVolume(volume); + getOzoneManager().startDeleteVolume(volume); DeleteVolumeRequest newDeleteVolumeRequest = DeleteVolumeRequest.newBuilder().setVolumeList( @@ -272,97 +267,10 @@ private DeleteVolumeResponse handleDeleteVolumeApply(OMRequest omRequest) DeleteVolumeRequest deleteVolumeRequest = omRequest.getDeleteVolumeRequest(); - getOzoneManagerServerProtocol().applyDeleteVolume( + getOzoneManager().applyDeleteVolume( deleteVolumeRequest.getVolumeName(), deleteVolumeRequest.getOwner(), deleteVolumeRequest.getVolumeList()); return DeleteVolumeResponse.newBuilder().build(); } - - private OMRequest handleCreateBucketStart(OMRequest omRequest) - throws IOException { - - CreateBucketRequest createBucketRequest = - omRequest.getCreateBucketRequest(); - - OmBucketInfo omBucketInfo = - getOzoneManagerServerProtocol().startCreateBucket( - OmBucketInfo.getFromProtobuf(createBucketRequest.getBucketInfo())); - - CreateBucketRequest newCreateBucketRequest = - CreateBucketRequest.newBuilder().setBucketInfo( - omBucketInfo.getProtobuf()).build(); - return omRequest.toBuilder().setCreateBucketRequest(newCreateBucketRequest) - .build(); - - } - - - private CreateBucketResponse handleCreateBucketApply(OMRequest omRequest) - throws IOException { - CreateBucketRequest createBucketRequest = - omRequest.getCreateBucketRequest(); - - getOzoneManagerServerProtocol().applyCreateBucket( - OmBucketInfo.getFromProtobuf(createBucketRequest.getBucketInfo())); - - return CreateBucketResponse.newBuilder().build(); - } - - - private OMRequest handleDeleteBucketRequestStart(OMRequest omRequest) - throws IOException { - - DeleteBucketRequest deleteBucketRequest = - omRequest.getDeleteBucketRequest(); - getOzoneManagerServerProtocol().startDeleteBucket( - deleteBucketRequest.getVolumeName(), - deleteBucketRequest.getBucketName()); - - return omRequest; - } - - private DeleteBucketResponse handleDeleteBucketApply(OMRequest omRequest) - throws IOException { - - DeleteBucketRequest deleteBucketRequest = - omRequest.getDeleteBucketRequest(); - - getOzoneManagerServerProtocol().applyDeleteBucket( - deleteBucketRequest.getVolumeName(), - deleteBucketRequest.getBucketName()); - - return DeleteBucketResponse.newBuilder().build(); - } - - private OMRequest handleSetBucketPropertyStart( - OMRequest omRequest) throws IOException { - SetBucketPropertyRequest setBucketPropertyRequest = - omRequest.getSetBucketPropertyRequest(); - - OmBucketInfo omBucketInfo = - getOzoneManagerServerProtocol().startSetBucketProperty( - OmBucketArgs.getFromProtobuf(setBucketPropertyRequest.getBucketArgs())); - - SetBucketPropertyRequest newSetBucketPropertyRequest = - SetBucketPropertyRequest.newBuilder() - 
.setBucketInfo(omBucketInfo.getProtobuf()).build(); - - return omRequest.toBuilder().setSetBucketPropertyRequest( - newSetBucketPropertyRequest).build(); - } - - private SetBucketPropertyResponse handleSetBucketPropertyApply( - OMRequest omRequest) throws IOException { - SetBucketPropertyRequest setBucketPropertyRequest = - omRequest.getSetBucketPropertyRequest(); - - getOzoneManagerServerProtocol().applySetBucketProperty( - OmBucketInfo.getFromProtobuf(setBucketPropertyRequest.getBucketInfo())); - - return SetBucketPropertyResponse.newBuilder().build(); - } - - - } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index 72b4d12e73018..24d61d51bda39 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -16,13 +16,16 @@ */ package org.apache.hadoop.ozone.protocolPB; + import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.NotLeaderException; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisClient; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -33,6 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; import java.util.Optional; /** @@ -45,9 +49,9 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements private static final Logger LOG = LoggerFactory .getLogger(OzoneManagerProtocolServerSideTranslatorPB.class); private final OzoneManagerRatisServer omRatisServer; - private final OzoneManagerRatisClient omRatisClient; private final RequestHandler handler; private final boolean isRatisEnabled; + private final OzoneManager ozoneManager; /** * Constructs an instance of the server handler. @@ -55,12 +59,13 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements * @param impl OzoneManagerProtocolPB */ public OzoneManagerProtocolServerSideTranslatorPB( - OzoneManagerServerProtocol impl, OzoneManagerRatisServer ratisServer, - OzoneManagerRatisClient ratisClient, boolean enableRatis) { + OzoneManager impl, OzoneManagerRatisServer ratisServer, + boolean enableRatis) { + this.ozoneManager = impl; handler = new OzoneManagerRequestHandler(impl); this.omRatisServer = ratisServer; - this.omRatisClient = ratisClient; this.isRatisEnabled = enableRatis; + } /** @@ -80,6 +85,17 @@ public OMResponse submitRequest(RpcController controller, if (OmUtils.isReadOnly(request)) { return submitReadRequestToOM(request); } else { + // PreExecute if needed. 
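
This pre-execute step is the only leader-side mutation of the request before it is replicated: for CreateBucket the effect visible in this patch's tests is that the creation time on the modified BucketInfo differs from the original, while DeleteBucket and SetBucketProperty pass the request through unchanged. A hedged sketch of such a CreateBucket preExecute, illustrative rather than the exact implementation:

import java.io.IOException;

import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.util.Time;

final class PreExecuteSketch {

  /**
   * Illustrative preExecute for CreateBucket: stamp server-side fields
   * (here, the creation time) so every OM applies an identical request.
   */
  static OMRequest preExecuteCreateBucket(OzoneManager ozoneManager,
      OMRequest omRequest) throws IOException {
    BucketInfo updatedInfo = omRequest.getCreateBucketRequest().getBucketInfo()
        .toBuilder().setCreationTime(Time.now()).build();
    return omRequest.toBuilder()
        .setCreateBucketRequest(CreateBucketRequest.newBuilder()
            .setBucketInfo(updatedInfo).build())
        .build();
  }

  private PreExecuteSketch() { }
}

If this step throws an IOException, createErrorResponse below converts it into an OMResponse with a non-OK status instead of submitting the request to Ratis.
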
+ try { + OMClientRequest omClientRequest = + OzoneManagerRatisUtils.createClientRequest(request); + if (omClientRequest != null) { + request = omClientRequest.preExecute(ozoneManager); + } + } catch (IOException ex) { + // As some of the preExecute returns error. So handle here. + return createErrorResponse(request, ex); + } return submitRequestToRatis(request); } } else { @@ -89,12 +105,46 @@ public OMResponse submitRequest(RpcController controller, scope.close(); } } + + /** + * Create OMResponse from the specified OMRequest and exception. + * @param omRequest + * @param exception + * @return OMResponse + */ + private OMResponse createErrorResponse( + OMRequest omRequest, IOException exception) { + OzoneManagerProtocolProtos.Type cmdType = omRequest.getCmdType(); + switch (cmdType) { + case CreateBucket: + OMResponse.Builder omResponse = OMResponse.newBuilder() + .setStatus( + OzoneManagerRatisUtils.exceptionToResponseStatus(exception)) + .setCmdType(cmdType) + .setSuccess(false); + if (exception.getMessage() != null) { + omResponse.setMessage(exception.getMessage()); + } + return omResponse.build(); + case DeleteBucket: + case SetBucketProperty: + // In these cases, we can return null. As this method is called when + // some error occurred in preExecute. For these request types + // preExecute is do nothing. + return null; + default: + // We shall never come here. + return null; + } + } /** * Submits request to OM's Ratis server. */ private OMResponse submitRequestToRatis(OMRequest request) throws ServiceException { - return omRatisClient.sendCommand(request); + //TODO: Need to remove OzoneManagerRatisClient, as now we are using + // RatisServer Api's. + return omRatisServer.submitRequest(request); } private OMResponse submitReadRequestToOM(OMRequest request) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 808ac9a3c5d28..6942456bd95ce 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.io.Text; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -43,7 +44,6 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; @@ -138,9 +138,9 @@ public class OzoneManagerRequestHandler implements RequestHandler { static final Logger LOG = LoggerFactory.getLogger(OzoneManagerRequestHandler.class); - private final OzoneManagerServerProtocol impl; + private final OzoneManager impl; - public OzoneManagerRequestHandler(OzoneManagerServerProtocol om) { + 
public OzoneManagerRequestHandler(OzoneManager om) { this.impl = om; } @@ -1089,10 +1089,6 @@ private LookupFileResponse lookupFile( .build(); } - protected OzoneManagerServerProtocol getOzoneManagerServerProtocol() { - return impl; - } - private ListStatusResponse listStatus( ListStatusRequest request) throws IOException { KeyArgs keyArgs = request.getKeyArgs(); @@ -1112,4 +1108,8 @@ private ListStatusResponse listStatus( } return listStatusResponseBuilder.build(); } + + protected OzoneManager getOzoneManager() { + return impl; + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index a0162e8aa81eb..c616a28d4335a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -22,6 +22,11 @@ import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateBucketResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -102,17 +107,24 @@ private OMDummyCreateBucketResponse createDummyBucketResponse( OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder().setVolumeName(volumeName) .setBucketName(bucketName).setCreationTime(Time.now()).build(); - return new OMDummyCreateBucketResponse(omBucketInfo); + return new OMDummyCreateBucketResponse(omBucketInfo, + OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCreateBucketResponse(CreateBucketResponse.newBuilder().build()) + .build()); } /** * DummyCreatedBucket Response class used in testing. 
*/ - public static class OMDummyCreateBucketResponse implements OMClientResponse { + public static class OMDummyCreateBucketResponse extends OMClientResponse { private final OmBucketInfo omBucketInfo; - public OMDummyCreateBucketResponse(OmBucketInfo omBucketInfo) { + public OMDummyCreateBucketResponse(OmBucketInfo omBucketInfo, + OMResponse omResponse) { + super(omResponse); this.omBucketInfo = omBucketInfo; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index 1926b65629753..4ff5411684126 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -24,6 +24,7 @@ import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -37,9 +38,18 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.response.OMVolumeCreateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList; -import org.apache.hadoop.ozone.om.response.OMBucketCreateResponse; -import org.apache.hadoop.ozone.om.response.OMBucketDeleteResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateBucketResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateVolumeResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .DeleteBucketResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .VolumeList; +import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; +import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; @@ -376,7 +386,12 @@ private OMVolumeCreateResponse createVolume(String volumeName) { VolumeList volumeList = VolumeList.newBuilder() .addVolumeNames(volumeName).build(); - return new OMVolumeCreateResponse(omVolumeArgs, volumeList); + return new OMVolumeCreateResponse(omVolumeArgs, volumeList, + OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCreateVolumeResponse(CreateVolumeResponse.newBuilder().build()) + .build()); } /** @@ -390,7 +405,11 @@ private OMBucketCreateResponse createBucket(String volumeName, OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder().setVolumeName(volumeName) .setBucketName(bucketName).setCreationTime(Time.now()).build(); - return new OMBucketCreateResponse(omBucketInfo); + return new OMBucketCreateResponse(omBucketInfo, OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCreateBucketResponse(CreateBucketResponse.newBuilder().build()) + .build()); } /** @@ -401,7 +420,12 @@ private OMBucketCreateResponse createBucket(String 
volumeName, */ private OMBucketDeleteResponse deleteBucket(String volumeName, String bucketName) { - return new OMBucketDeleteResponse(volumeName, bucketName); + return new OMBucketDeleteResponse(volumeName, bucketName, + OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setDeleteBucketResponse(DeleteBucketResponse.newBuilder().build()) + .build()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java index b5baabad61bea..d5958030bca1a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java @@ -31,7 +31,10 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMNodeDetails; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -41,22 +44,32 @@ import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.mockito.Mockito; import org.slf4j.LoggerFactory; import static org.junit.Assert.assertFalse; +import static org.mockito.Mockito.when; /** * Test OM Ratis server. 
*/ public class TestOzoneManagerRatisServer { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + private OzoneConfiguration conf; private OzoneManagerRatisServer omRatisServer; private OzoneManagerRatisClient omRatisClient; private String omID; private String clientId = UUID.randomUUID().toString(); private static final long LEADER_ELECTION_TIMEOUT = 500L; + private OMMetadataManager omMetadataManager; + private OzoneManager ozoneManager; @Before public void init() throws Exception { @@ -80,7 +93,13 @@ public void init() throws Exception { .setOMServiceId(OzoneConsts.OM_SERVICE_ID_DEFAULT) .build(); // Starts a single node Ratis server - omRatisServer = OzoneManagerRatisServer.newOMRatisServer(conf, null, + ozoneManager = Mockito.mock(OzoneManager.class); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + omRatisServer = OzoneManagerRatisServer.newOMRatisServer(conf, ozoneManager, omNodeDetails, Collections.emptyList()); omRatisServer.start(); omRatisClient = OzoneManagerRatisClient.newOzoneManagerRatisClient(omID, @@ -165,8 +184,8 @@ public void verifyRaftGroupIdGenerationWithCustomOmServiceId() throws .build(); // Starts a single node Ratis server OzoneManagerRatisServer newOmRatisServer = OzoneManagerRatisServer - .newOMRatisServer(newConf, null, - omNodeDetails, Collections.emptyList()); + .newOMRatisServer(newConf, ozoneManager, omNodeDetails, + Collections.emptyList()); newOmRatisServer.start(); OzoneManagerRatisClient newOmRatisClient = OzoneManagerRatisClient .newOzoneManagerRatisClient( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java index a40404b18ff4e..dc1f92d52b163 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java @@ -26,7 +26,10 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMNodeDetails; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -75,6 +78,8 @@ public class TestOzoneManagerStateMachine { private OzoneManagerHARequestHandler requestHandler; private RaftGroupId raftGroupId; private OzoneManagerStateMachine ozoneManagerStateMachine; + private OMMetadataManager omMetadataManager; + private OzoneManager ozoneManager; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @@ -97,8 +102,14 @@ public void setup() throws Exception { .setOMNodeId(omID) .setOMServiceId(OzoneConsts.OM_SERVICE_ID_DEFAULT) .build(); + ozoneManager = Mockito.mock(OzoneManager.class); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + 
temporaryFolder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); // Starts a single node Ratis server - omRatisServer = OzoneManagerRatisServer.newOMRatisServer(conf, null, + omRatisServer = OzoneManagerRatisServer.newOMRatisServer(conf, ozoneManager, omNodeDetails, Collections.emptyList()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java new file mode 100644 index 0000000000000..abdc23eea0263 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.request; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.util.Time; + +import java.util.UUID; + +/** + * Helper class to test OMClientRequest classes. + */ +public final class TestOMRequestUtils { + + private TestOMRequestUtils() { + //Do nothing + } + + /** + * Add's volume and bucket creation entries to OM DB. + * @param volumeName + * @param bucketName + * @param omMetadataManager + * @throws Exception + */ + public static void addVolumeAndBucketToDB(String volumeName, + String bucketName, OMMetadataManager omMetadataManager) throws Exception { + + addVolumeToDB(volumeName, omMetadataManager); + + OmBucketInfo omBucketInfo = + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName).setCreationTime(Time.now()).build(); + + omMetadataManager.getBucketTable().put( + omMetadataManager.getBucketKey(volumeName, bucketName), omBucketInfo); + } + + /** + * Add's volume creation entry to OM DB. 
+ * @param volumeName + * @param omMetadataManager + * @throws Exception + */ + public static void addVolumeToDB(String volumeName, + OMMetadataManager omMetadataManager) throws Exception { + OmVolumeArgs omVolumeArgs = + OmVolumeArgs.newBuilder().setCreationTime(Time.now()) + .setVolume(volumeName).setAdminName(UUID.randomUUID().toString()) + .setOwnerName(UUID.randomUUID().toString()).build(); + omMetadataManager.getVolumeTable().put( + omMetadataManager.getVolumeKey(volumeName), omVolumeArgs); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java new file mode 100644 index 0000000000000..48e4c34e5cf3a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java @@ -0,0 +1,270 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.request.bucket; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.mockito.Mockito; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .StorageTypeProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.util.Time; + +import static org.mockito.Mockito.when; + +/** + * Tests OMBucketCreateRequest class, which handles CreateBucket request. 
+ */ +public class TestOMBucketCreateRequest { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OMMetadataManager omMetadataManager; + + + @Before + public void setup() throws Exception { + ozoneManager = Mockito.mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + } + + @After + public void stop() { + omMetrics.unRegister(); + } + + + @Test + public void testPreExecute() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + doPreExecute(volumeName, bucketName); + } + + + @Test + public void testValidateAndUpdateCache() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + OMBucketCreateRequest omBucketCreateRequest = doPreExecute(volumeName, + bucketName); + + doValidateAndUpdateCache(volumeName, bucketName, + omBucketCreateRequest.getOmRequest()); + + } + + @Test + public void testValidateAndUpdateCacheWithNoVolume() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + OMRequest originalRequest = createBucketRequest(bucketName, volumeName, + false, StorageTypeProto.SSD); + + OMBucketCreateRequest omBucketCreateRequest = + new OMBucketCreateRequest(originalRequest); + + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + + // As we have not still called validateAndUpdateCache, get() should + // return null. + + Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey)); + + OMClientResponse omClientResponse = + omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1); + + OMResponse omResponse = omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getCreateBucketResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, + omResponse.getStatus()); + + // As request is invalid bucket table should not have entry. 
+ Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey)); + } + + + @Test + public void testValidateAndUpdateCacheWithBucketAlreadyExists() + throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + OMBucketCreateRequest omBucketCreateRequest = + doPreExecute(volumeName, bucketName); + + doValidateAndUpdateCache(volumeName, bucketName, + omBucketCreateRequest.getOmRequest()); + + // Try create same bucket again + OMClientResponse omClientResponse = + omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 2); + + OMResponse omResponse = omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getCreateBucketResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_ALREADY_EXISTS, + omResponse.getStatus()); + } + + + private OMBucketCreateRequest doPreExecute(String volumeName, + String bucketName) throws Exception { + addCreateVolumeToTable(volumeName, omMetadataManager); + OMRequest originalRequest = createBucketRequest(bucketName, volumeName, + false, StorageTypeProto.SSD); + + OMBucketCreateRequest omBucketCreateRequest = + new OMBucketCreateRequest(originalRequest); + + OMRequest modifiedRequest = omBucketCreateRequest.preExecute(ozoneManager); + verifyRequest(modifiedRequest, originalRequest); + return new OMBucketCreateRequest(modifiedRequest); + } + + private void doValidateAndUpdateCache(String volumeName, String bucketName, + OMRequest modifiedRequest) throws Exception { + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + + // As we have not still called validateAndUpdateCache, get() should + // return null. + + Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey)); + OMBucketCreateRequest omBucketCreateRequest = + new OMBucketCreateRequest(modifiedRequest); + + + OMClientResponse omClientResponse = + omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1); + + // As now after validateAndUpdateCache it should add entry to cache, get + // should return non null value. + OmBucketInfo omBucketInfo = + omMetadataManager.getBucketTable().get(bucketKey); + Assert.assertNotNull(omMetadataManager.getBucketTable().get(bucketKey)); + + // verify table data with actual request data. + Assert.assertEquals(OmBucketInfo.getFromProtobuf( + modifiedRequest.getCreateBucketRequest().getBucketInfo()), + omBucketInfo); + + // verify OMResponse. 
+ verifySuccessCreateBucketResponse(omClientResponse.getOMResponse()); + + } + + + private void verifyRequest(OMRequest modifiedOmRequest, + OMRequest originalRequest) { + OzoneManagerProtocolProtos.BucketInfo original = + originalRequest.getCreateBucketRequest().getBucketInfo(); + OzoneManagerProtocolProtos.BucketInfo updated = + modifiedOmRequest.getCreateBucketRequest().getBucketInfo(); + + Assert.assertEquals(original.getBucketName(), updated.getBucketName()); + Assert.assertEquals(original.getVolumeName(), updated.getVolumeName()); + Assert.assertEquals(original.getIsVersionEnabled(), + updated.getIsVersionEnabled()); + Assert.assertEquals(original.getStorageType(), updated.getStorageType()); + Assert.assertEquals(original.getMetadataList(), updated.getMetadataList()); + Assert.assertNotEquals(original.getCreationTime(), + updated.getCreationTime()); + } + + public static void verifySuccessCreateBucketResponse(OMResponse omResponse) { + Assert.assertNotNull(omResponse.getCreateBucketResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Type.CreateBucket, + omResponse.getCmdType()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omResponse.getStatus()); + } + + public static void addCreateVolumeToTable(String volumeName, + OMMetadataManager omMetadataManager) throws Exception { + OmVolumeArgs omVolumeArgs = + OmVolumeArgs.newBuilder().setCreationTime(Time.now()) + .setVolume(volumeName).setAdminName(UUID.randomUUID().toString()) + .setOwnerName(UUID.randomUUID().toString()).build(); + omMetadataManager.getVolumeTable().put( + omMetadataManager.getVolumeKey(volumeName), omVolumeArgs); + } + + + public static OMRequest createBucketRequest(String bucketName, + String volumeName, boolean isVersionEnabled, + StorageTypeProto storageTypeProto) { + OzoneManagerProtocolProtos.BucketInfo bucketInfo = + OzoneManagerProtocolProtos.BucketInfo.newBuilder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setIsVersionEnabled(isVersionEnabled) + .setStorageType(storageTypeProto) + .addAllMetadata(getMetadataList()).build(); + OzoneManagerProtocolProtos.CreateBucketRequest.Builder req = + OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder(); + req.setBucketInfo(bucketInfo); + return OMRequest.newBuilder().setCreateBucketRequest(req) + .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket) + .setClientId(UUID.randomUUID().toString()).build(); + } + + public static List< HddsProtos.KeyValue> getMetadataList() { + List metadataList = new ArrayList<>(); + metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue( + "value1").build()); + metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue( + "value2").build()); + return metadataList; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java new file mode 100644 index 0000000000000..a16e019fd0b7d --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.request.bucket; + +import java.util.UUID; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.mockito.Mockito; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .DeleteBucketRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; + +import static org.mockito.Mockito.when; + +/** + * Tests OMBucketDeleteRequest class which handles DeleteBucket request. + */ +public class TestOMBucketDeleteRequest { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OMMetadataManager omMetadataManager; + + + @Before + public void setup() throws Exception { + ozoneManager = Mockito.mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + } + + @After + public void stop() { + omMetrics.unRegister(); + } + + @Test + public void testPreExecute() throws Exception { + OMRequest omRequest = + createDeleteBucketRequest(UUID.randomUUID().toString(), + UUID.randomUUID().toString()); + + OMBucketDeleteRequest omBucketDeleteRequest = + new OMBucketDeleteRequest(omRequest); + + // As preExecute of DeleteBucket request is do nothing, requests should + // be same. + Assert.assertEquals(omRequest, + omBucketDeleteRequest.preExecute(ozoneManager)); + } + + + @Test + public void testValidateAndUpdateCache() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + OMRequest omRequest = + createDeleteBucketRequest(volumeName, bucketName); + + OMBucketDeleteRequest omBucketDeleteRequest = + new OMBucketDeleteRequest(omRequest); + + // Create Volume and bucket entries in DB. 
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1); + + Assert.assertNull(omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName))); + } + + + @Test + public void testValidateAndUpdateCacheFailure() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + OMRequest omRequest = + createDeleteBucketRequest(volumeName, bucketName); + + OMBucketDeleteRequest omBucketDeleteRequest = + new OMBucketDeleteRequest(omRequest); + + + OMClientResponse omClientResponse = + omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1); + + Assert.assertNull(omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName))); + + Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + } + + + + + private OMRequest createDeleteBucketRequest(String volumeName, + String bucketName) { + return OMRequest.newBuilder().setDeleteBucketRequest( + DeleteBucketRequest.newBuilder() + .setBucketName(bucketName).setVolumeName(volumeName)) + .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket) + .setClientId(UUID.randomUUID().toString()).build(); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java new file mode 100644 index 0000000000000..09292650872cd --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.hadoop.ozone.om.request.bucket; + +import java.util.UUID; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.mockito.Mockito; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. + BucketArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .SetBucketPropertyRequest; + +import static org.mockito.Mockito.when; + +/** + * Tests OMBucketSetPropertyRequest class which handles OMSetBucketProperty + * request. + */ +public class TestOMBucketSetPropertyRequest { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OMMetadataManager omMetadataManager; + + + @Before + public void setup() throws Exception { + ozoneManager = Mockito.mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + } + + @After + public void stop() { + omMetrics.unRegister(); + } + + @Test + public void testPreExecute() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + + OMRequest omRequest = createSetBucketPropertyRequest(volumeName, + bucketName, true); + + OMBucketSetPropertyRequest omBucketSetPropertyRequest = + new OMBucketSetPropertyRequest(omRequest); + + Assert.assertEquals(omRequest, + omBucketSetPropertyRequest.preExecute(ozoneManager)); + } + + @Test + public void testValidateAndUpdateCache() throws Exception { + + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + + OMRequest omRequest = createSetBucketPropertyRequest(volumeName, + bucketName, true); + + // Create with default BucketInfo values + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + OMBucketSetPropertyRequest omBucketSetPropertyRequest = + new OMBucketSetPropertyRequest(omRequest); + + OMClientResponse omClientResponse = + omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1); + + Assert.assertEquals(true, + omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName)) + .getIsVersionEnabled()); + + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + + } + + @Test + public void testValidateAndUpdateCacheFails() throws Exception { + + String volumeName = UUID.randomUUID().toString(); + String bucketName = 
UUID.randomUUID().toString(); + + + OMRequest omRequest = createSetBucketPropertyRequest(volumeName, + bucketName, true); + + + OMBucketSetPropertyRequest omBucketSetPropertyRequest = + new OMBucketSetPropertyRequest(omRequest); + + OMClientResponse omClientResponse = + omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1); + + Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + + Assert.assertNull(omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName))); + + } + + private OMRequest createSetBucketPropertyRequest(String volumeName, + String bucketName, boolean isVersionEnabled) { + return OMRequest.newBuilder().setSetBucketPropertyRequest( + SetBucketPropertyRequest.newBuilder().setBucketArgs( + BucketArgs.newBuilder().setBucketName(bucketName) + .setVolumeName(volumeName) + .setIsVersionEnabled(isVersionEnabled).build())) + .setCmdType(OzoneManagerProtocolProtos.Type.SetBucketProperty) + .setClientId(UUID.randomUUID().toString()).build(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java new file mode 100644 index 0000000000000..b89c65198d7da --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/** + * Package contains test classes for bucket requests. + */ +package org.apache.hadoop.ozone.om.request.bucket; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java new file mode 100644 index 0000000000000..c7752a3a8ec81 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.response; + +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.util.Time; + +/** + * Helper class to test OMClientResponse classes. + */ +public final class TestOMResponseUtils { + + // No one can instantiate, this is just utility class with all static methods. + private TestOMResponseUtils() { + } + + public static OmBucketInfo createBucket(String volume, String bucket) { + return OmBucketInfo.newBuilder().setVolumeName(volume).setBucketName(bucket) + .setCreationTime(Time.now()).setIsVersionEnabled(true).addMetadata( + "key1", "value1").build(); + + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketCreateResponse.java new file mode 100644 index 0000000000000..37204b9e2a569 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketCreateResponse.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.response.bucket; + +import java.util.UUID; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateBucketResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.utils.db.BatchOperation; + +/** + * This class tests OMBucketCreateResponse. 
+ */ +public class TestOMBucketCreateResponse { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OMMetadataManager omMetadataManager; + private BatchOperation batchOperation; + + @Before + public void setup() throws Exception { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + } + + @Test + public void testAddToDBBatch() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket( + volumeName, bucketName); + OMBucketCreateResponse omBucketCreateResponse = + new OMBucketCreateResponse(omBucketInfo, OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCreateBucketResponse( + CreateBucketResponse.newBuilder().build()).build()); + + omBucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + Assert.assertEquals(omBucketInfo, + omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName))); + } + + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketDeleteResponse.java new file mode 100644 index 0000000000000..286456c41d9a7 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketDeleteResponse.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.hadoop.ozone.om.response.bucket; + +import java.util.UUID; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateBucketResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .DeleteBucketResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.utils.db.BatchOperation; + +/** + * This class tests OMBucketDeleteResponse. + */ +public class TestOMBucketDeleteResponse { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OMMetadataManager omMetadataManager; + private BatchOperation batchOperation; + + + @Before + public void setup() throws Exception { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + } + + @Test + public void testAddToDBBatch() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket( + volumeName, bucketName); + OMBucketCreateResponse omBucketCreateResponse = + new OMBucketCreateResponse(omBucketInfo, OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCreateBucketResponse( + CreateBucketResponse.newBuilder().build()).build()); + + OMBucketDeleteResponse omBucketDeleteResponse = + new OMBucketDeleteResponse(volumeName, bucketName, + OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setDeleteBucketResponse( + DeleteBucketResponse.getDefaultInstance()).build()); + + omBucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation); + omBucketDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + Assert.assertNull(omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName))); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketSetPropertyResponse.java new file mode 100644 index 0000000000000..ffe704c96fd61 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketSetPropertyResponse.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.response.bucket; + +import java.util.UUID; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateBucketResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.utils.db.BatchOperation; + +/** + * This class tests OMBucketSetPropertyResponse. + */ +public class TestOMBucketSetPropertyResponse { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OMMetadataManager omMetadataManager; + private BatchOperation batchOperation; + + @Before + public void setup() throws Exception { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + } + + @Test + public void testAddToDBBatch() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket( + volumeName, bucketName); + OMBucketSetPropertyResponse omBucketCreateResponse = + new OMBucketSetPropertyResponse(omBucketInfo, OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCreateBucketResponse( + CreateBucketResponse.newBuilder().build()).build()); + + omBucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + Assert.assertEquals(omBucketInfo, + omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName))); + } + + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java new file mode 100644 index 0000000000000..0980106779c6e --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/** + * Package contains test classes for bucket responses. + */ +package org.apache.hadoop.ozone.om.response.bucket; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker b/hadoop-ozone/ozone-manager/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker new file mode 100644 index 0000000000000..3c9e1c8a6971a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +mock-maker-inline \ No newline at end of file From 9122b9b6493d315c8910fbc06eaba96273ee335c Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Thu, 30 May 2019 17:27:52 -0500 Subject: [PATCH 0080/1308] HDFS-14521. Suppress setReplication logging. 
Contributed by Kihwal Lee --- .../org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java index 6da59eda09f77..b0a36b92040ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java @@ -399,13 +399,13 @@ static BlockInfo[] unprotectedSetReplication( if (oldBR != -1) { if (oldBR > targetReplication) { - FSDirectory.LOG.info("Decreasing replication from {} to {} for {}", + FSDirectory.LOG.debug("Decreasing replication from {} to {} for {}", oldBR, targetReplication, iip.getPath()); } else if (oldBR < targetReplication) { - FSDirectory.LOG.info("Increasing replication from {} to {} for {}", + FSDirectory.LOG.debug("Increasing replication from {} to {} for {}", oldBR, targetReplication, iip.getPath()); } else { - FSDirectory.LOG.info("Replication remains unchanged at {} for {}", + FSDirectory.LOG.debug("Replication remains unchanged at {} for {}", oldBR, iip.getPath()); } } From 219e2867220f805fb69ff9c991113f92c3f4ed2f Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Thu, 30 May 2019 16:44:38 -0700 Subject: [PATCH 0081/1308] HDDS-1608. Support Ozone Prefix ACLs in OM metadata table. Contributed by Xiaoyu Yao. (#875) --- .../hadoop/ozone/om/OMMetadataManager.java | 7 + .../ozone/om/codec/OmPrefixInfoCodec.java | 53 ++++++ .../hadoop/ozone/om/helpers/OmPrefixInfo.java | 170 ++++++++++++++++++ .../src/main/proto/OzoneManagerProtocol.proto | 7 + .../ozone/om/codec/TestOmPrefixInfoCodec.java | 98 ++++++++++ .../server/ratis/TestCSMMetrics.java | 14 +- .../ozone/om/OmMetadataManagerImpl.java | 19 +- 7 files changed, 360 insertions(+), 8 deletions(-) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java create mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 3149b86fc0dda..34d81cf50c7b9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList; @@ -262,6 +263,12 @@ List listVolumes(String userName, String prefix, Table getS3Table(); + /** + * Gets the Ozone prefix path to its acl mapping table. + * @return Table. + */ + Table getPrefixTable(); + /** * Returns the DB key name of a multipart upload key in OM metadata store. 
* diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java new file mode 100644 index 0000000000000..5ddb468bfc6f1 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.codec; + +import com.google.common.base.Preconditions; +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrefixInfo; + +import org.apache.hadoop.utils.db.Codec; + +import java.io.IOException; + +/** + * Codec to encode PrefixAcl as byte array. + */ +public class OmPrefixInfoCodec implements Codec { + + @Override + public byte[] toPersistedFormat(OmPrefixInfo object) throws IOException { + Preconditions + .checkNotNull(object, "Null object can't be converted to byte array."); + return object.getProtobuf().toByteArray(); + } + + @Override + public OmPrefixInfo fromPersistedFormat(byte[] rawData) throws IOException { + Preconditions + .checkNotNull(rawData, + "Null byte array can't converted to real object."); + try { + return OmPrefixInfo.getFromProtobuf(PrefixInfo.parseFrom(rawData)); + } catch (InvalidProtocolBufferException e) { + throw new IllegalArgumentException( + "Can't encode the the raw data from the byte array", e); + } + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java new file mode 100644 index 0000000000000..ddb9865e4da14 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java @@ -0,0 +1,170 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.helpers; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrefixInfo; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; + +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * Wrapper class for Ozone prefix path info, currently mainly target for ACL but + * can be extended for other OzFS optimizations in future. + */ +// TODO: support Auditable interface +public final class OmPrefixInfo extends WithMetadata { + + private String name; + private List acls; + + public OmPrefixInfo(String name, List acls, + Map metadata) { + this.name = name; + this.acls = acls; + this.metadata = metadata; + } + + /** + * Returns the ACL's associated with this prefix. + * @return {@literal List} + */ + public List getAcls() { + return acls; + } + + /** + * Returns the name of the prefix path. + * @return name of the prefix path. + */ + public String getName() { + return name; + } + + /** + * Returns new builder class that builds a OmPrefixInfo. + * + * @return Builder + */ + public static OmPrefixInfo.Builder newBuilder() { + return new OmPrefixInfo.Builder(); + } + + /** + * Builder for OmPrefixInfo. + */ + public static class Builder { + private String name; + private List acls; + private Map metadata; + + public Builder() { + //Default values + this.acls = new LinkedList<>(); + this.metadata = new HashMap<>(); + } + + public Builder setAcls(List listOfAcls) { + this.acls = listOfAcls; + return this; + } + + public Builder setName(String n) { + this.name = n; + return this; + } + + public OmPrefixInfo.Builder addMetadata(String key, String value) { + metadata.put(key, value); + return this; + } + + public OmPrefixInfo.Builder addAllMetadata( + Map additionalMetadata) { + if (additionalMetadata != null) { + metadata.putAll(additionalMetadata); + } + return this; + } + + /** + * Constructs the OmPrefixInfo. + * @return instance of OmPrefixInfo. + */ + public OmPrefixInfo build() { + Preconditions.checkNotNull(name); + Preconditions.checkNotNull(acls); + return new OmPrefixInfo(name, acls, metadata); + } + } + + /** + * Creates PrefixInfo protobuf from OmPrefixInfo. + */ + public PrefixInfo getProtobuf() { + PrefixInfo.Builder pib = PrefixInfo.newBuilder().setName(name) + .addAllAcls(acls.stream().map(OMPBHelper::convertOzoneAcl) + .collect(Collectors.toList())) + .addAllMetadata(KeyValueUtil.toProtobuf(metadata)); + return pib.build(); + } + + /** + * Parses PrefixInfo protobuf and creates OmPrefixInfo. 
+ * @param prefixInfo + * @return instance of OmPrefixInfo + */ + public static OmPrefixInfo getFromProtobuf(PrefixInfo prefixInfo) { + OmPrefixInfo.Builder opib = OmPrefixInfo.newBuilder() + .setName(prefixInfo.getName()) + .setAcls(prefixInfo.getAclsList().stream().map( + OMPBHelper::convertOzoneAcl).collect(Collectors.toList())); + if (prefixInfo.getMetadataList() != null) { + opib.addAllMetadata(KeyValueUtil + .getFromProtobuf(prefixInfo.getMetadataList())); + } + return opib.build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + OmPrefixInfo that = (OmPrefixInfo) o; + return name.equals(that.name) && + Objects.equals(acls, that.acls) && + Objects.equals(metadata, that.metadata); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } +} + diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index 6577de7368ee8..8e8d401a7f708 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -459,11 +459,18 @@ message BucketArgs { repeated hadoop.hdds.KeyValue metadata = 7; } +message PrefixInfo { + required string name = 1; + repeated OzoneAclInfo acls = 2; + repeated hadoop.hdds.KeyValue metadata = 3; +} + message OzoneObj { enum ObjectType { VOLUME = 1; BUCKET = 2; KEY = 3; + PREFIX = 4; } enum StoreType { diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java new file mode 100644 index 0000000000000..571a25f203a8e --- /dev/null +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.codec; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; + +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.LinkedList; +import java.util.List; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * This class test OmPrefixInfoCodec. + */ +public class TestOmPrefixInfoCodec { + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + + private OmPrefixInfoCodec codec; + + @Before + public void setUp() { + codec = new OmPrefixInfoCodec(); + } + + @Test + public void testCodecWithIncorrectValues() throws Exception { + try { + codec.fromPersistedFormat("random".getBytes(StandardCharsets.UTF_8)); + fail("testCodecWithIncorrectValues failed"); + } catch (IllegalArgumentException ex) { + GenericTestUtils.assertExceptionContains("Can't encode the the raw " + + "data from the byte array", ex); + } + } + + @Test + public void testCodecWithNullDataFromTable() throws Exception { + thrown.expect(NullPointerException.class); + codec.fromPersistedFormat(null); + } + + + @Test + public void testCodecWithNullDataFromUser() throws Exception { + thrown.expect(NullPointerException.class); + codec.toPersistedFormat(null); + } + + @Test + public void testToAndFromPersistedFormat() throws IOException { + + List acls = new LinkedList<>(); + OzoneAcl ozoneAcl = new OzoneAcl(ACLIdentityType.USER, + "hive", ACLType.ALL); + acls.add(ozoneAcl); + OmPrefixInfo opiSave = OmPrefixInfo.newBuilder() + .setName("/user/hive/warehouse") + .setAcls(acls) + .addMetadata("id", "100") + .build(); + + OmPrefixInfo opiLoad = codec.fromPersistedFormat( + codec.toPersistedFormat(opiSave)); + + assertTrue("Load saved prefix info should match", + opiLoad.equals(opiSave)); + } +} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index fa740eacc8e49..5d886c1caa011 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -58,13 +58,13 @@ import org.junit.Test; import org.junit.Assert; - /** - * This class tests the metrics of ContainerStateMachine. - */ - public class TestCSMMetrics { - static final String TEST_DIR = - GenericTestUtils.getTestDir("dfs").getAbsolutePath() - + File.separator; +/** + * This class tests the metrics of ContainerStateMachine. 
+ */ +public class TestCSMMetrics { + static final String TEST_DIR = + GenericTestUtils.getTestDir("dfs").getAbsolutePath() + + File.separator; @FunctionalInterface interface CheckedBiFunction { OUT apply(LEFT left, RIGHT right) throws THROWABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 0720a10b2472b..fb7c3b8c2cda2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec; import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec; import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec; +import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec; import org.apache.hadoop.ozone.om.codec.S3SecretValueCodec; import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec; import org.apache.hadoop.ozone.om.codec.VolumeListCodec; @@ -41,6 +42,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList; @@ -98,6 +100,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager { * |-------------------------------------------------------------------| * | dTokenTable | s3g_access_key_id -> s3Secret | * |-------------------------------------------------------------------| + * | prefixInfoTable | prefix -> PrefixInfo | + * |-------------------------------------------------------------------| */ public static final String USER_TABLE = "userTable"; @@ -110,6 +114,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager { public static final String MULTIPARTINFO_TABLE = "multipartInfoTable"; public static final String S3_SECRET_TABLE = "s3SecretTable"; public static final String DELEGATION_TOKEN_TABLE = "dTokenTable"; + public static final String PREFIX_TABLE = "prefixTable"; private DBStore store; @@ -126,6 +131,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager { private Table multipartInfoTable; private Table s3SecretTable; private Table dTokenTable; + private Table prefixTable; public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException { this.lock = new OzoneManagerLock(conf); @@ -183,6 +189,11 @@ public Table getS3Table() { return s3Table; } + @Override + public Table getPrefixTable() { + return prefixTable; + } + @Override public Table getMultipartInfoTable() { return multipartInfoTable; @@ -230,13 +241,15 @@ protected DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) { .addTable(MULTIPARTINFO_TABLE) .addTable(DELEGATION_TOKEN_TABLE) .addTable(S3_SECRET_TABLE) + .addTable(PREFIX_TABLE) .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec()) .addCodec(OmKeyInfo.class, new OmKeyInfoCodec()) .addCodec(OmBucketInfo.class, new OmBucketInfoCodec()) .addCodec(OmVolumeArgs.class, new OmVolumeArgsCodec()) .addCodec(VolumeList.class, new VolumeListCodec()) .addCodec(OmMultipartKeyInfo.class, new OmMultipartKeyInfoCodec()) - .addCodec(S3SecretValue.class, new 
S3SecretValueCodec()); + .addCodec(S3SecretValue.class, new S3SecretValueCodec()) + .addCodec(OmPrefixInfo.class, new OmPrefixInfoCodec()); } /** @@ -282,6 +295,10 @@ protected void initializeOmTables() throws IOException { s3SecretTable = this.store.getTable(S3_SECRET_TABLE, String.class, S3SecretValue.class); checkTableStatus(s3SecretTable, S3_SECRET_TABLE); + + prefixTable = this.store.getTable(PREFIX_TABLE, String.class, + OmPrefixInfo.class); + checkTableStatus(prefixTable, PREFIX_TABLE); } /** From 35f1014b3e10eee27f1976f4af9815a0c2d7dacd Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Fri, 31 May 2019 10:29:24 +0900 Subject: [PATCH 0082/1308] HDFS-13654. Use a random secret when a secret file doesn't exist in HttpFS. This should be default. --- .../hadoop-hdfs-httpfs/pom.xml | 1 - .../src/main/conf/httpfs-signature.secret | 1 - .../server/HttpFSAuthenticationFilter.java | 46 ++++++++++----- .../src/main/resources/httpfs-default.xml | 3 + ...HttpFSServerWebServerWithRandomSecret.java | 58 +++++++++++++++++++ 5 files changed, 91 insertions(+), 18 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-signature.secret create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServerWithRandomSecret.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index 50a24a9e6582e..69b2634d822ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -304,7 +304,6 @@ src/test/resources/classutils.txt - src/main/conf/httpfs-signature.secret diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-signature.secret b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-signature.secret deleted file mode 100644 index 56466e94deacd..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-signature.secret +++ /dev/null @@ -1 +0,0 @@ -hadoop httpfs secret diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java index 362b1b45a6e6e..7bdaa841e2647 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java @@ -21,6 +21,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider; +import org.apache.hadoop.security.authentication.util.SignerSecretProvider; import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter; import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler; @@ -37,8 +39,8 @@ import java.util.Properties; /** - * Subclass of hadoop-auth AuthenticationFilter that obtains its configuration - * from HttpFSServer's server configuration. + * Subclass of hadoop-auth AuthenticationFilter that obtains its + * configuration from HttpFSServer's server configuration. 
*/ @InterfaceAudience.Private public class HttpFSAuthenticationFilter @@ -46,7 +48,8 @@ public class HttpFSAuthenticationFilter static final String CONF_PREFIX = "httpfs.authentication."; - private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file"; + private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + + ".file"; /** * Returns the hadoop-auth configuration from HttpFSServer's configuration. @@ -78,22 +81,25 @@ protected Properties getConfiguration(String configPrefix, String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null); if (signatureSecretFile == null) { - throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE); + throw new RuntimeException("Undefined property: " + + SIGNATURE_SECRET_FILE); } - try { - StringBuilder secret = new StringBuilder(); - Reader reader = new InputStreamReader(Files.newInputStream(Paths.get( - signatureSecretFile)), StandardCharsets.UTF_8); - int c = reader.read(); - while (c > -1) { - secret.append((char)c); - c = reader.read(); + if (!isRandomSecret(filterConfig)) { + try (Reader reader = new InputStreamReader(Files.newInputStream( + Paths.get(signatureSecretFile)), StandardCharsets.UTF_8)) { + StringBuilder secret = new StringBuilder(); + int c = reader.read(); + while (c > -1) { + secret.append((char) c); + c = reader.read(); + } + props.setProperty(AuthenticationFilter.SIGNATURE_SECRET, + secret.toString()); + } catch (IOException ex) { + throw new RuntimeException("Could not read HttpFS signature " + + "secret file: " + signatureSecretFile); } - reader.close(); - props.setProperty(AuthenticationFilter.SIGNATURE_SECRET, secret.toString()); - } catch (IOException ex) { - throw new RuntimeException("Could not read HttpFS signature secret file: " + signatureSecretFile); } setAuthHandlerClass(props); String dtkind = WebHdfsConstants.WEBHDFS_TOKEN_KIND.toString(); @@ -115,4 +121,12 @@ protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) { return conf; } + private boolean isRandomSecret(FilterConfig filterConfig) { + SignerSecretProvider secretProvider = (SignerSecretProvider) filterConfig + .getServletContext().getAttribute(SIGNER_SECRET_PROVIDER_ATTRIBUTE); + if (secretProvider == null) { + return false; + } + return secretProvider.getClass() == RandomSignerSecretProvider.class; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml index 3e9064f4472cf..e884a125ef51d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml @@ -157,6 +157,9 @@ If multiple HttpFS servers are used in a load-balancer/round-robin fashion, they should share the secret file. + + If the secret file specified here does not exist, random secret is + generated at startup time. 
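The property described above is httpfs.authentication.signature.secret.file. A minimal sketch of wiring a shared secret file through a Hadoop Configuration object is shown below; the file path is a placeholder, and in a real deployment the value would normally be set in httpfs-site.xml rather than in code.

    import org.apache.hadoop.conf.Configuration;

    public class HttpFsSecretConfigSketch {
      // Illustrative only: points every HttpFS instance behind a load
      // balancer at the same signature secret file so that signed
      // authentication cookies validate on any of them.
      public static Configuration sharedSecretConf() {
        Configuration conf = new Configuration(false);
        conf.set("httpfs.authentication.signature.secret.file",
            "/etc/security/httpfs-signature.secret"); // placeholder path
        // If this file is absent, the filter change above falls back to a
        // randomly generated secret instead of failing at startup.
        return conf;
      }
    }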
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServerWithRandomSecret.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServerWithRandomSecret.java new file mode 100644 index 0000000000000..b8e902a6f544e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServerWithRandomSecret.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.http.server; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Shell; +import org.junit.BeforeClass; + +import java.io.File; + +/** + * Unlike {@link TestHttpFSServerWebServer}, httpfs-signature.secret doesn't + * exist. In this case, a random secret is used. + */ +public class TestHttpFSServerWebServerWithRandomSecret extends + TestHttpFSServerWebServer { + @BeforeClass + public static void beforeClass() throws Exception { + File homeDir = GenericTestUtils.getTestDir(); + File confDir = new File(homeDir, "etc/hadoop"); + File logsDir = new File(homeDir, "logs"); + File tempDir = new File(homeDir, "temp"); + confDir.mkdirs(); + logsDir.mkdirs(); + tempDir.mkdirs(); + + if (Shell.WINDOWS) { + File binDir = new File(homeDir, "bin"); + binDir.mkdirs(); + File winutils = Shell.getWinUtilsFile(); + if (winutils.exists()) { + FileUtils.copyFileToDirectory(winutils, binDir); + } + } + + System.setProperty("hadoop.home.dir", homeDir.getAbsolutePath()); + System.setProperty("hadoop.log.dir", logsDir.getAbsolutePath()); + System.setProperty("httpfs.home.dir", homeDir.getAbsolutePath()); + System.setProperty("httpfs.log.dir", logsDir.getAbsolutePath()); + System.setProperty("httpfs.config.dir", confDir.getAbsolutePath()); + } +} From 7861a5eb1afd3b8c485b1db83f3bb21a50928e4e Mon Sep 17 00:00:00 2001 From: Sunil G Date: Fri, 31 May 2019 10:22:26 +0530 Subject: [PATCH 0083/1308] YARN-9033. ResourceHandlerChain#bootstrap is invoked twice during NM start if LinuxContainerExecutor enabled. Contributed by Zhankun Tang. 
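The change that follows removes the second bootstrap of the resource handler chain from ContainerScheduler#serviceInit, so the chain is only bootstrapped once during NodeManager start. A complementary safeguard, shown purely as an illustration and not part of this patch, is to make a bootstrap-style method idempotent; the class and field names below are hypothetical.

    import java.util.concurrent.atomic.AtomicBoolean;

    import org.apache.hadoop.conf.Configuration;

    public class BootstrapOnce {
      private final AtomicBoolean bootstrapped = new AtomicBoolean(false);

      // A second caller becomes a harmless no-op instead of re-running
      // per-handler setup.
      public void bootstrap(Configuration conf) {
        if (!bootstrapped.compareAndSet(false, true)) {
          return;
        }
        // ... per-handler bootstrap work would run exactly once here ...
      }
    }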
--- .../scheduler/ContainerScheduler.java | 25 ++++++------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java index cfbde870e94a7..854ad532bcd3c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java @@ -35,7 +35,6 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerChain; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor .ChangeMonitoringContainerResourceEvent; @@ -44,7 +43,6 @@ import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; -import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService .RecoveredContainerState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus; @@ -132,21 +130,14 @@ public ContainerScheduler(Context context, AsyncDispatcher dispatcher, @Override public void serviceInit(Configuration conf) throws Exception { super.serviceInit(conf); - try { - if (resourceHandlerChain == null) { - resourceHandlerChain = ResourceHandlerModule - .getConfiguredResourceHandlerChain(conf, context); - } - LOG.debug("Resource handler chain enabled = {}", - (resourceHandlerChain != null)); - if (resourceHandlerChain != null) { - LOG.debug("Bootstrapping resource handler chain"); - resourceHandlerChain.bootstrap(conf); - } - } catch (ResourceHandlerException e) { - LOG.error("Failed to bootstrap configured resource subsystems! ", e); - throw new IOException( - "Failed to bootstrap configured resource subsystems!"); + if (resourceHandlerChain == null) { + resourceHandlerChain = ResourceHandlerModule + .getConfiguredResourceHandlerChain(conf, context); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Resource handler chain enabled = " + (resourceHandlerChain + != null)); + } this.usePauseEventForPreemption = conf.getBoolean( From e49162f4b3791dbf51079e3b19dd0c8bc2a85158 Mon Sep 17 00:00:00 2001 From: Sunil G Date: Fri, 31 May 2019 10:28:09 +0530 Subject: [PATCH 0084/1308] YARN-9545. Create healthcheck REST endpoint for ATSv2. Contributed by Zoltan Siegl. 
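The patch below registers a GET /health resource on the timeline reader web services: it answers HTTP 200 with a TimelineHealth body when the configured reader reports RUNNING, and HTTP 500 otherwise. A minimal probe from plain Java is sketched here; the host and port are placeholders for a deployment's reader address, and the /ws/v2/timeline prefix is the reader's existing web-services root.

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class TimelineReaderHealthProbe {
      public static void main(String[] args) throws Exception {
        // Placeholder address for the timeline reader daemon.
        URL url = new URL(
            "http://timeline-reader.example.com:8188/ws/v2/timeline/health");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        // 200 -> the reader reported TimelineHealthStatus.RUNNING
        // 500 -> the body carries READER_CONNECTION_FAILURE plus diagnostics
        System.out.println("Timeline reader health: HTTP "
            + conn.getResponseCode());
        conn.disconnect();
      }
    }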
--- .../api/records/timeline/TimelineHealth.java | 82 +++++++++++++++++++ .../DocumentStoreTimelineReaderImpl.java | 13 +++ .../storage/HBaseTimelineReaderImpl.java | 13 +++ .../reader/TimelineReaderManager.java | 10 +++ .../reader/TimelineReaderWebServices.java | 33 ++++++++ .../storage/FileSystemTimelineReaderImpl.java | 15 ++++ .../storage/NoOpTimelineReaderImpl.java | 7 ++ .../storage/TimelineReader.java | 8 ++ .../reader/TestTimelineReaderWebServices.java | 19 +++++ 9 files changed, 200 insertions(+) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineHealth.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineHealth.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineHealth.java new file mode 100644 index 0000000000000..d592167b862cb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineHealth.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.api.records.timeline; + + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +/** + * This class holds health information for ATS. + */ +@XmlRootElement(name = "health") +@XmlAccessorType(XmlAccessType.NONE) +@InterfaceAudience.Public +@InterfaceStability.Unstable +public class TimelineHealth { + + /** + * Timline health status. 
+ * + * RUNNING - Service is up and running + * READER_CONNECTION_FAULURE - isConnectionAlive() of reader implementation + * reported an error + */ + public enum TimelineHealthStatus { + RUNNING, + READER_CONNECTION_FAILURE + } + + private TimelineHealthStatus healthStatus; + private String diagnosticsInfo; + + public TimelineHealth(TimelineHealthStatus healthy, String diagnosticsInfo) { + this.healthStatus = healthy; + this.diagnosticsInfo = diagnosticsInfo; + } + + public TimelineHealth() { + + } + + @XmlElement(name = "healthStatus") + public TimelineHealthStatus getHealthStatus() { + return healthStatus; + } + + @XmlElement(name = "diagnosticsInfo") + public String getDiagnosticsInfo() { + return diagnosticsInfo; + } + + + public void setHealthStatus(TimelineHealthStatus healthStatus) { + this.healthStatus = healthStatus; + } + + public void setDiagnosticsInfo(String diagnosticsInfo) { + this.diagnosticsInfo = diagnosticsInfo; + } + + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/DocumentStoreTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/DocumentStoreTimelineReaderImpl.java index 2159132585739..8de3b8645ac50 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/DocumentStoreTimelineReaderImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/DocumentStoreTimelineReaderImpl.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.timeline.TimelineHealth; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType; import org.apache.hadoop.yarn.server.timelineservice.documentstore.lib.DocumentStoreVendor; @@ -100,6 +101,18 @@ public Set getEntityTypes(TimelineReaderContext context) { return collectionReader.fetchEntityTypes(context); } + @Override + public TimelineHealth getHealthStatus() { + if (collectionReader != null) { + return new TimelineHealth(TimelineHealth.TimelineHealthStatus.RUNNING, + ""); + } else { + return new TimelineHealth( + TimelineHealth.TimelineHealthStatus.READER_CONNECTION_FAILURE, + "Timeline store reader not initialized."); + } + } + // for honoring all filters from {@link TimelineEntityFilters} private Set applyFilters(TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java index d00ae4b5f70e6..653126e10080d 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.timeline.TimelineHealth; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -158,6 +159,18 @@ public Set getEntityTypes(TimelineReaderContext context) return reader.readEntityTypes(hbaseConf, conn); } + @Override + public TimelineHealth getHealthStatus() { + if (!this.isHBaseDown()) { + return new TimelineHealth(TimelineHealth.TimelineHealthStatus.RUNNING, + ""); + } else { + return new TimelineHealth( + TimelineHealth.TimelineHealthStatus.READER_CONNECTION_FAILURE, + "HBase connection is down"); + } + } + protected static final TimelineEntityFilters MONITOR_FILTERS = new TimelineEntityFilters.Builder().entityLimit(1L).build(); protected static final TimelineDataToRetrieve DATA_TO_RETRIEVE = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java index 8c7c974b5b387..06da543fdb932 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java @@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.timeline.TimelineHealth; import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity; import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; @@ -219,4 +220,13 @@ public boolean checkAccess(UserGroupInformation callerUGI) { } return callerUGI != null && adminACLsManager.isAdmin(callerUGI); } + + /** + * Check if reader connection is alive. + * + * @return boolean True if reader connection is alive, false otherwise. 
+ */ + public TimelineHealth getHealthStatus() { + return reader.getHealthStatus(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java index 330e1f48739c7..743369233653d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java @@ -48,6 +48,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout; +import org.apache.hadoop.yarn.api.records.timeline.TimelineHealth; import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType; @@ -218,6 +219,38 @@ public TimelineAbout about( return TimelineUtils.createTimelineAbout("Timeline Reader API"); } + /** + * Health check REST end point. + * + * @param req Servlet request. + * @param res Servlet response. + * + * @return A {@link Response} object with HTTP status 200 OK if the service + * is running. + * Otherwise, a {@link Response} object with HTTP status 500 is + * returned. + */ + @GET + @Path("/health") + @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8) + public Response health( + @Context HttpServletRequest req, + @Context HttpServletResponse res + ) { + Response response; + TimelineHealth timelineHealth = this.getTimelineReaderManager().getHealthStatus(); + if (timelineHealth.getHealthStatus() + .equals(TimelineHealth.TimelineHealthStatus.RUNNING)) { + response = Response.ok(timelineHealth).build(); + } else { + LOG.info("Timeline services health check: timeline reader reported " + + "connection failure"); + response = Response.serverError().entity(timelineHealth).build(); + } + + return response; + } + /** * Return a single entity for a given entity type and UID which is a delimited * string containing clusterid, userid, flow name, flowrun id and app id. 
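The /ws/v2/timeline/health endpoint wired up above answers HTTP 200 with a TimelineHealth payload when the reader reports RUNNING, and HTTP 500 carrying the diagnostics message otherwise. A minimal probe only needs the response code; the sketch below is not part of the patch, and the host and port (8188) are assumptions that should be replaced with the cluster's yarn.timeline-service.reader.webapp.address.

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

/**
 * Minimal sketch of a health probe against the timeline reader.
 * Host and port are assumptions; adjust to the reader webapp address.
 */
public final class TimelineHealthProbe {
  public static void main(String[] args) throws IOException {
    URL url = new URL("http://localhost:8188/ws/v2/timeline/health");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    int status = conn.getResponseCode();
    // 200 means the reader reported RUNNING; 500 means it reported
    // READER_CONNECTION_FAILURE, with diagnosticsInfo in the JSON body.
    System.out.println("timeline reader health: HTTP " + status);
    conn.disconnect();
  }
}

This mirrors what TestTimelineReaderWebServices#testHealthCheck asserts against the same path later in this patch.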
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java index ef08a9d9ef707..012c9a18f8502 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java @@ -46,6 +46,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.timeline.TimelineHealth; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric; @@ -442,4 +443,18 @@ public Set getEntities(TimelineReaderContext context, } return result; } + + @Override + public TimelineHealth getHealthStatus() { + try { + fs.exists(rootPath); + } catch (IOException e) { + return new TimelineHealth( + TimelineHealth.TimelineHealthStatus.READER_CONNECTION_FAILURE, + e.getMessage() + ); + } + return new TimelineHealth(TimelineHealth.TimelineHealthStatus.RUNNING, + ""); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java index bfa530984f809..b2fd773153ce0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.timelineservice.storage; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.timeline.TimelineHealth; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters; @@ -71,4 +72,10 @@ public Set getEntityTypes(TimelineReaderContext context) "requests would be empty"); return new HashSet<>(); } + + @Override + public TimelineHealth getHealthStatus() { + return new TimelineHealth(TimelineHealth.TimelineHealthStatus.RUNNING, + "NoOpTimelineReader is configured. 
"); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java index 16d623ab46fa3..4bcc6dc5a3f21 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.service.Service; +import org.apache.hadoop.yarn.api.records.timeline.TimelineHealth; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters; @@ -192,4 +193,11 @@ Set getEntities( * storage. */ Set getEntityTypes(TimelineReaderContext context) throws IOException; + + /** + * Check if reader connection is working properly. + * + * @return True if reader connection works as expected, false otherwise. + */ + TimelineHealth getHealthStatus(); } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java index 03939ada8a595..ef74716b56401 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java @@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.JettyUtils; import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout; +import org.apache.hadoop.yarn.api.records.timeline.TimelineHealth; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -777,4 +778,22 @@ public void testGetContainer() throws Exception { client.destroy(); } } + + @Test + public void testHealthCheck() throws Exception { + Client client = createClient(); + try { + URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" + + "timeline/health"); + ClientResponse resp = getResponse(client, uri); + TimelineHealth timelineHealth = + resp.getEntity(new GenericType() { + }); + assertEquals(200, resp.getStatus()); + assertEquals(TimelineHealth.TimelineHealthStatus.RUNNING, + timelineHealth.getHealthStatus()); + } finally { + client.destroy(); + } + } } \ No newline at end of file From 52128e352a30b70b83483f9290d9e94e98929705 Mon Sep 
17 00:00:00 2001 From: Sunil G Date: Fri, 31 May 2019 12:29:44 +0530 Subject: [PATCH 0085/1308] YARN-9543. [UI2] Handle ATSv2 server down or failures cases gracefully in YARN UI v2. Contributed by Zoltan Siegl and Akhil P B. --- .../hadoop-yarn-ui/src/main/webapp/.gitignore | 4 +++ .../webapp/app/adapters/timeline-health.js | 30 ++++++++++++++++++ .../webapp/app/controllers/application.js | 7 +++++ .../main/webapp/app/models/timeline-health.js | 27 ++++++++++++++++ .../src/main/webapp/app/routes/application.js | 4 +++ .../main/webapp/app/routes/timeline-error.js | 3 ++ .../webapp/app/serializers/timeline-health.js | 31 +++++++++++++++++++ .../main/webapp/app/templates/application.hbs | 4 +-- 8 files changed, 108 insertions(+), 2 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.gitignore create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/timeline-health.js create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/timeline-health.js create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/timeline-health.js diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.gitignore b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.gitignore new file mode 100644 index 0000000000000..338997fcf7d38 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.gitignore @@ -0,0 +1,4 @@ +tmp/ +node_modules/ +bower_components/ +dist/ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/timeline-health.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/timeline-health.js new file mode 100644 index 0000000000000..8ca2310630642 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/timeline-health.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import RESTAbstractAdapter from './restabstract'; + +export default RESTAbstractAdapter.extend({ + address: "timelineWebAddress", + restNameSpace: "timelineV2", + serverName: "ATS", + + urlForQueryRecord(/*query, modelName*/) { + var url = this.buildURL(); + return url + '/health'; + } +}); \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js index 50a290912aed0..34702aca2df24 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js @@ -73,4 +73,11 @@ export default Ember.Controller.extend({ } return null; }.property('model.userInfo'), + + isTimelineUnHealthy: function() { + if (this.model && this.model.timelineHealth) { + return this.model.timelineHealth.get('isTimelineUnHealthy'); + } + return true; + }.property('model.timelineHealth') }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/timeline-health.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/timeline-health.js new file mode 100644 index 0000000000000..367ab07d787f0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/timeline-health.js @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.Model.extend({ + healthStatus: DS.attr('string'), + + isTimelineUnHealthy: function() { + return this.get('healthStatus') !== 'RUNNING'; + }.property('healthStatus') +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js index adb57b19c4920..ead17e1ac53fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js @@ -27,6 +27,9 @@ export default AbstractRoute.extend({ }), userInfo: this.store.findAll('cluster-user-info', {reload: true}).catch(function() { return null; + }), + timelineHealth: this.store.queryRecord('timeline-health', {}).catch(function() { + return null; }) }); }, @@ -56,5 +59,6 @@ export default AbstractRoute.extend({ unloadAll: function() { this.store.unloadAll('ClusterInfo'); this.store.unloadAll('cluster-user-info'); + this.store.unloadAll('timeline-health'); }, }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/timeline-error.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/timeline-error.js index c2e5fc5c209a8..54fc8d4c1bb61 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/timeline-error.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/timeline-error.js @@ -19,6 +19,9 @@ import Ember from 'ember'; export default Ember.Route.extend({ + model() { + return {}; + }, afterModel(model/*, transition*/) { model.error_id = "error"; model.isValidErrorCode = false; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/timeline-health.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/timeline-health.js new file mode 100644 index 0000000000000..79fb4610ef05b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/timeline-health.js @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.JSONAPISerializer.extend({ + normalizeSingleResponse(store, primaryModelClass, payload) { + var fixedPayload = { + id: Date.now(), + type: primaryModelClass.modelName, + attributes: payload + }; + + return { data: fixedPayload }; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs index ecb1481d7aa61..1d469d9ce80ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs @@ -56,8 +56,8 @@ (current) {{/link-to}} {{/link-to}} - {{#link-to 'yarn-flow-activity' tagName="li"}} - {{#link-to 'yarn-flow-activity' class="navigation-link"}}Flow Activity + {{#link-to 'yarn-flow-activity' tagName="li" disabled=isTimelineUnHealthy}} + {{#link-to 'yarn-flow-activity' class="navigation-link" disabled=isTimelineUnHealthy}}Flow Activity (current) {{/link-to}} {{/link-to}} From 1ae062c8188499c266c4d2655f2631b5238f5bb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Fri, 31 May 2019 13:35:58 +0200 Subject: [PATCH 0086/1308] HDDS-1382. Create customized CSI server for Ozone Closes #693 --- .../org/apache/hadoop/hdds/HddsUtils.java | 7 +- .../org/apache/hadoop/hdds/TestHddsUtils.java | 42 + hadoop-ozone/common/src/main/bin/ozone | 6 + .../csi/dev-support/findbugsExcludeFile.xml | 22 + hadoop-ozone/csi/pom.xml | 169 +++ .../hadoop/ozone/csi/ControllerService.java | 123 ++ .../apache/hadoop/ozone/csi/CsiServer.java | 160 ++ .../hadoop/ozone/csi/IdentitiyService.java | 72 + .../apache/hadoop/ozone/csi/NodeService.java | 142 ++ .../apache/hadoop/ozone/csi/package-info.java | 22 + hadoop-ozone/csi/src/main/proto/csi.proto | 1323 +++++++++++++++++ hadoop-ozone/dist/pom.xml | 34 + hadoop-ozone/dist/src/main/Dockerfile | 4 + hadoop-ozone/integration-test/pom.xml | 4 + .../ozone/TestOzoneConfigurationFields.java | 2 +- hadoop-ozone/pom.xml | 11 + 16 files changed, 2141 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java create mode 100644 hadoop-ozone/csi/dev-support/findbugsExcludeFile.xml create mode 100644 hadoop-ozone/csi/pom.xml create mode 100644 hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/ControllerService.java create mode 100644 hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java create mode 100644 hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java create mode 100644 hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/NodeService.java create mode 100644 hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java create mode 100644 hadoop-ozone/csi/src/main/proto/csi.proto diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index a5961cb5dde52..a284caaf66579 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -226,7 +226,12 @@ public static Optional getHostName(String value) { if ((value == null) || value.isEmpty()) { return Optional.empty(); } - return Optional.of(HostAndPort.fromString(value).getHostText()); + String hostname = value.replaceAll("\\:[0-9]+$", 
""); + if (hostname.length() == 0) { + return Optional.empty(); + } else { + return Optional.of(hostname); + } } /** diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java new file mode 100644 index 0000000000000..75636106498eb --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds; + +import java.util.Optional; + +import org.junit.Assert; +import org.junit.Test; + +/** + * Testing HddsUtils. + */ +public class TestHddsUtils { + + @Test + public void testGetHostName() { + Assert.assertEquals(Optional.of("localhost"), + HddsUtils.getHostName("localhost:1234")); + + Assert.assertEquals(Optional.of("localhost"), + HddsUtils.getHostName("localhost")); + + Assert.assertEquals(Optional.empty(), + HddsUtils.getHostName(":1234")); + } + +} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone index 58eb8cdecabb4..6307a8191d976 100755 --- a/hadoop-ozone/common/src/main/bin/ozone +++ b/hadoop-ozone/common/src/main/bin/ozone @@ -46,6 +46,7 @@ function hadoop_usage hadoop_add_subcommand "om" daemon "Ozone Manager" hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service" hadoop_add_subcommand "s3g" daemon "run the S3 compatible REST gateway" + hadoop_add_subcommand "csi" daemon "run the standalone CSI daemon" hadoop_add_subcommand "recon" daemon "run the Recon service" hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager" hadoop_add_subcommand "sh" client "command line interface for object store operations" @@ -154,6 +155,11 @@ function ozonecmd_case HADOOP_CLASSNAME='org.apache.hadoop.ozone.s3.Gateway' OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-s3gateway" ;; + csi) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.ozone.csi.CsiServer' + OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-csi" + ;; recon) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_CLASSNAME='org.apache.hadoop.ozone.recon.ReconServer' diff --git a/hadoop-ozone/csi/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/csi/dev-support/findbugsExcludeFile.xml new file mode 100644 index 0000000000000..62d72d26a8309 --- /dev/null +++ b/hadoop-ozone/csi/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,22 @@ + + + + + + + diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml new file mode 100644 index 0000000000000..c3fb1d3d83911 --- /dev/null +++ b/hadoop-ozone/csi/pom.xml @@ -0,0 +1,169 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-ozone + 0.5.0-SNAPSHOT + + hadoop-ozone-csi + 0.5.0-SNAPSHOT + Apache Hadoop Ozone CSI service + Apache Hadoop Ozone CSI service + jar + + + 1.17.1 + + + + com.google.protobuf + protobuf-java-util + 3.5.1 + + + com.google.protobuf + protobuf-java + + + + + org.apache.hadoop + hadoop-hdds-config + + + com.google.guava + guava + 26.0-android + + + com.google.protobuf + protobuf-java + 3.5.1 + + + io.grpc + grpc-netty + ${grpc.version} + + + io.netty + netty-transport-native-epoll + 4.1.30.Final + + + io.grpc + grpc-protobuf + ${grpc.version} + + + com.google.protobuf + protobuf-java + + + + + io.grpc + grpc-stub + ${grpc.version} + + + org.apache.hadoop + hadoop-ozone-client + + + com.google.guava + guava + + + com.google.protobuf + protobuf-java + + + io.netty + netty-all + + + + + + + + + + kr.motd.maven + os-maven-plugin + ${os-maven-plugin.version} + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + ${protobuf-maven-plugin.version} + true + + + 
com.google.protobuf:protoc:${protobuf-compile.version}:exe:${os.detected.classifier} + + ${basedir}/src/main/proto/ + + csi.proto + + target/generated-sources/java + false + + + + compile-protoc + + compile + test-compile + compile-custom + test-compile-custom + + + grpc-java + + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + + + + + + + maven-enforcer-plugin + + + depcheck + + + + + + org.codehaus.mojo + findbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + + + + + diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/ControllerService.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/ControllerService.java new file mode 100644 index 0000000000000..65b72502c6766 --- /dev/null +++ b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/ControllerService.java @@ -0,0 +1,123 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.csi; + +import java.io.IOException; + +import org.apache.hadoop.ozone.client.OzoneClient; + +import csi.v1.ControllerGrpc.ControllerImplBase; +import csi.v1.Csi.CapacityRange; +import csi.v1.Csi.ControllerGetCapabilitiesRequest; +import csi.v1.Csi.ControllerGetCapabilitiesResponse; +import csi.v1.Csi.ControllerServiceCapability; +import csi.v1.Csi.ControllerServiceCapability.RPC; +import csi.v1.Csi.ControllerServiceCapability.RPC.Type; +import csi.v1.Csi.CreateVolumeRequest; +import csi.v1.Csi.CreateVolumeResponse; +import csi.v1.Csi.DeleteVolumeRequest; +import csi.v1.Csi.DeleteVolumeResponse; +import csi.v1.Csi.Volume; +import io.grpc.stub.StreamObserver; + +/** + * CSI controller service. + *
    + * This service usually runs only once and responsible for the creation of + * the volume. + */ +public class ControllerService extends ControllerImplBase { + + private final String volumeOwner; + + private long defaultVolumeSize; + + private OzoneClient ozoneClient; + + public ControllerService(OzoneClient ozoneClient, long volumeSize, + String volumeOwner) { + this.volumeOwner = volumeOwner; + this.defaultVolumeSize = volumeSize; + this.ozoneClient = ozoneClient; + } + + @Override + public void createVolume(CreateVolumeRequest request, + StreamObserver responseObserver) { + try { + ozoneClient.getObjectStore() + .createS3Bucket(volumeOwner, request.getName()); + + long size = findSize(request.getCapacityRange()); + + CreateVolumeResponse response = CreateVolumeResponse.newBuilder() + .setVolume(Volume.newBuilder() + .setVolumeId(request.getName()) + .setCapacityBytes(size)) + .build(); + + responseObserver.onNext(response); + responseObserver.onCompleted(); + } catch (IOException e) { + responseObserver.onError(e); + } + } + + private long findSize(CapacityRange capacityRange) { + if (capacityRange.getRequiredBytes() != 0) { + return capacityRange.getRequiredBytes(); + } else { + if (capacityRange.getLimitBytes() != 0) { + return Math.min(defaultVolumeSize, capacityRange.getLimitBytes()); + } else { + //~1 gig + return defaultVolumeSize; + } + } + } + + @Override + public void deleteVolume(DeleteVolumeRequest request, + StreamObserver responseObserver) { + try { + ozoneClient.getObjectStore().deleteS3Bucket(request.getVolumeId()); + + DeleteVolumeResponse response = DeleteVolumeResponse.newBuilder() + .build(); + + responseObserver.onNext(response); + responseObserver.onCompleted(); + } catch (IOException e) { + responseObserver.onError(e); + } + } + + @Override + public void controllerGetCapabilities( + ControllerGetCapabilitiesRequest request, + StreamObserver responseObserver) { + ControllerGetCapabilitiesResponse response = + ControllerGetCapabilitiesResponse.newBuilder() + .addCapabilities( + ControllerServiceCapability.newBuilder().setRpc( + RPC.newBuilder().setType(Type.CREATE_DELETE_VOLUME))) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } +} diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java new file mode 100644 index 0000000000000..df5127c47b5d3 --- /dev/null +++ b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.csi; + +import java.util.concurrent.Callable; + +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.Config; +import org.apache.hadoop.hdds.conf.ConfigGroup; +import org.apache.hadoop.hdds.conf.ConfigTag; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.util.StringUtils; + +import io.grpc.Server; +import io.grpc.netty.NettyServerBuilder; +import io.netty.channel.epoll.EpollEventLoopGroup; +import io.netty.channel.epoll.EpollServerDomainSocketChannel; +import io.netty.channel.unix.DomainSocketAddress; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine.Command; + +/** + * CLI entrypoint of the CSI service daemon. + */ +@Command(name = "ozone csi", + hidden = true, description = "CSI service daemon.", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +public class CsiServer extends GenericCli implements Callable { + + private final static Logger LOG = LoggerFactory.getLogger(CsiServer.class); + + @Override + public Void call() throws Exception { + OzoneConfiguration ozoneConfiguration = createOzoneConfiguration(); + CsiConfig csiConfig = ozoneConfiguration.getObject(CsiConfig.class); + + OzoneClient rpcClient = OzoneClientFactory.getRpcClient(ozoneConfiguration); + + EpollEventLoopGroup group = new EpollEventLoopGroup(); + + if (csiConfig.getVolumeOwner().isEmpty()) { + throw new IllegalArgumentException( + "ozone.csi.owner is not set. You should set this configuration " + + "variable to define which user should own all the created " + + "buckets."); + } + + Server server = + NettyServerBuilder + .forAddress(new DomainSocketAddress(csiConfig.getSocketPath())) + .channelType(EpollServerDomainSocketChannel.class) + .workerEventLoopGroup(group) + .bossEventLoopGroup(group) + .addService(new IdentitiyService()) + .addService(new ControllerService(rpcClient, + csiConfig.getDefaultVolumeSize(), csiConfig.getVolumeOwner())) + .addService(new NodeService(csiConfig)) + .build(); + + server.start(); + server.awaitTermination(); + rpcClient.close(); + return null; + } + + public static void main(String[] args) { + + StringUtils.startupShutdownMessage(CsiServer.class, args, LOG); + new CsiServer().run(args); + } + + /** + * Configuration settings specific to the CSI server. + */ + @ConfigGroup(prefix = "ozone.csi") + public static class CsiConfig { + private String socketPath; + private long defaultVolumeSize; + private String s3gAddress; + private String volumeOwner; + + public String getSocketPath() { + return socketPath; + } + + public String getVolumeOwner() { + return volumeOwner; + } + + @Config(key = "owner", + defaultValue = "", + description = + "This is the username which is used to create the requested " + + "storage. Used as a hadoop username and the generated ozone" + + " volume used to store all the buckets. 
WARNING: It can " + + "be a security hole to use CSI in a secure environments as " + + "ALL the users can request the mount of a specific bucket " + + "via the CSI interface.", + tags = ConfigTag.STORAGE) + public void setVolumeOwner(String volumeOwner) { + this.volumeOwner = volumeOwner; + } + + @Config(key = "socket", + defaultValue = "/var/lib/csi.sock", + description = + "The socket where all the CSI services will listen (file name).", + tags = ConfigTag.STORAGE) + public void setSocketPath(String socketPath) { + this.socketPath = socketPath; + } + + public long getDefaultVolumeSize() { + return defaultVolumeSize; + } + + @Config(key = "default-volume-size", + defaultValue = "1000000000", + description = + "The default size of the create volumes (if not specified).", + tags = ConfigTag.STORAGE) + public void setDefaultVolumeSize(long defaultVolumeSize) { + this.defaultVolumeSize = defaultVolumeSize; + } + + public String getS3gAddress() { + return s3gAddress; + } + + @Config(key = "s3g.address", + defaultValue = "http://localhost:9878", + description = + "The default size of the created volumes (if not specified in the" + + " requests).", + tags = ConfigTag.STORAGE) + public void setS3gAddress(String s3gAddress) { + this.s3gAddress = s3gAddress; + } + } +} diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java new file mode 100644 index 0000000000000..5a0c4c8ba8a02 --- /dev/null +++ b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.csi; + +import org.apache.hadoop.ozone.util.OzoneVersionInfo; + +import com.google.protobuf.BoolValue; +import csi.v1.Csi.GetPluginCapabilitiesResponse; +import csi.v1.Csi.GetPluginInfoResponse; +import csi.v1.Csi.PluginCapability; +import csi.v1.Csi.PluginCapability.Service; +import static csi.v1.Csi.PluginCapability.Service.Type.CONTROLLER_SERVICE; +import csi.v1.Csi.ProbeResponse; +import csi.v1.IdentityGrpc.IdentityImplBase; +import io.grpc.stub.StreamObserver; + +/** + * Implementation of the CSI identity service. + */ +public class IdentitiyService extends IdentityImplBase { + + @Override + public void getPluginInfo(csi.v1.Csi.GetPluginInfoRequest request, + StreamObserver responseObserver) { + GetPluginInfoResponse response = GetPluginInfoResponse.newBuilder() + .setName("org.apache.hadoop.ozone") + .setVendorVersion(OzoneVersionInfo.OZONE_VERSION_INFO.getVersion()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getPluginCapabilities( + csi.v1.Csi.GetPluginCapabilitiesRequest request, + StreamObserver responseObserver) { + GetPluginCapabilitiesResponse response = + GetPluginCapabilitiesResponse.newBuilder() + .addCapabilities(PluginCapability.newBuilder().setService( + Service.newBuilder().setType(CONTROLLER_SERVICE))) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + + } + + @Override + public void probe(csi.v1.Csi.ProbeRequest request, + StreamObserver responseObserver) { + ProbeResponse response = ProbeResponse.newBuilder() + .setReady(BoolValue.of(true)) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + + } +} diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/NodeService.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/NodeService.java new file mode 100644 index 0000000000000..8edda5923dae1 --- /dev/null +++ b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/NodeService.java @@ -0,0 +1,142 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.csi; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.ozone.csi.CsiServer.CsiConfig; + +import csi.v1.Csi.NodeGetCapabilitiesRequest; +import csi.v1.Csi.NodeGetCapabilitiesResponse; +import csi.v1.Csi.NodeGetInfoRequest; +import csi.v1.Csi.NodeGetInfoResponse; +import csi.v1.Csi.NodePublishVolumeRequest; +import csi.v1.Csi.NodePublishVolumeResponse; +import csi.v1.Csi.NodeUnpublishVolumeRequest; +import csi.v1.Csi.NodeUnpublishVolumeResponse; +import csi.v1.NodeGrpc.NodeImplBase; +import io.grpc.stub.StreamObserver; +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Implementation of the CSI node service. + */ +public class NodeService extends NodeImplBase { + + private static final Logger LOG = LoggerFactory.getLogger(NodeService.class); + + private String s3Endpoint; + + public NodeService(CsiConfig configuration) { + this.s3Endpoint = configuration.getS3gAddress(); + + } + + @Override + public void nodePublishVolume(NodePublishVolumeRequest request, + StreamObserver responseObserver) { + + try { + Files.createDirectories(Paths.get(request.getTargetPath())); + String mountCommand = + String.format("goofys --endpoint %s %s %s", + s3Endpoint, + request.getVolumeId(), + request.getTargetPath()); + LOG.info("Executing {}", mountCommand); + + executeCommand(mountCommand); + + responseObserver.onNext(NodePublishVolumeResponse.newBuilder() + .build()); + responseObserver.onCompleted(); + + } catch (Exception e) { + responseObserver.onError(e); + } + + } + + private void executeCommand(String mountCommand) + throws IOException, InterruptedException { + Process exec = Runtime.getRuntime().exec(mountCommand); + exec.waitFor(10, TimeUnit.SECONDS); + + LOG.info("Command is executed with stdout: {}, stderr: {}", + IOUtils.toString(exec.getInputStream(), "UTF-8"), + IOUtils.toString(exec.getErrorStream(), "UTF-8")); + if (exec.exitValue() != 0) { + throw new RuntimeException(String + .format("Return code of the command %s was %d", mountCommand, + exec.exitValue())); + } + } + + @Override + public void nodeUnpublishVolume(NodeUnpublishVolumeRequest request, + StreamObserver responseObserver) { + String umountCommand = + String.format("fusermount -u %s", request.getTargetPath()); + LOG.info("Executing {}", umountCommand); + + try { + executeCommand(umountCommand); + + responseObserver.onNext(NodeUnpublishVolumeResponse.newBuilder() + .build()); + responseObserver.onCompleted(); + + } catch (Exception e) { + responseObserver.onError(e); + } + + } + + @Override + public void nodeGetCapabilities(NodeGetCapabilitiesRequest request, + StreamObserver responseObserver) { + NodeGetCapabilitiesResponse response = + NodeGetCapabilitiesResponse.newBuilder() + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void nodeGetInfo(NodeGetInfoRequest request, + StreamObserver responseObserver) { + NodeGetInfoResponse response = null; + try { + response = 
NodeGetInfoResponse.newBuilder() + .setNodeId(InetAddress.getLocalHost().getHostName()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } catch (UnknownHostException e) { + responseObserver.onError(e); + } + + } +} diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java new file mode 100644 index 0000000000000..1b558dd6f40d5 --- /dev/null +++ b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.csi; + +/** + * Container Storage Interface server implementation for Ozone. + */ \ No newline at end of file diff --git a/hadoop-ozone/csi/src/main/proto/csi.proto b/hadoop-ozone/csi/src/main/proto/csi.proto new file mode 100644 index 0000000000000..3bd53a0758b4b --- /dev/null +++ b/hadoop-ozone/csi/src/main/proto/csi.proto @@ -0,0 +1,1323 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by make; DO NOT EDIT. +syntax = "proto3"; +package csi.v1; + +import "google/protobuf/descriptor.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "csi"; + +extend google.protobuf.FieldOptions { + // Indicates that a field MAY contain information that is sensitive + // and MUST be treated as such (e.g. not logged). 
+ bool csi_secret = 1059; +} +service Identity { + rpc GetPluginInfo(GetPluginInfoRequest) + returns (GetPluginInfoResponse) {} + + rpc GetPluginCapabilities(GetPluginCapabilitiesRequest) + returns (GetPluginCapabilitiesResponse) {} + + rpc Probe (ProbeRequest) + returns (ProbeResponse) {} +} + +service Controller { + rpc CreateVolume (CreateVolumeRequest) + returns (CreateVolumeResponse) {} + + rpc DeleteVolume (DeleteVolumeRequest) + returns (DeleteVolumeResponse) {} + + rpc ControllerPublishVolume (ControllerPublishVolumeRequest) + returns (ControllerPublishVolumeResponse) {} + + rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest) + returns (ControllerUnpublishVolumeResponse) {} + + rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest) + returns (ValidateVolumeCapabilitiesResponse) {} + + rpc ListVolumes (ListVolumesRequest) + returns (ListVolumesResponse) {} + + rpc GetCapacity (GetCapacityRequest) + returns (GetCapacityResponse) {} + + rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) + returns (ControllerGetCapabilitiesResponse) {} + + rpc CreateSnapshot (CreateSnapshotRequest) + returns (CreateSnapshotResponse) {} + + rpc DeleteSnapshot (DeleteSnapshotRequest) + returns (DeleteSnapshotResponse) {} + + rpc ListSnapshots (ListSnapshotsRequest) + returns (ListSnapshotsResponse) {} + + rpc ControllerExpandVolume (ControllerExpandVolumeRequest) + returns (ControllerExpandVolumeResponse) {} +} + +service Node { + rpc NodeStageVolume (NodeStageVolumeRequest) + returns (NodeStageVolumeResponse) {} + + rpc NodeUnstageVolume (NodeUnstageVolumeRequest) + returns (NodeUnstageVolumeResponse) {} + + rpc NodePublishVolume (NodePublishVolumeRequest) + returns (NodePublishVolumeResponse) {} + + rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) + returns (NodeUnpublishVolumeResponse) {} + + rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest) + returns (NodeGetVolumeStatsResponse) {} + + + rpc NodeExpandVolume(NodeExpandVolumeRequest) + returns (NodeExpandVolumeResponse) {} + + + rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) + returns (NodeGetCapabilitiesResponse) {} + + rpc NodeGetInfo (NodeGetInfoRequest) + returns (NodeGetInfoResponse) {} +} +message GetPluginInfoRequest { + // Intentionally empty. +} + +message GetPluginInfoResponse { + // The name MUST follow domain name notation format + // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD + // include the plugin's host company name and the plugin name, + // to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. This field is REQUIRED. + string name = 1; + + // This field is REQUIRED. Value of this field is opaque to the CO. + string vendor_version = 2; + + // This field is OPTIONAL. Values are opaque to the CO. + map manifest = 3; +} +message GetPluginCapabilitiesRequest { + // Intentionally empty. +} + +message GetPluginCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated PluginCapability capabilities = 1; +} + +// Specifies a capability of the plugin. +message PluginCapability { + message Service { + enum Type { + UNKNOWN = 0; + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. 
+ // In rare cases certain plugins MAY wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + CONTROLLER_SERVICE = 1; + + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the + // cluster. The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + VOLUME_ACCESSIBILITY_CONSTRAINTS = 2; + } + Type type = 1; + } + + message VolumeExpansion { + enum Type { + UNKNOWN = 0; + + // ONLINE indicates that volumes may be expanded when published to + // a node. When a Plugin implements this capability it MUST + // implement either the EXPAND_VOLUME controller capability or the + // EXPAND_VOLUME node capability or both. When a plugin supports + // ONLINE volume expansion and also has the EXPAND_VOLUME + // controller capability then the plugin MUST support expansion of + // volumes currently published and available on a node. When a + // plugin supports ONLINE volume expansion and also has the + // EXPAND_VOLUME node capability then the plugin MAY support + // expansion of node-published volume via NodeExpandVolume. + // + // Example 1: Given a shared filesystem volume (e.g. GlusterFs), + // the Plugin may set the ONLINE volume expansion capability and + // implement ControllerExpandVolume but not NodeExpandVolume. + // + // Example 2: Given a block storage volume type (e.g. EBS), the + // Plugin may set the ONLINE volume expansion capability and + // implement both ControllerExpandVolume and NodeExpandVolume. + // + // Example 3: Given a Plugin that supports volume expansion only + // upon a node, the Plugin may set the ONLINE volume + // expansion capability and implement NodeExpandVolume but not + // ControllerExpandVolume. + ONLINE = 1; + + // OFFLINE indicates that volumes currently published and + // available on a node SHALL NOT be expanded via + // ControllerExpandVolume. When a plugin supports OFFLINE volume + // expansion it MUST implement either the EXPAND_VOLUME controller + // capability or both the EXPAND_VOLUME controller capability and + // the EXPAND_VOLUME node capability. + // + // Example 1: Given a block storage volume type (e.g. Azure Disk) + // that does not support expansion of "node-attached" (i.e. + // controller-published) volumes, the Plugin may indicate + // OFFLINE volume expansion support and implement both + // ControllerExpandVolume and NodeExpandVolume. + OFFLINE = 2; + } + } + + oneof type { + // Service that the plugin supports. + Service service = 1; + VolumeExpansion volume_expansion = 2; + } +} +message ProbeRequest { + // Intentionally empty. +} + +message ProbeResponse { + // Readiness allows a plugin to report its initialization status back + // to the CO. Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. 
In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. + // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). + .google.protobuf.BoolValue ready = 1; +} +message CreateVolumeRequest { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + string name = 1; + + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. If + // specified it MUST always be honored, even when creating volumes + // from a source; which MAY force some backends to internally extend + // the volume after creating it. + CapacityRange capacity_range = 2; + + // The capabilities that the provisioned volume MUST have. SP MUST + // provision a volume that will satisfy ALL of the capabilities + // specified in this list. Otherwise SP MUST return the appropriate + // gRPC error code. + // The Plugin MUST assume that the CO MAY use the provisioned volume + // with ANY of the capabilities specified in this list. + // For example, a CO MAY specify two volume capabilities: one with + // access mode SINGLE_NODE_WRITER and another with access mode + // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the + // provisioned volume can be used in either mode. + // This also enables the CO to do early validation: If ANY of the + // specified volume capabilities are not supported by the SP, the call + // MUST return the appropriate gRPC error code. 
+ // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 3; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + map parameters = 4; + + // Secrets required by plugin to complete volume creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; + + // If specified, the new volume will be pre-populated with data from + // this source. This field is OPTIONAL. + VolumeContentSource volume_content_source = 6; + + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume MUST be accessible from. + // An SP SHALL advertise the requirements for topological + // accessibility information in documentation. COs SHALL only specify + // topological accessibility information supported by the SP. + // This field is OPTIONAL. + // This field SHALL NOT be specified unless the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // If this field is not specified and the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. + TopologyRequirement accessibility_requirements = 7; +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. +message VolumeContentSource { + message SnapshotSource { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. + string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. + string volume_id = 1; + } + + oneof type { + SnapshotSource snapshot = 1; + VolumeSource volume = 2; + } +} + +message CreateVolumeResponse { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume volume = 1; +} + +// Specify a capability of a volume. +message VolumeCapability { + // Indicate that the volume will be accessed via the block device API. + message BlockVolume { + // Intentionally empty, for now. + } + + // Indicate that the volume will be accessed via the filesystem API. + message MountVolume { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string fs_type = 1; + + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. + repeated string mount_flags = 2; + } + + // Specify how a volume can be accessed. + message AccessMode { + enum Mode { + UNKNOWN = 0; + + // Can only be published once as read/write on a single node, at + // any given time. + SINGLE_NODE_WRITER = 1; + + // Can only be published once as readonly on a single node, at + // any given time. 
+ SINGLE_NODE_READER_ONLY = 2; + + // Can be published as readonly at multiple nodes simultaneously. + MULTI_NODE_READER_ONLY = 3; + + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + MULTI_NODE_SINGLE_WRITER = 4; + + // Can be published as read/write at multiple nodes + // simultaneously. + MULTI_NODE_MULTI_WRITER = 5; + } + + // This field is REQUIRED. + Mode mode = 1; + } + + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. + oneof access_type { + BlockVolume block = 1; + MountVolume mount = 2; + } + + // This is a REQUIRED field. + AccessMode access_mode = 3; +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` SHALL be set to the same value. At +// least one of the these fields MUST be specified. +message CapacityRange { + // Volume MUST be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 required_bytes = 1; + + // Volume MUST not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + int64 limit_bytes = 2; +} + +// Information about a specific volume. +message Volume { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. + int64 capacity_bytes = 1; + + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + string volume_id = 2; + + // Opaque static properties of the volume. SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. + // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + map volume_context = 3; + + // If specified, indicates that the volume is not empty and is + // pre-populated with data from the specified source. + // This field is OPTIONAL. + VolumeContentSource content_source = 4; + + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume is accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // An SP MAY specify multiple topologies to indicate the volume is + // accessible from multiple locations. 
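
The CapacityRange rules above (zero means unspecified, required_bytes is a floor, limit_bytes is a ceiling, and equal values pin an exact size) reduce to a few comparisons. A hedged sketch follows; resolveSize and the defaultBytes parameter are inventions for illustration, not anything defined by this file.

/** Sketch: resolve a CapacityRange (0 means unspecified) to a concrete size. */
final class CapacitySizing {

  /**
   * @param requiredBytes volume MUST be at least this big (0 = unspecified)
   * @param limitBytes    volume MUST NOT be bigger than this (0 = unspecified)
   * @param defaultBytes  size this hypothetical SP would pick on its own
   * @return the size to provision
   */
  static long resolveSize(long requiredBytes, long limitBytes, long defaultBytes) {
    if (requiredBytes < 0 || limitBytes < 0) {
      throw new IllegalArgumentException("capacity values MUST NOT be negative");
    }
    if (requiredBytes != 0 && limitBytes != 0 && requiredBytes > limitBytes) {
      throw new IllegalArgumentException("required_bytes exceeds limit_bytes");
    }
    long size = Math.max(defaultBytes, requiredBytes);   // honour the floor
    if (limitBytes != 0) {
      size = Math.min(size, limitBytes);                 // honour the ceiling
    }
    return Math.max(size, requiredBytes);                // exact when both are equal
  }

  private CapacitySizing() {
  }
}
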
+ // COs MAY use this information along with the topology information
+ // returned by NodeGetInfo to ensure that a given volume is accessible
+ // from a given node when scheduling workloads.
+ // This field is OPTIONAL. If it is not specified, the CO MAY assume
+ // the volume is equally accessible from all nodes in the cluster and
+ // MAY schedule workloads referencing the volume on any available
+ // node.
+ //
+ // Example 1:
+ // accessible_topology = {"region": "R1", "zone": "Z2"}
+ // Indicates a volume accessible only from the "region" "R1" and the
+ // "zone" "Z2".
+ //
+ // Example 2:
+ // accessible_topology =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3"
+ // in the "region" "R1".
+ repeated Topology accessible_topology = 5;
+}
+
+message TopologyRequirement {
+ // Specifies the list of topologies the provisioned volume MUST be
+ // accessible from.
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // If requisite is specified, the provisioned volume MUST be
+ // accessible from at least one of the requisite topologies.
+ //
+ // Given
+ // x = number of topologies provisioned volume is accessible from
+ // n = number of requisite topologies
+ // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1.
+ // If x==n, then the SP MUST make the provisioned volume available to
+ // all topologies from the list of requisite topologies. If it is
+ // unable to do so, the SP MUST fail the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2".
+ // Similarly, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and both "zone" "Z2" and "zone" "Z3".
+ //
+ // If x<n, then the SP SHALL choose x topologies from the list of
+ // requisite topologies. If it is unable to do so, the SP MUST fail
+ // the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP MUST provision a volume that is accessible from either
+ // the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
+ //
+ // If x>n, then the SP MUST make the provisioned volume available from
+ // all topologies from the list of requisite topologies and MAY choose
+ // the remaining x-n unique topologies from the list of all possible
+ // topologies. If it is unable to do so, the SP MUST fail the
+ // CreateVolume call.
+ // For example, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2" and the SP may select the second zone
+ // independently, e.g. "R1/Z4".
+ repeated Topology requisite = 1;
+
+ // Specifies the list of topologies the CO would prefer the volume to
+ // be provisioned in.
+ //
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // An SP MUST attempt to make the provisioned volume available using
+ // the preferred topologies in order from first to last.
+ //
+ // If requisite is specified, all topologies in preferred list MUST
+ // also be present in the list of requisite topologies.
+ //
+ // If the SP is unable to make the provisioned volume available
+ // from any of the preferred topologies, the SP MAY choose a topology
+ // from the list of requisite topologies.
+ // If the list of requisite topologies is not specified, then the SP
+ // MAY choose from the list of all possible topologies.
+ // If the list of requisite topologies is specified and the SP is
+ // unable to make the provisioned volume available from any of the
+ // requisite topologies, it MUST fail the CreateVolume call.
+ //
+ // Example 1:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // preferred =
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // available from "zone" "Z3" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible.
+ //
+ // Example 2:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z2"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from "zone" "Z4" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible. If that
+ // is not possible, the SP may choose between either the "zone"
+ // "Z3" or "Z5" in the "region" "R1".
+ //
+ // Example 3:
+ // Given a volume should be accessible from TWO zones (because an
+ // opaque parameter in CreateVolumeRequest, for example, specifies
+ // the volume is accessible from two zones, aka synchronously
+ // replicated), and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z5"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from the combination of the two "zones" "Z5" and "Z3" in
+ // the "region" "R1". If that's not possible, it should fall back to
+ // a combination of "Z5" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of "Z3" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of other possibilities from the list of requisite.
+ repeated Topology preferred = 2;
+}
+
+// Topology is a map of topological domains to topological segments.
+// A topological domain is a sub-division of a cluster, like "region",
+// "zone", "rack", etc.
+// A topological segment is a specific instance of a topological domain,
+// like "zone3", "rack3", etc.
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
+// Valid keys have two segments: an OPTIONAL prefix and name, separated
+// by a slash (/), for example: "com.company.example/zone".
+// The key name segment is REQUIRED. The prefix is OPTIONAL.
+// The key name MUST be 63 characters or less, begin and end with an
+// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
+// underscores (_), dots (.), or alphanumerics in between, for example
+// "zone".
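
As a worked illustration of the requisite/preferred selection rules described above, for the simple case of a volume that only needs to be accessible from one topology (x == 1): a topology is modelled here as a plain Map of segments, and TopologyChooser and its method are illustrative names, not part of the generated CSI classes. The key-format rules for Topology continue immediately after this sketch.

import java.util.List;
import java.util.Map;
import java.util.Optional;

/** Sketch: pick one provisioning topology, preferring `preferred` order. */
final class TopologyChooser {

  static Optional<Map<String, String>> choose(
      List<Map<String, String>> requisite,
      List<Map<String, String>> preferred) {
    // Try preferred entries first, in order. When requisite is given, every
    // preferred entry is expected to appear in it; the check is defensive.
    for (Map<String, String> candidate : preferred) {
      if (requisite.isEmpty() || requisite.contains(candidate)) {
        return Optional.of(candidate);
      }
    }
    // Otherwise any requisite topology is acceptable.
    return requisite.stream().findFirst();
  }

  private TopologyChooser() {
  }
}

If both lists are empty the result is empty, which corresponds to the SP being free to choose any topology at all.
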
+// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +message Topology { + map segments = 1; +} +message DeleteVolumeRequest { + // The ID of the volume to be deprovisioned. + // This field is REQUIRED. + string volume_id = 1; + + // Secrets required by plugin to complete volume deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 2 [(csi_secret) = true]; +} + +message DeleteVolumeResponse { + // Intentionally empty. +} +message ControllerPublishVolumeRequest { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + string volume_id = 1; + + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `NodeGetInfo`. + string node_id = 2; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability volume_capability = 3; + + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. + bool readonly = 4; + + // Secrets required by plugin to complete controller publish volume + // request. This field is OPTIONAL. Refer to the + // `Secrets Requirements` section on how to use this field. + map secrets = 5 [(csi_secret) = true]; + + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; +} + +message ControllerPublishVolumeResponse { + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls calls have contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + map publish_context = 1; +} +message ControllerUnpublishVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The ID of the node. 
This field is OPTIONAL. The CO SHOULD set this + // field to match the node ID returned by `NodeGetInfo` or leave it + // unset. If the value is set, the SP MUST unpublish the volume from + // the specified node. If the value is unset, the SP MUST unpublish + // the volume from all nodes it is published to. + string node_id = 2; + + // Secrets required by plugin to complete controller unpublish volume + // request. This SHOULD be the same secrets passed to the + // ControllerPublishVolume call for the specified volume. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 3 [(csi_secret) = true]; +} + +message ControllerUnpublishVolumeResponse { + // Intentionally empty. +} +message ValidateVolumeCapabilitiesRequest { + // The ID of the volume to check. This field is REQUIRED. + string volume_id = 1; + + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 2; + + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "confirmed" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 3; + + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + map parameters = 4; + + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; +} + +message ValidateVolumeCapabilitiesResponse { + message Confirmed { + // Volume context validated by the plugin. + // This field is OPTIONAL. + map volume_context = 1; + + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 2; + + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. + map parameters = 3; + } + + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. This field SHALL only be set to a non-empty + // value for successful validation responses. + // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed confirmed = 1; + + // Message to the CO if `confirmed` above is empty. This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + string message = 2; +} +message ListVolumesRequest { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + int32 max_entries = 1; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. 
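
The max_entries / starting_token / next_token fields above describe ordinary cursor pagination. The sketch below shows how a CO might drain every page; the Controller and Page interfaces are stand-ins invented for the example, not the generated gRPC stubs.

import java.util.ArrayList;
import java.util.List;

/** Sketch of CO-side pagination over ListVolumes using next_token. */
final class VolumeLister {

  /** Hypothetical page abstraction mirroring ListVolumesResponse. */
  interface Page {
    List<String> volumeIds();
    String nextToken();          // empty string == no more pages
  }

  /** Hypothetical client call mirroring ListVolumesRequest. */
  interface Controller {
    Page listVolumes(int maxEntries, String startingToken);
  }

  static List<String> listAll(Controller controller, int pageSize) {
    List<String> all = new ArrayList<>();
    String token = "";                      // unspecified == start from the beginning
    do {
      Page page = controller.listVolumes(pageSize, token);
      all.addAll(page.volumeIds());
      token = page.nextToken();
    } while (!token.isEmpty());             // empty token means the listing is complete
    return all;
  }

  private VolumeLister() {
  }
}

A plugin with no further entries returns an empty next_token, which terminates the loop.
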
+ // An empty string is equal to an unspecified field value. + string starting_token = 2; +} + +message ListVolumesResponse { + message Entry { + Volume volume = 1; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string next_token = 2; +} +message GetCapacityRequest { + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + repeated VolumeCapability volume_capabilities = 1; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. + map parameters = 2; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that in the specified + // `accessible_topology`. This is the same as the + // `accessible_topology` the CO returns in a `CreateVolumeResponse`. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. + Topology accessible_topology = 3; +} + +message GetCapacityResponse { + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 available_capacity = 1; +} +message ControllerGetCapabilitiesRequest { + // Intentionally empty. +} + +message ControllerGetCapabilitiesResponse { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated ControllerServiceCapability capabilities = 1; +} + +// Specifies a capability of the controller service. +message ControllerServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + CREATE_DELETE_VOLUME = 1; + PUBLISH_UNPUBLISH_VOLUME = 2; + LIST_VOLUMES = 3; + GET_CAPACITY = 4; + // Currently the only way to consume a snapshot is to create + // a volume from it. Therefore plugins supporting + // CREATE_DELETE_SNAPSHOT MUST support creating volume from + // snapshot. + CREATE_DELETE_SNAPSHOT = 5; + LIST_SNAPSHOTS = 6; + + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. + CLONE_VOLUME = 7; + + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + PUBLISH_READONLY = 8; + + // See VolumeExpansion for details. + EXPAND_VOLUME = 9; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message CreateSnapshotRequest { + // The ID of the source volume to be snapshotted. + // This field is REQUIRED. 
+ string source_volume_id = 1; + + // The suggested name for the snapshot. This field is REQUIRED for + // idempotency. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + string name = 2; + + // Secrets required by plugin to complete snapshot creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 3 [(csi_secret) = true]; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + // Use cases for opaque parameters: + // - Specify a policy to automatically clean up the snapshot. + // - Specify an expiration date for the snapshot. + // - Specify whether the snapshot is readonly or read/write. + // - Specify if the snapshot should be replicated to some place. + // - Specify primary or secondary for replication systems that + // support snapshotting only on primary. + map parameters = 4; +} + +message CreateSnapshotResponse { + // Contains all attributes of the newly created snapshot that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the snapshot. This field is REQUIRED. + Snapshot snapshot = 1; +} + +// Information about a specific snapshot. +message Snapshot { + // This is the complete size of the snapshot in bytes. The purpose of + // this field is to give CO guidance on how much space is needed to + // create a volume from this snapshot. The size of the volume MUST NOT + // be less than the size of the source snapshot. This field is + // OPTIONAL. If this field is not set, it indicates that this size is + // unknown. The value of this field MUST NOT be negative and a size of + // zero means it is unspecified. + int64 size_bytes = 1; + + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. + string snapshot_id = 2; + + // Identity information for the source volume. Note that creating a + // snapshot from a snapshot is not supported here so the source has to + // be a volume. This field is REQUIRED. + string source_volume_id = 3; + + // Timestamp when the point-in-time snapshot is taken on the storage + // system. This field is REQUIRED. + .google.protobuf.Timestamp creation_time = 4; + + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. + bool ready_to_use = 5; +} +message DeleteSnapshotRequest { + // The ID of the snapshot to be deleted. + // This field is REQUIRED. + string snapshot_id = 1; + + // Secrets required by plugin to complete snapshot deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
+ map secrets = 2 [(csi_secret) = true]; +} + +message DeleteSnapshotResponse {} +// List all snapshots on the storage system regardless of how they were +// created. +message ListSnapshotsRequest { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + int32 max_entries = 1; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListSnapshots` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string starting_token = 2; + + // Identity information for the source volume. This field is OPTIONAL. + // It can be used to list snapshots by volume. + string source_volume_id = 3; + + // Identity information for a specific snapshot. This field is + // OPTIONAL. It can be used to list only a specific snapshot. + // ListSnapshots will return with current snapshot information + // and will not block if the snapshot is being processed after + // it is cut. + string snapshot_id = 4; +} + +message ListSnapshotsResponse { + message Entry { + Snapshot snapshot = 1; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListSnapshots` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListSnapshots` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + string next_token = 2; +} +message ControllerExpandVolumeRequest { + // The ID of the volume to expand. This field is REQUIRED. + string volume_id = 1; + + // This allows CO to specify the capacity requirements of the volume + // after expansion. This field is REQUIRED. + CapacityRange capacity_range = 2; + + // Secrets required by the plugin for expanding the volume. + // This field is OPTIONAL. + map secrets = 3 [(csi_secret) = true]; +} + +message ControllerExpandVolumeResponse { + // Capacity of volume after expansion. This field is REQUIRED. + int64 capacity_bytes = 1; + + // Whether node expansion is required for the volume. When true + // the CO MUST make NodeExpandVolume RPC call on the node. This field + // is REQUIRED. + bool node_expansion_required = 2; +} +message NodeStageVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. + string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map publish_context = 2; + + // The path to which the volume MAY be staged. It MUST be an + // absolute path in the root filesystem of the process serving this + // request, and MUST be a directory. The CO SHALL ensure that there + // is only one `staging_target_path` per volume. 
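
The ControllerExpandVolume / NodeExpandVolume pairing described above hinges on node_expansion_required: when it is true, the CO must follow up with a node-side call. A CO-side sketch follows, in which every type and method name is hypothetical.

/** Sketch of the two-step volume expansion flow. */
final class ExpandFlow {

  /** Hypothetical result mirroring ControllerExpandVolumeResponse. */
  static final class ControllerExpandResult {
    final long capacityBytes;
    final boolean nodeExpansionRequired;

    ControllerExpandResult(long capacityBytes, boolean nodeExpansionRequired) {
      this.capacityBytes = capacityBytes;
      this.nodeExpansionRequired = nodeExpansionRequired;
    }
  }

  interface ControllerClient {
    ControllerExpandResult expand(String volumeId, long requiredBytes);
  }

  interface NodeClient {
    long nodeExpand(String volumeId, String volumePath, long requiredBytes);
  }

  /** Grow the backing volume first, then the filesystem on the node if needed. */
  static long expand(ControllerClient controller, NodeClient node,
      String volumeId, String volumePath, long requiredBytes) {
    ControllerExpandResult result = controller.expand(volumeId, requiredBytes);
    if (result.nodeExpansionRequired) {
      return node.nodeExpand(volumeId, volumePath, requiredBytes);
    }
    return result.capacityBytes;
  }

  private ExpandFlow() {
  }
}
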
The CO SHALL ensure + // that the path is directory and that the process serving the + // request has `read` and `write` permission to that directory. The + // CO SHALL be responsible for creating the directory if it does not + // exist. + // This is a REQUIRED field. + string staging_target_path = 3; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability volume_capability = 4; + + // Secrets required by plugin to complete node stage volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; + + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; +} + +message NodeStageVolumeResponse { + // Intentionally empty. +} +message NodeUnstageVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was staged. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + string staging_target_path = 2; +} + +message NodeUnstageVolumeResponse { + // Intentionally empty. +} +message NodePublishVolumeRequest { + // The ID of the volume to publish. This field is REQUIRED. + string volume_id = 1; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + map publish_context = 2; + + // The path to which the volume was staged by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. + string staging_target_path = 3; + + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the parent directory of this path exists + // and that the process serving the request has `read` and `write` + // permissions to that parent directory. + // For volumes with an access type of block, the SP SHALL place the + // block device at target_path. + // For volumes with an access type of mount, the SP SHALL place the + // mounted directory at target_path. + // Creation of target_path is the responsibility of the SP. + // This is a REQUIRED field. + string target_path = 4; + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability volume_capability = 5; + + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. + bool readonly = 6; + + // Secrets required by plugin to complete node publish volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
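
The staging and publishing calls above are ordered: when a plugin reports the STAGE_UNSTAGE_VOLUME capability, NodeStageVolume runs once per volume per node and NodePublishVolume once per workload; without the capability, staging_target_path is simply left unset. A hypothetical CO-side sketch with invented interface names:

/** Sketch: node-side ordering when the plugin may report STAGE_UNSTAGE_VOLUME. */
final class NodePublishFlow {

  interface NodePlugin {
    void stage(String volumeId, String stagingTargetPath);
    void publish(String volumeId, String stagingTargetPath, String targetPath,
        boolean readonly);
  }

  static void bringUp(NodePlugin plugin, boolean hasStageCapability,
      String volumeId, String stagingTargetPath, String targetPath) {
    if (hasStageCapability) {
      // Stage once per volume per node, then publish for the workload.
      plugin.stage(volumeId, stagingTargetPath);
      plugin.publish(volumeId, stagingTargetPath, targetPath, false);
    } else {
      // Without the capability, the staging path is left unset.
      plugin.publish(volumeId, null, targetPath, false);
    }
  }

  private NodePublishFlow() {
  }
}
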
+ map secrets = 7 [(csi_secret) = true]; + + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 8; +} + +message NodePublishVolumeResponse { + // Intentionally empty. +} +message NodeUnpublishVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // The SP MUST delete the file or directory it created at this path. + // This is a REQUIRED field. + string target_path = 2; +} + +message NodeUnpublishVolumeResponse { + // Intentionally empty. +} +message NodeGetVolumeStatsRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. + // This is a REQUIRED field. + string volume_path = 2; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. + repeated VolumeUsage usage = 1; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; +} +message NodeGetCapabilitiesRequest { + // Intentionally empty. +} + +message NodeGetCapabilitiesResponse { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + repeated NodeServiceCapability capabilities = 1; +} + +// Specifies a capability of the node service. +message NodeServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; + // See VolumeExpansion for details. + EXPAND_VOLUME = 3; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +message NodeGetInfoRequest { +} + +message NodeGetInfoResponse { + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. + string node_id = 1; + + // Maximum number of volumes that controller can publish to the node. + // If value is not set or zero CO SHALL decide how many volumes of + // this type can be published by the controller to the node. The + // plugin MUST NOT set negative values here. + // This field is OPTIONAL. + int64 max_volumes_per_node = 2; + + // Specifies where (regions, zones, racks, etc.) the node is + // accessible from. 
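
COs commonly intersect a volume's accessible_topology with the node topology returned by NodeGetInfo (described just below) when scheduling. One plausible interpretation, in which a node satisfies an accessible topology when it carries all of that topology's segments, is sketched here purely for illustration; the class and method names are invented.

import java.util.List;
import java.util.Map;

/** Sketch: can a volume be reached from a node, judging by topology alone? */
final class TopologyMatch {

  /**
   * @param volumeTopologies the volume's accessible_topology entries
   * @param nodeSegments     the node's topology segments from NodeGetInfo
   * @return true if the node satisfies at least one accessible topology
   */
  static boolean volumeAccessibleFromNode(
      List<Map<String, String>> volumeTopologies,
      Map<String, String> nodeSegments) {
    if (volumeTopologies.isEmpty()) {
      // No topology reported: assume the volume is reachable from any node.
      return true;
    }
    return volumeTopologies.stream().anyMatch(
        topology -> topology.entrySet().stream().allMatch(
            e -> e.getValue().equals(nodeSegments.get(e.getKey()))));
  }

  private TopologyMatch() {
  }
}
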
+ // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // COs MAY use this information along with the topology information + // returned in CreateVolumeResponse to ensure that a given volume is + // accessible from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "R2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2". + Topology accessible_topology = 3; +} +message NodeExpandVolumeRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; + + // The path on which volume is available. This field is REQUIRED. + string volume_path = 2; + + // This allows CO to specify the capacity requirements of the volume + // after expansion. If capacity_range is omitted then a plugin MAY + // inspect the file system of the volume to determine the maximum + // capacity to which the volume can be expanded. In such cases a + // plugin MAY expand the volume to its maximum capacity. + // This field is OPTIONAL. + CapacityRange capacity_range = 3; +} + +message NodeExpandVolumeResponse { + // The capacity of the volume in bytes. This field is OPTIONAL. + int64 capacity_bytes = 1; +} diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 2edd2aa340d85..046f89c346c06 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -68,6 +68,13 @@ classpath hadoop-ozone-s3gateway.classpath + + org.apache.hadoop + hadoop-ozone-csi + ${ozone.version} + classpath + hadoop-ozone-csi.classpath + org.apache.hadoop hadoop-ozone-ozone-manager @@ -133,6 +140,29 @@ runtime + + copy-omitted-jars + prepare-package + + copy + + + target/ozone-${ozone.version}/share/ozone/lib + + + + com.google.protobuf + protobuf-java + 3.5.1 + + + com.google.guava + guava + 26.0-android + + + + @@ -247,6 +277,10 @@ org.apache.hadoop hadoop-ozone-s3gateway + + org.apache.hadoop + hadoop-ozone-csi + org.apache.hadoop hadoop-ozone-ozone-manager diff --git a/hadoop-ozone/dist/src/main/Dockerfile b/hadoop-ozone/dist/src/main/Dockerfile index 5c65e433a263a..469a90424481a 100644 --- a/hadoop-ozone/dist/src/main/Dockerfile +++ b/hadoop-ozone/dist/src/main/Dockerfile @@ -19,3 +19,7 @@ FROM apache/hadoop-runner:jdk11 ADD --chown=hadoop . 
/opt/hadoop WORKDIR /opt/hadoop + +RUN sudo wget https://os.anzix.net/goofys -O /usr/bin/goofys +RUN sudo chmod 755 /usr/bin/goofys +RUN sudo yum install -y fuse diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 821a2c43e0a90..2ecee8c5ac31f 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -56,6 +56,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone-s3gateway + + org.apache.hadoop + hadoop-ozone-csi + org.apache.hadoop hadoop-ozone-recon diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 2bc2dbbbdd7cb..b4fc035661dfd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -52,4 +52,4 @@ private void addPropertiesNotInXml() { configurationPropsToSkipCompare.add(OzoneConfigKeys. OZONE_S3_TOKEN_MAX_LIFETIME_KEY); } -} \ No newline at end of file +} diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 52fd608b1deb0..408c640428b04 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -52,6 +52,7 @@ ozone-recon ozone-recon-codegen upgrade + csi @@ -89,6 +90,11 @@ hadoop-ozone-s3gateway ${ozone.version} + + org.apache.hadoop + hadoop-ozone-csi + ${ozone.version} + org.apache.hadoop hadoop-ozone-datanode @@ -114,6 +120,11 @@ hadoop-ozone-filesystem-lib-legacy ${ozone.version} + + org.apache.hadoop + hadoop-hdds-config + ${hdds.version} + org.apache.hadoop hadoop-ozone-integration-test From 4cb559ea7bcf00fc4a574fffad9a3f73b8c532b0 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Fri, 31 May 2019 14:31:44 -0400 Subject: [PATCH 0087/1308] YARN-9027. Fixed LevelDBCacheTimelineStore initialization. Contributed by Prabhu Joseph --- .../server/timeline/LevelDBCacheTimelineStore.java | 8 ++++++++ .../timeline/TestLevelDBCacheTimelineStore.java | 13 +++++++++++++ 2 files changed, 21 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java index 9b1ffdcce41fb..f84eeebbf0c8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java @@ -38,6 +38,7 @@ import java.io.File; import java.io.IOException; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; /** * LevelDB implementation of {@link KeyValueBasedTimelineStore}. 
This @@ -63,6 +64,8 @@ public class LevelDBCacheTimelineStore extends KeyValueBasedTimelineStore { private String dbId; private DB entityDb; private Configuration configuration; + private static final AtomicInteger DB_COUNTER = new AtomicInteger(0); + private static final String CACHED_LDB_FILENAME = "db"; public LevelDBCacheTimelineStore(String id, String name) { super(name); @@ -76,6 +79,11 @@ public LevelDBCacheTimelineStore(String id) { this(id, LevelDBCacheTimelineStore.class.getName()); } + public LevelDBCacheTimelineStore() { + this(CACHED_LDB_FILENAME + String.valueOf(DB_COUNTER.getAndIncrement()), + LevelDBCacheTimelineStore.class.getName()); + } + @Override protected synchronized void serviceInit(Configuration conf) throws Exception { configuration = conf; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLevelDBCacheTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLevelDBCacheTimelineStore.java index 66da1e0e27481..43b04a5a37097 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLevelDBCacheTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLevelDBCacheTimelineStore.java @@ -19,9 +19,11 @@ package org.apache.hadoop.yarn.server.timeline; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.util.ReflectionUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; +import static org.junit.Assert.assertNotNull; import java.io.IOException; @@ -46,6 +48,17 @@ public TimelineStore getTimelineStore() { return store; } + @Test + public void testDefaultConstructor() { + TimelineStore store = null; + try { + store = ReflectionUtils.newInstance(LevelDBCacheTimelineStore.class, + new YarnConfiguration()); + } finally { + assertNotNull("LevelDBCacheTimelineStore failed to instantiate", store); + } + } + @Test public void testGetSingleEntity() throws IOException { super.testGetSingleEntity(); From c1d2d92187de7de6df0bcf195f3db792a269351a Mon Sep 17 00:00:00 2001 From: Ajay Yadav <7813154+ajayydv@users.noreply.github.com> Date: Fri, 31 May 2019 14:08:28 -0700 Subject: [PATCH 0088/1308] HDDS-1540. Implement addAcl,removeAcl,setAcl,getAcl for Bucket. Contributed by Ajay Kumar. 
(#874) --- .../rpc/TestOzoneRpcClientAbstract.java | 66 ++++- .../apache/hadoop/ozone/om/BucketManager.java | 3 +- .../hadoop/ozone/om/BucketManagerImpl.java | 242 +++++++++++++++++- .../org/apache/hadoop/ozone/om/IOzoneAcl.java | 67 +++++ .../apache/hadoop/ozone/om/OzoneManager.java | 49 +++- .../apache/hadoop/ozone/om/VolumeManager.java | 42 +-- .../hadoop/ozone/om/VolumeManagerImpl.java | 14 +- .../OzoneManagerRequestHandler.java | 2 +- 8 files changed, 423 insertions(+), 62 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/IOzoneAcl.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 5679edaf6254d..e6224ab044ff8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -2159,11 +2159,20 @@ public void testNativeAclsForVolume() throws Exception { // Add acl's and then call getAcl. for (OzoneAcl a : volAcls) { + // Try removing an acl which doesn't exist, it should return false. assertFalse(finalVolume.getAcls().contains(a)); - store.addAcl(ozObj, a); + assertFalse(store.removeAcl(ozObj, a)); + + assertTrue(store.addAcl(ozObj, a)); finalVolume = store.getVolume(volumeName); assertTrue(finalVolume.getAcls().contains(a)); + + // Call addAcl again, this time operation will fail as + // acl is already added. + assertFalse(store.addAcl(ozObj, a)); } + assertTrue(finalVolume.getAcls().size() == volAcls.size()); + // Reset acl's. store.setAcl(ozObj, newAcls); @@ -2173,6 +2182,61 @@ public void testNativeAclsForVolume() throws Exception { assertTrue(finalVolume.getAcls().size() == 0); } + @Test + public void testNativeAclsForBucket() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + assertNotNull("Bucket creation failed", bucket); + + OzoneObj ozObj = new OzoneObjInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setResType(OzoneObj.ResourceType.BUCKET) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); + // Get acls for volume. + List volAcls = store.getAcl(ozObj); + volAcls.forEach(a -> assertTrue(bucket.getAcls().contains(a))); + + // Remove all acl's. + for (OzoneAcl a : volAcls) { + assertTrue(store.removeAcl(ozObj, a)); + } + List newAcls = store.getAcl(ozObj); + OzoneBucket finalBuck = volume.getBucket(bucketName); + assertTrue(finalBuck.getAcls().size() == 0); + assertTrue(newAcls.size() == 0); + + // Add acl's and then call getAcl. + for (OzoneAcl a : volAcls) { + // Try removing an acl which doesn't exist, it should return false. + assertFalse(finalBuck.getAcls().contains(a)); + assertFalse(store.removeAcl(ozObj, a)); + + // Add acl should succeed. + assertTrue(store.addAcl(ozObj, a)); + finalBuck = volume.getBucket(bucketName); + assertTrue(finalBuck.getAcls().contains(a)); + + // Call addAcl again, this time operation will return false as + // acl is already added. + assertFalse(store.addAcl(ozObj, a)); + } + assertTrue(finalBuck.getAcls().size() == volAcls.size()); + + // Reset acl's. 
+ store.setAcl(ozObj, newAcls); + finalBuck = volume.getBucket(bucketName); + newAcls = store.getAcl(ozObj); + assertTrue(newAcls.size() == 0); + assertTrue(finalBuck.getAcls().size() == 0); + } + private byte[] generateData(int size, byte val) { byte[] chars = new byte[size]; Arrays.fill(chars, val); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java index 4417567d9b6db..595ea43df0e58 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java @@ -25,7 +25,7 @@ /** * BucketManager handles all the bucket level operations. */ -public interface BucketManager { +public interface BucketManager extends IOzoneAcl { /** * Creates a bucket. * @param bucketInfo - OmBucketInfo for creating bucket. @@ -78,4 +78,5 @@ OmBucketInfo getBucketInfo(String volumeName, String bucketName) List listBuckets(String volumeName, String startBucket, String bucketPrefix, int maxNumOfBuckets) throws IOException; + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 1a6c628ae0385..18cc266a11617 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.util.List; +import java.util.Objects; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoProtocolVersion; @@ -30,6 +31,8 @@ import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import com.google.common.base.Preconditions; @@ -37,6 +40,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; + /** * OM bucket manager. 
*/ @@ -207,7 +212,7 @@ public OmBucketInfo getBucketInfo(String volumeName, String bucketName) LOG.debug("bucket: {} not found in volume: {}.", bucketName, volumeName); throw new OMException("Bucket not found", - OMException.ResultCodes.BUCKET_NOT_FOUND); + BUCKET_NOT_FOUND); } return value; } catch (IOException | DBException ex) { @@ -241,7 +246,7 @@ public void setBucketProperty(OmBucketArgs args) throws IOException { if (oldBucketInfo == null) { LOG.debug("bucket: {} not found ", bucketName); throw new OMException("Bucket doesn't exist", - OMException.ResultCodes.BUCKET_NOT_FOUND); + BUCKET_NOT_FOUND); } OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder(); bucketInfoBuilder.setVolumeName(oldBucketInfo.getVolumeName()) @@ -333,7 +338,7 @@ public void deleteBucket(String volumeName, String bucketName) if (metadataManager.getBucketTable().get(bucketKey) == null) { LOG.debug("bucket: {} not found ", bucketName); throw new OMException("Bucket doesn't exist", - OMException.ResultCodes.BUCKET_NOT_FOUND); + BUCKET_NOT_FOUND); } //Check if bucket is empty if (!metadataManager.isBucketEmpty(volumeName, bucketName)) { @@ -370,4 +375,235 @@ public List listBuckets(String volumeName, volumeName, startBucket, bucketPrefix, maxNumOfBuckets); } + + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * @throws IOException if there is error. + */ + @Override + public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + Objects.requireNonNull(obj); + Objects.requireNonNull(acl); + if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) { + throw new IllegalArgumentException("Unexpected argument passed to " + + "BucketManager. OzoneObj type:" + obj.getResourceType()); + } + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + String dbBucketKey = metadataManager.getBucketKey(volume, bucket); + OmBucketInfo bucketInfo = + metadataManager.getBucketTable().get(dbBucketKey); + if (bucketInfo == null) { + LOG.debug("Bucket:{}/{} does not exist", volume, bucket); + throw new OMException("Bucket " + bucket + " is not found", + BUCKET_NOT_FOUND); + } + List list = bucketInfo.getAcls(); + if(!validateAddAcl(acl, list)) { + // New acl can't be added as it is not consistent with existing ACLs. + LOG.info("New acl:{} can't be added as it is not consistent with " + + "existing ACLs:{}.", acl, StringUtils.join(",", list)); + return false; + } + list.add(acl); + OmBucketInfo updatedBucket = OmBucketInfo.newBuilder() + .setVolumeName(bucketInfo.getVolumeName()) + .setBucketName(bucketInfo.getBucketName()) + .setStorageType(bucketInfo.getStorageType()) + .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) + .setCreationTime(bucketInfo.getCreationTime()) + .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) + .addAllMetadata(bucketInfo.getMetadata()) + .setAcls(list) + .build(); + // TODO:HDDS-1619 OM HA changes required for all acl operations. + + metadataManager.getBucketTable().put(dbBucketKey, updatedBucket); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Add acl operation failed for bucket:{}/{} acl:{}", + volume, bucket, acl, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + + return true; + } + + /** + * Remove acl for Ozone object. 
Return true if acl is removed successfully + * else false. + * + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * @throws IOException if there is error. + */ + @Override + public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + Objects.requireNonNull(obj); + Objects.requireNonNull(acl); + if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) { + throw new IllegalArgumentException("Unexpected argument passed to " + + "BucketManager. OzoneObj type:" + obj.getResourceType()); + } + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + String dbBucketKey = metadataManager.getBucketKey(volume, bucket); + OmBucketInfo bucketInfo = + metadataManager.getBucketTable().get(dbBucketKey); + if (bucketInfo == null) { + LOG.debug("Bucket:{}/{} does not exist", volume, bucket); + throw new OMException("Bucket " + bucket + " is not found", + BUCKET_NOT_FOUND); + } + List list = bucketInfo.getAcls(); + if (!list.contains(acl)) { + // Return false if acl doesn't exist in current ACLs. + LOG.info("Acl:{} not found in existing ACLs:{}.", acl, + StringUtils.join(",", list)); + return false; + } + list.remove(acl); + OmBucketInfo updatedBucket = OmBucketInfo.newBuilder() + .setVolumeName(bucketInfo.getVolumeName()) + .setBucketName(bucketInfo.getBucketName()) + .setStorageType(bucketInfo.getStorageType()) + .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) + .setCreationTime(bucketInfo.getCreationTime()) + .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) + .addAllMetadata(bucketInfo.getMetadata()) + .setAcls(list) + .build(); + + metadataManager.getBucketTable().put(dbBucketKey, updatedBucket); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Remove acl operation failed for bucket:{}/{} acl:{}", + volume, bucket, acl, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + + return true; + } + + /** + * Acls to be set for given Ozone object. This operations reset ACL for given + * object to list of ACLs provided in argument. + * + * @param obj Ozone object. + * @param acls List of acls. + * @throws IOException if there is error. + */ + @Override + public boolean setAcl(OzoneObj obj, List acls) throws IOException { + Objects.requireNonNull(obj); + Objects.requireNonNull(acls); + if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) { + throw new IllegalArgumentException("Unexpected argument passed to " + + "BucketManager. 
OzoneObj type:" + obj.getResourceType()); + } + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + String dbBucketKey = metadataManager.getBucketKey(volume, bucket); + OmBucketInfo bucketInfo = + metadataManager.getBucketTable().get(dbBucketKey); + if (bucketInfo == null) { + LOG.debug("Bucket:{}/{} does not exist", volume, bucket); + throw new OMException("Bucket " + bucket + " is not found", + BUCKET_NOT_FOUND); + } + OmBucketInfo updatedBucket = OmBucketInfo.newBuilder() + .setVolumeName(bucketInfo.getVolumeName()) + .setBucketName(bucketInfo.getBucketName()) + .setStorageType(bucketInfo.getStorageType()) + .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) + .setCreationTime(bucketInfo.getCreationTime()) + .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) + .addAllMetadata(bucketInfo.getMetadata()) + .setAcls(acls) + .build(); + + metadataManager.getBucketTable().put(dbBucketKey, updatedBucket); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Set acl operation failed for bucket:{}/{} acl:{}", + volume, bucket, StringUtils.join(",", acls), ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + + return true; + } + + /** + * Validates if a new acl addition is consistent with current ACL list. + * @param newAcl new acl to be added. + * @param currentAcls list of acls. + * + * @return true if newAcl addition to existing acls is valid, else false. + * */ + private boolean validateAddAcl(OzoneAcl newAcl, List currentAcls) { + + // Check 1: Check for duplicate. + if(currentAcls.contains(newAcl)) { + return false; + } + + return true; + } + + /** + * Returns list of ACLs for given Ozone object. + * + * @param obj Ozone object. + * @throws IOException if there is error. + */ + @Override + public List getAcl(OzoneObj obj) throws IOException { + Objects.requireNonNull(obj); + + if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) { + throw new IllegalArgumentException("Unexpected argument passed to " + + "BucketManager. OzoneObj type:" + obj.getResourceType()); + } + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + String dbBucketKey = metadataManager.getBucketKey(volume, bucket); + OmBucketInfo bucketInfo = + metadataManager.getBucketTable().get(dbBucketKey); + if (bucketInfo == null) { + LOG.debug("Bucket:{}/{} does not exist", volume, bucket); + throw new OMException("Bucket " + bucket + " is not found", + BUCKET_NOT_FOUND); + } + return bucketInfo.getAcls(); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Get acl operation failed for bucket:{}/{} acl:{}", + volume, bucket, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/IOzoneAcl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/IOzoneAcl.java new file mode 100644 index 0000000000000..50744e4ecaae1 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/IOzoneAcl.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.security.acl.OzoneObj; + +import java.io.IOException; +import java.util.List; + +/** + * Interface for Ozone Acl management. + */ +public interface IOzoneAcl { + + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * + * @throws IOException if there is error. + * */ + boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * + * @throws IOException if there is error. + * */ + boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Acls to be set for given Ozone object. This operations reset ACL for + * given object to list of ACLs provided in argument. + * @param obj Ozone object. + * @param acls List of acls. + * + * @throws IOException if there is error. + * */ + boolean setAcl(OzoneObj obj, List acls) throws IOException; + + /** + * Returns list of ACLs for given Ozone object. + * @param obj Ozone object. + * + * @throws IOException if there is error. + * */ + List getAcl(OzoneObj obj) throws IOException; +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 845e53a757f52..a89e4b29c6709 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -29,7 +29,6 @@ import java.security.KeyPair; import java.security.cert.CertificateException; import java.util.Collection; -import java.util.Collections; import java.util.Objects; import org.apache.hadoop.classification.InterfaceAudience; @@ -205,6 +204,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; import static org.apache.hadoop.ozone.protocol.proto .OzoneManagerProtocolProtos.OzoneManagerService @@ -2940,11 +2940,16 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); } // TODO: Audit ACL operation. 
- if(obj.getResourceType().equals(ResourceType.VOLUME)) { + switch (obj.getResourceType()) { + case VOLUME: return volumeManager.addAcl(obj, acl); - } - return false; + case BUCKET: + return bucketManager.addAcl(obj, acl); + default: + throw new OMException("Unexpected resource type: " + + obj.getResourceType(), INVALID_REQUEST); + } } /** @@ -2961,11 +2966,17 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL, obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); } - if(obj.getResourceType().equals(ResourceType.VOLUME)) { + // TODO: Audit ACL operation. + switch (obj.getResourceType()) { + case VOLUME: return volumeManager.removeAcl(obj, acl); - } - return false; + case BUCKET: + return bucketManager.removeAcl(obj, acl); + default: + throw new OMException("Unexpected resource type: " + + obj.getResourceType(), INVALID_REQUEST); + } } /** @@ -2982,11 +2993,17 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL, obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); } - if(obj.getResourceType().equals(ResourceType.VOLUME)) { + // TODO: Audit ACL operation. + switch (obj.getResourceType()) { + case VOLUME: return volumeManager.setAcl(obj, acls); - } - return false; + case BUCKET: + return bucketManager.setAcl(obj, acls); + default: + throw new OMException("Unexpected resource type: " + + obj.getResourceType(), INVALID_REQUEST); + } } /** @@ -3001,11 +3018,17 @@ public List getAcl(OzoneObj obj) throws IOException { checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.READ_ACL, obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); } - if(obj.getResourceType().equals(ResourceType.VOLUME)) { + // TODO: Audit ACL operation. + switch (obj.getResourceType()) { + case VOLUME: return volumeManager.getAcl(obj); - } - return Collections.emptyList(); + case BUCKET: + return bucketManager.getAcl(obj); + default: + throw new OMException("Unexpected resource type: " + + obj.getResourceType(), INVALID_REQUEST); + } } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java index b7e28d396ebe9..53add71383726 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java @@ -16,7 +16,6 @@ */ package org.apache.hadoop.ozone.om; -import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmDeleteVolumeResponse; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeOwnerChangeResponse; @@ -24,7 +23,6 @@ .OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .VolumeList; -import org.apache.hadoop.ozone.security.acl.OzoneObj; import java.io.IOException; import java.util.List; @@ -32,7 +30,7 @@ /** * OM volume manager interface. */ -public interface VolumeManager { +public interface VolumeManager extends IOzoneAcl { /** * Create a new volume. @@ -144,42 +142,4 @@ boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) List listVolumes(String userName, String prefix, String startKey, int maxKeys) throws IOException; - /** - * Add acl for Ozone object. 
Return true if acl is added successfully else - * false. - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * - * @throws IOException if there is error. - * */ - boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException; - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * - * @throws IOException if there is error. - * */ - boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException; - - /** - * Acls to be set for given Ozone object. This operations reset ACL for - * given object to list of ACLs provided in argument. - * @param obj Ozone object. - * @param acls List of acls. - * - * @throws IOException if there is error. - * */ - boolean setAcl(OzoneObj obj, List acls) throws IOException; - - /** - * Returns list of ACLs for given Ozone object. - * @param obj Ozone object. - * - * @throws IOException if there is error. - * */ - List getAcl(OzoneObj obj) throws IOException; - } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java index 19e94b5e45c22..9519f770c662c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java @@ -539,7 +539,12 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { throw new OMException("Volume " + volume + " is not found", ResultCodes.VOLUME_NOT_FOUND); } - volumeArgs.addAcl(acl); + try { + volumeArgs.addAcl(acl); + } catch (OMException ex) { + LOG.info("Add acl failed.", ex); + return false; + } metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); Preconditions.checkState(volume.equals(volumeArgs.getVolume())); @@ -584,7 +589,12 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { throw new OMException("Volume " + volume + " is not found", ResultCodes.VOLUME_NOT_FOUND); } - volumeArgs.removeAcl(acl); + try { + volumeArgs.removeAcl(acl); + } catch (OMException ex) { + LOG.info("Remove acl failed.", ex); + return false; + } metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); Preconditions.checkState(volume.equals(volumeArgs.getVolume())); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 6942456bd95ce..b82265024a3c7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -398,7 +398,7 @@ private GetAclResponse getAcl(GetAclRequest req) throws IOException { List aclList = impl.getAcl(OzoneObjInfo.fromProtobuf(req.getObj())); - aclList.parallelStream().forEach(a -> acls.add(OzoneAcl.toProtobuf(a))); + aclList.forEach(a -> acls.add(OzoneAcl.toProtobuf(a))); return GetAclResponse.newBuilder().addAllAcls(acls).build(); } From 221089760910743eae58eb2cbd5ac86c4ee96a17 Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Fri, 31 May 2019 17:35:49 -0700 Subject: [PATCH 0089/1308] YARN-9592. Use Logger format in ContainersMonitorImpl. Contributed by Inigo Goiri. 
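The change below converts string-concatenated log calls to SLF4J's parameterized form, so a message is only formatted when the corresponding log level is enabled. A minimal, self-contained sketch of the pattern (the class name and values here are illustrative only, not part of this patch; it assumes slf4j-api and a binding on the classpath):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggerFormatExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggerFormatExample.class);

      public static void main(String[] args) {
        long memLimit = 2048L;
        long currentUsage = 4096L;

        // Before: concatenation builds the message string even when the
        // WARN level is disabled for this logger.
        LOG.warn("Process tree running over the configured limit. Limit="
            + memLimit + ", current usage = " + currentUsage);

        // After: the {} placeholders are substituted only when WARN is
        // enabled, so suppressed log calls build no string at all.
        LOG.warn("Process tree running over the configured limit. "
            + "Limit={}, current usage = {}", memLimit, currentUsage);
      }
    }
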
--- .../monitor/ContainersMonitorImpl.java | 110 +++++++++--------- 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index b46e6204b9dca..43c7820e39e12 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -133,13 +133,12 @@ protected void serviceInit(Configuration myConf) throws Exception { this.resourceCalculatorPlugin = ResourceCalculatorPlugin.getContainersMonitorPlugin(this.conf); - LOG.info(" Using ResourceCalculatorPlugin : " - + this.resourceCalculatorPlugin); + LOG.info("Using ResourceCalculatorPlugin: {}", + this.resourceCalculatorPlugin); processTreeClass = this.conf.getClass( YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, null, ResourceCalculatorProcessTree.class); - LOG.info(" Using ResourceCalculatorProcessTree : " - + this.processTreeClass); + LOG.info("Using ResourceCalculatorProcessTree: {}", this.processTreeClass); this.containerMetricsEnabled = this.conf.getBoolean(YarnConfiguration.NM_CONTAINER_METRICS_ENABLE, @@ -185,10 +184,10 @@ protected void serviceInit(Configuration myConf) throws Exception { strictMemoryEnforcement = conf.getBoolean( YarnConfiguration.NM_MEMORY_RESOURCE_ENFORCED, YarnConfiguration.DEFAULT_NM_MEMORY_RESOURCE_ENFORCED); - LOG.info("Physical memory check enabled: " + pmemCheckEnabled); - LOG.info("Virtual memory check enabled: " + vmemCheckEnabled); - LOG.info("Elastic memory control enabled: " + elasticMemoryEnforcement); - LOG.info("Strict memory control enabled: " + strictMemoryEnforcement); + LOG.info("Physical memory check enabled: {}", pmemCheckEnabled); + LOG.info("Virtual memory check enabled: {}", vmemCheckEnabled); + LOG.info("Elastic memory control enabled: {}", elasticMemoryEnforcement); + LOG.info("Strict memory control enabled: {}", strictMemoryEnforcement); if (elasticMemoryEnforcement) { if (!CGroupElasticMemoryController.isAvailable()) { @@ -213,7 +212,7 @@ protected void serviceInit(Configuration myConf) throws Exception { containersMonitorEnabled = isContainerMonitorEnabled() && monitoringInterval > 0; - LOG.info("ContainersMonitor enabled: " + containersMonitorEnabled); + LOG.info("ContainersMonitor enabled: {}", containersMonitorEnabled); nodeCpuPercentageForYARN = NodeManagerHardwareUtils.getNodeCpuPercentage(this.conf); @@ -226,20 +225,21 @@ protected void serviceInit(Configuration myConf) throws Exception { .getPhysicalMemorySize(); if (totalPhysicalMemoryOnNM <= 0) { LOG.warn("NodeManager's totalPmem could not be calculated. 
" - + "Setting it to " + UNKNOWN_MEMORY_LIMIT); + + "Setting it to {}", UNKNOWN_MEMORY_LIMIT); totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT; } } if (totalPhysicalMemoryOnNM != UNKNOWN_MEMORY_LIMIT && this.maxPmemAllottedForContainers > totalPhysicalMemoryOnNM * 0.80f) { - LOG.warn("NodeManager configured with " - + TraditionalBinaryPrefix.long2String(maxPmemAllottedForContainers, - "", 1) - + " physical memory allocated to containers, which is more than " - + "80% of the total physical memory available (" - + TraditionalBinaryPrefix.long2String(totalPhysicalMemoryOnNM, "", - 1) + "). Thrashing might happen."); + LOG.warn( + "NodeManager configured with {} physical memory allocated to " + + "containers, which is more than 80% of the total physical memory " + + "available ({}). Thrashing might happen.", + TraditionalBinaryPrefix.long2String( + maxPmemAllottedForContainers, "B", 1), + TraditionalBinaryPrefix.long2String( + totalPhysicalMemoryOnNM, "B", 1)); } } super.serviceInit(this.conf); @@ -264,13 +264,13 @@ private boolean isContainerMonitorEnabled() { private boolean isResourceCalculatorAvailable() { if (resourceCalculatorPlugin == null) { - LOG.info("ResourceCalculatorPlugin is unavailable on this system. " + this - .getClass().getName() + " is disabled."); + LOG.info("ResourceCalculatorPlugin is unavailable on this system. " + + "{} is disabled.", this.getClass().getName()); return false; } if (getResourceCalculatorProcessTree("0") == null) { LOG.info("ResourceCalculatorProcessTree is unavailable on this system. " - + this.getClass().getName() + " is disabled."); + + "{} is disabled.", this.getClass().getName()); return false; } return true; @@ -426,15 +426,15 @@ private boolean isProcessTreeOverLimit(String containerId, boolean isOverLimit = false; if (currentMemUsage > (2 * memLimit)) { - LOG.warn("Process tree for container: " + containerId - + " running over twice " + "the configured limit. Limit=" + memLimit - + ", current usage = " + currentMemUsage); + LOG.warn("Process tree for container: {} running over twice " + + "the configured limit. Limit={}, current usage = {}", + containerId, memLimit, currentMemUsage); isOverLimit = true; } else if (curMemUsageOfAgedProcesses > memLimit) { - LOG.warn("Process tree for container: " + containerId - + " has processes older than 1 " - + "iteration running over the configured limit. Limit=" + memLimit - + ", current usage = " + curMemUsageOfAgedProcesses); + LOG.warn("Process tree for container: {} has processes older than 1 " + + "iteration running over the configured limit. 
" + + "Limit={}, current usage = {}", + containerId, memLimit, curMemUsageOfAgedProcesses); isOverLimit = true; } @@ -468,8 +468,8 @@ public void run() { tmp.append(p.getPID()); tmp.append(" "); } - LOG.debug("Current ProcessTree list : {}", - tmp.substring(0, tmp.length()) + "]"); + tmp.append("]"); + LOG.debug("Current ProcessTree list : {}", tmp); } // Temporary structure to calculate the total resource utilization of @@ -495,8 +495,9 @@ public void run() { if (pId == null || !isResourceCalculatorAvailable()) { continue; // processTree cannot be tracked } - LOG.debug("Constructing ProcessTree for : PID = {}" - +" ContainerId = {}", pId, containerId); + LOG.debug( + "Constructing ProcessTree for : PID = {} ContainerId = {}", + pId, containerId); ResourceCalculatorProcessTree pTree = ptInfo.getProcessTree(); pTree.updateProcessTree(); // update process-tree long currentVmemUsage = pTree.getVirtualMemorySize(); @@ -509,8 +510,8 @@ public void run() { // CPU usage is not available likely because the container just // started. Let us skip this turn and consider this container // in the next iteration. - LOG.info("Skipping monitoring container " + containerId - + " since CPU usage is not yet available."); + LOG.info("Skipping monitoring container {} since " + + "CPU usage is not yet available.", containerId); continue; } @@ -558,8 +559,8 @@ public void run() { try { Thread.sleep(monitoringInterval); } catch (InterruptedException e) { - LOG.warn(ContainersMonitorImpl.class.getName() - + " is interrupted. Exiting."); + LOG.warn("{} is interrupted. Exiting.", + ContainersMonitorImpl.class.getName()); break; } } @@ -604,16 +605,16 @@ private void initializeProcessTrees( if ((ipAndHost != null) && (ipAndHost[0] != null) && (ipAndHost[1] != null)) { container.setIpAndHost(ipAndHost); - LOG.info(containerId + "'s ip = " + ipAndHost[0] - + ", and hostname = " + ipAndHost[1]); + LOG.info("{}'s ip = {}, and hostname = {}", + containerId, ipAndHost[0], ipAndHost[1]); } else { - LOG.info("Can not get both ip and hostname: " - + Arrays.toString(ipAndHost)); + LOG.info("Can not get both ip and hostname: {}", + Arrays.toString(ipAndHost)); } String exposedPorts = containerExecutor.getExposedPorts(container); container.setExposedPorts(exposedPorts); } else { - LOG.info(containerId + " is missing. Not setting ip and hostname"); + LOG.info("{} is missing. 
Not setting ip and hostname", containerId); } } } @@ -648,15 +649,15 @@ private void recordUsage(ContainerId containerId, String pId, long vmemLimit = ptInfo.getVmemLimit(); long pmemLimit = ptInfo.getPmemLimit(); if (AUDITLOG.isDebugEnabled()) { - AUDITLOG.debug(String.format( - "Resource usage of ProcessTree %s for container-id %s:" + - " %s CPU:%f CPU/core:%f", - pId, containerId.toString(), + AUDITLOG.debug( + "Resource usage of ProcessTree {} for container-id {}:" + + " {} CPU:{} CPU/core:{}", + pId, containerId, formatUsageString( currentVmemUsage, vmemLimit, currentPmemUsage, pmemLimit), cpuUsagePercentPerCore, - cpuUsageTotalCoresPercentage)); + cpuUsageTotalCoresPercentage); } // Add resource utilization for this container @@ -754,15 +755,15 @@ && isProcessTreeOverLimit(containerId.toString(), LOG.warn(msg); // warn if not a leader if (!pTree.checkPidPgrpidForMatch()) { - LOG.error("Killed container process with PID " + pId - + " but it is not a process group leader."); + LOG.error("Killed container process with PID {} " + + "but it is not a process group leader.", pId); } // kill the container eventDispatcher.getEventHandler().handle( new ContainerKillEvent(containerId, containerExitStatus, msg)); trackingContainers.remove(containerId); - LOG.info("Removed ProcessTree with root " + pId); + LOG.info("Removed ProcessTree with root {}", pId); } } @@ -784,7 +785,7 @@ private void reportResourceUsage(ContainerId containerId, currentPmemUsage, cpuUsagePercentPerCore); } } else { - LOG.info(containerId + " does not exist to report"); + LOG.info("{} does not exist to report", containerId); } } @@ -967,12 +968,11 @@ private void onChangeMonitoringContainerResource( if (containersMonitorEnabled) { ProcessTreeInfo processTreeInfo = trackingContainers.get(containerId); if (processTreeInfo == null) { - LOG.warn("Failed to track container " - + containerId.toString() - + ". It may have already completed."); + LOG.warn("Failed to track container {}. It may have already completed.", + containerId); return; } - LOG.info("Changing resource-monitoring for " + containerId); + LOG.info("Changing resource-monitoring for {}", containerId); updateContainerMetrics(monitoringEvent); long pmemLimit = changeEvent.getResource().getMemorySize() * 1024L * 1024L; @@ -984,7 +984,7 @@ private void onChangeMonitoringContainerResource( private void onStopMonitoringContainer( ContainersMonitorEvent monitoringEvent, ContainerId containerId) { - LOG.info("Stopping resource-monitoring for " + containerId); + LOG.info("Stopping resource-monitoring for {}", containerId); updateContainerMetrics(monitoringEvent); trackingContainers.remove(containerId); } @@ -993,7 +993,7 @@ private void onStartMonitoringContainer( ContainersMonitorEvent monitoringEvent, ContainerId containerId) { ContainerStartMonitoringEvent startEvent = (ContainerStartMonitoringEvent) monitoringEvent; - LOG.info("Starting resource-monitoring for " + containerId); + LOG.info("Starting resource-monitoring for {}", containerId); updateContainerMetrics(monitoringEvent); trackingContainers.put(containerId, new ProcessTreeInfo(containerId, null, null, From 08363db09326d421bd38b72b86a24cb75c339043 Mon Sep 17 00:00:00 2001 From: Sunil G Date: Mon, 3 Jun 2019 09:41:17 +0530 Subject: [PATCH 0090/1308] SUBMARINE-87. Add an apache rat check script for submarine. Contributed by Zhankun Tang. 
--- hadoop-submarine/dev-support/checks/rat.sh | 24 ++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100755 hadoop-submarine/dev-support/checks/rat.sh diff --git a/hadoop-submarine/dev-support/checks/rat.sh b/hadoop-submarine/dev-support/checks/rat.sh new file mode 100755 index 0000000000000..ffead21731971 --- /dev/null +++ b/hadoop-submarine/dev-support/checks/rat.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +mkdir -p target +rm target/rat-aggregated.txt +mvn apache-rat:check +grep -r --include=rat.txt "\!\?\?\?\?" ./* | tee ./target/rat-aggregated.txt +if [ "$(cat target/rat-aggregated.txt)" ]; then + echo "Failed to pass apache rat check!" + exit -1 +fi From 21852494815e7314e0873c3963a54457ac2aab28 Mon Sep 17 00:00:00 2001 From: Sunil G Date: Mon, 3 Jun 2019 10:41:25 +0530 Subject: [PATCH 0091/1308] Preparing for submarine-0.3.0 development --- hadoop-submarine/hadoop-submarine-all/pom.xml | 4 ++-- hadoop-submarine/hadoop-submarine-core/pom.xml | 4 ++-- hadoop-submarine/hadoop-submarine-dist/pom.xml | 4 ++-- hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml | 6 +++--- .../hadoop-submarine-yarnservice-runtime/pom.xml | 8 ++++---- hadoop-submarine/pom.xml | 2 +- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/hadoop-submarine/hadoop-submarine-all/pom.xml b/hadoop-submarine/hadoop-submarine-all/pom.xml index 639a9199e93de..c26e0d57df50d 100644 --- a/hadoop-submarine/hadoop-submarine-all/pom.xml +++ b/hadoop-submarine/hadoop-submarine-all/pom.xml @@ -20,7 +20,7 @@ hadoop-submarine org.apache.hadoop - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT ${project.artifactId} ${project.version} @@ -30,7 +30,7 @@ ${project.parent.parent.basedir} hadoop-submarine-all - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT diff --git a/hadoop-submarine/hadoop-submarine-core/pom.xml b/hadoop-submarine/hadoop-submarine-core/pom.xml index 332c4db966d99..0ef6f71a3c37a 100644 --- a/hadoop-submarine/hadoop-submarine-core/pom.xml +++ b/hadoop-submarine/hadoop-submarine-core/pom.xml @@ -20,10 +20,10 @@ hadoop-submarine org.apache.hadoop - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT hadoop-submarine-core - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT Hadoop Submarine Core diff --git a/hadoop-submarine/hadoop-submarine-dist/pom.xml b/hadoop-submarine/hadoop-submarine-dist/pom.xml index 7196df1bb880f..8aa7db13f936e 100644 --- a/hadoop-submarine/hadoop-submarine-dist/pom.xml +++ b/hadoop-submarine/hadoop-submarine-dist/pom.xml @@ -20,7 +20,7 @@ hadoop-submarine org.apache.hadoop - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT ${project.artifactId} ${project.version} @@ -31,7 +31,7 @@ ${project.parent.parent.basedir} hadoop-submarine-dist - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT diff --git a/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml 
b/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml index cc2ebfc67743a..7ab2fdd35b5f3 100644 --- a/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml +++ b/hadoop-submarine/hadoop-submarine-tony-runtime/pom.xml @@ -18,7 +18,7 @@ hadoop-submarine org.apache.hadoop - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT 4.0.0 @@ -28,7 +28,7 @@ org.apache.hadoop hadoop-submarine-core - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT compile @@ -59,7 +59,7 @@ hadoop-submarine-core test-jar test - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT org.mockito diff --git a/hadoop-submarine/hadoop-submarine-yarnservice-runtime/pom.xml b/hadoop-submarine/hadoop-submarine-yarnservice-runtime/pom.xml index 034a29e589073..19e629e81d149 100644 --- a/hadoop-submarine/hadoop-submarine-yarnservice-runtime/pom.xml +++ b/hadoop-submarine/hadoop-submarine-yarnservice-runtime/pom.xml @@ -20,10 +20,10 @@ hadoop-submarine org.apache.hadoop - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT hadoop-submarine-yarnservice-runtime - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT Hadoop Submarine YARN Service Runtime @@ -98,12 +98,12 @@ hadoop-submarine-core test-jar test - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT org.apache.hadoop hadoop-submarine-core - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT org.apache.hadoop diff --git a/hadoop-submarine/pom.xml b/hadoop-submarine/pom.xml index 2d33da2ac1662..501dfb4f31def 100644 --- a/hadoop-submarine/pom.xml +++ b/hadoop-submarine/pom.xml @@ -24,7 +24,7 @@ hadoop-submarine - 0.2.0-SNAPSHOT + 0.3.0-SNAPSHOT Hadoop Submarine pom From 4530f4500d308c9cefbcc5990769c04bd061ad87 Mon Sep 17 00:00:00 2001 From: Weiwei Yang Date: Mon, 3 Jun 2019 14:09:37 +0800 Subject: [PATCH 0092/1308] YARN-9507. Fix NPE in NodeManager#serviceStop on startup failure. Contributed by Bilwa S T. --- .../hadoop/yarn/server/nodemanager/NodeManager.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 89e3b478d1eba..9eff3a9213e31 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -526,9 +526,11 @@ protected void serviceStop() throws Exception { DefaultMetricsSystem.shutdown(); // Cleanup ResourcePluginManager - ResourcePluginManager rpm = context.getResourcePluginManager(); - if (rpm != null) { - rpm.cleanup(); + if (null != context) { + ResourcePluginManager rpm = context.getResourcePluginManager(); + if (rpm != null) { + rpm.cleanup(); + } } } finally { // YARN-3641: NM's services stop get failed shouldn't block the From 2a97a37d9e313e509ac43fdafd379183fd564d9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Mon, 3 Jun 2019 08:34:19 +0200 Subject: [PATCH 0093/1308] Revert "HDDS-1597. Remove hdds-server-scm dependency from ozone-common. (#860)" This reverts commit baee71551d5a9c39760631de463684d810fa96fa. 
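After this revert, OmUtils resolves the OM DB directory through the SCM-side helper again: ScmUtils.getDBPath prefers the service-specific directory key and falls back to the shared Ozone metadata directory. A rough, simplified sketch of that lookup follows (a plain Map stands in for Configuration, and the literal key strings stand in for OMConfigKeys.OZONE_OM_DB_DIRS and HddsConfigKeys.OZONE_METADATA_DIRS as an assumption for illustration; logging and directory creation are omitted):

    import java.io.File;
    import java.util.Map;

    public final class DbPathSketch {

      // Mirrors the restored ScmUtils.getDBPath: use the service-specific key
      // if it is set, otherwise fall back to the shared metadata directory.
      static File getDbPath(Map<String, String> conf, String dbDirKey) {
        String configured = conf.get(dbDirKey);
        if (configured != null) {
          return new File(configured);
        }
        // Fallback corresponds to ServerUtils.getOzoneMetaDirPath(conf).
        return new File(conf.get("ozone.metadata.dirs"));
      }

      // Mirrors OmUtils.getOmDbDir after the revert: delegate to the helper
      // with the OM-specific directory key.
      static File getOmDbDir(Map<String, String> conf) {
        return getDbPath(conf, "ozone.om.db.dirs");
      }

      public static void main(String[] args) {
        Map<String, String> conf =
            Map.of("ozone.metadata.dirs", "/tmp/ozone-meta");
        // Key not set, so this prints the fallback metadata directory.
        System.out.println(getOmDbDir(conf));
      }
    }
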
--- .../hadoop/hdds/server/ServerUtils.java | 21 ------------------- .../org/apache/hadoop/hdds/scm/ScmUtils.java | 21 +++++++++++++++++-- .../hdds/scm/exceptions/SCMException.java | 0 .../hdds/scm/exceptions/package-info.java | 0 hadoop-ozone/common/pom.xml | 4 ++++ .../java/org/apache/hadoop/ozone/OmUtils.java | 4 ++-- hadoop-ozone/integration-test/pom.xml | 9 -------- .../hadoop/ozone/om/TestKeyManagerImpl.java | 0 hadoop-ozone/pom.xml | 5 ----- hadoop-ozone/tools/pom.xml | 5 ----- 10 files changed, 25 insertions(+), 44 deletions(-) rename hadoop-hdds/{common => server-scm}/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java (100%) rename hadoop-hdds/{common => server-scm}/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java (100%) rename hadoop-ozone/{integration-test => ozone-manager}/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java (100%) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index 33a1ca9558b04..f775ca104b326 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -203,25 +203,4 @@ public static void setOzoneMetaDirPath(OzoneConfiguration conf, conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, path); } - /** - * Returns with the service specific metadata directory. - *

    - * If the directory is missing the method tries to create it. - * - * @param conf The ozone configuration object - * @param key The configuration key which specify the directory. - * @return The path of the directory. - */ - public static File getDBPath(Configuration conf, String key) { - final File dbDirPath = - getDirectoryFromConfig(conf, key, "OM"); - if (dbDirPath != null) { - return dbDirPath; - } - - LOG.warn("{} is not configured. We recommend adding this setting. " - + "Falling back to {} instead.", key, - HddsConfigKeys.OZONE_METADATA_DIRS); - return ServerUtils.getOzoneMetaDirPath(conf); - } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java index 426341a32f40d..37702532ac760 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java @@ -18,13 +18,18 @@ package org.apache.hadoop.hdds.scm; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.safemode.Precheck; - +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.server.ServerUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; + /** * SCM utility class. */ @@ -48,4 +53,16 @@ public static void preCheck(ScmOps operation, Precheck... preChecks) } } + public static File getDBPath(Configuration conf, String dbDirectory) { + final File dbDirPath = + ServerUtils.getDirectoryFromConfig(conf, dbDirectory, "OM"); + if (dbDirPath != null) { + return dbDirPath; + } + + LOG.warn("{} is not configured. We recommend adding this setting. 
" + + "Falling back to {} instead.", dbDirectory, + HddsConfigKeys.OZONE_METADATA_DIRS); + return ServerUtils.getOzoneMetaDirPath(conf); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java similarity index 100% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java similarity index 100% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index 050022c949546..06973b397533a 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -60,6 +60,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-server-framework + + org.apache.hadoop + hadoop-hdds-server-scm + org.apache.hadoop hadoop-hdds-container-service diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index f4c33d3a7eaf2..5cd51421cb72d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -39,7 +39,7 @@ import org.apache.commons.compress.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.hadoop.hdds.scm.ScmUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -164,7 +164,7 @@ public static int getOmRestPort(Configuration conf) { * @return File path, after creating all the required Directories. 
*/ public static File getOmDbDir(Configuration conf) { - return ServerUtils.getDBPath(conf, OMConfigKeys.OZONE_OM_DB_DIRS); + return ScmUtils.getDBPath(conf, OMConfigKeys.OZONE_OM_DB_DIRS); } /** diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 2ecee8c5ac31f..3d4de8ef5b253 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -34,10 +34,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone-common - - org.apache.hadoop - hadoop-hdds-server-scm - org.apache.hadoop hadoop-ozone-ozone-manager @@ -68,11 +64,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone-client - - commons-lang - commons-lang - test - org.apache.hadoop hadoop-ozone-ozone-manager diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java similarity index 100% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 408c640428b04..a754d1fabf219 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -220,11 +220,6 @@ bcprov-jdk15on ${bouncycastle.version} - - commons-lang - commons-lang - 2.6 - diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 7ce6f6b2a53b5..f8ed807990636 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -37,11 +37,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone-common - - - org.apache.hadoop - hadoop-hdds-server-scm - org.apache.hadoop hadoop-ozone-client From 7f46dda513fb79c349acb73bdb90b689df9cc18d Mon Sep 17 00:00:00 2001 From: Sunil G Date: Mon, 3 Jun 2019 12:24:20 +0530 Subject: [PATCH 0094/1308] YARN-8947. [UI2] Active User info missing from UI2. Contributed by Akhil PB. 
--- .../src/main/webapp/app/models/yarn-user.js | 11 ++++- .../serializers/yarn-queue/capacity-queue.js | 11 +++++ .../src/main/webapp/app/styles/app.scss | 18 +++++++ .../webapp/app/templates/yarn-queue/apps.hbs | 47 ++++++++++++++++++- 4 files changed, 84 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-user.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-user.js index 84b0fabe52144..9016e92559b4d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-user.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-user.js @@ -22,5 +22,14 @@ export default DS.Model.extend({ name: DS.attr('string'), queueName: DS.attr('string'), usedMemoryMB: DS.attr('number'), - usedVCore: DS.attr('number') + usedVCore: DS.attr('number'), + maxMemoryMB: DS.attr('number'), + maxVCore: DS.attr('number'), + amUsedMemoryMB: DS.attr('number'), + amUsedVCore: DS.attr('number'), + maxAMMemoryMB: DS.attr('number'), + maxAMVCore: DS.attr('number'), + userWeight: DS.attr('string'), + activeApps: DS.attr('number'), + pendingApps: DS.attr('number') }); \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js index 57714e3d46229..2dec5205fdd51 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js @@ -36,6 +36,8 @@ export default DS.JSONAPISerializer.extend({ // update user models if (payload.users && payload.users.user) { payload.users.user.forEach(function(u) { + var defaultPartitionResource = u.resources.resourceUsagesByPartition[0]; + var maxAMResource = defaultPartitionResource.amLimit; includedData.push({ type: "YarnUser", id: u.username + "_" + payload.queueName, @@ -44,6 +46,15 @@ export default DS.JSONAPISerializer.extend({ queueName: payload.queueName, usedMemoryMB: u.resourcesUsed.memory || 0, usedVCore: u.resourcesUsed.vCores || 0, + maxMemoryMB: u.userResourceLimit.memory || 0, + maxVCore: u.userResourceLimit.vCores || 0, + amUsedMemoryMB: u.AMResourceUsed.memory || 0, + amUsedVCore: u.AMResourceUsed.vCores || 0, + maxAMMemoryMB: maxAMResource.memory || 0, + maxAMVCore: maxAMResource.vCores || 0, + userWeight: u.userWeight || '', + activeApps: u.numActiveApplications || 0, + pendingApps: u.numPendingApplications || 0 } }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss index 01dc9f5b71c74..59e437a9f5516 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss @@ -754,4 +754,22 @@ div.loggedin-user { border: none; border-radius: 2px; } +} + +/** + * Active User Info table styles + */ +.active-user-panel { + margin-top: 10px; +} + +table.active-user-table { + border: 1px solid #ddd; + > thead > tr > th { + background-color: #f7f7f7; + } + &.table-bordered > thead > tr > th, + &.table-bordered > tbody > tr > td { + border: 1px solid #dcdcdc !important; + } } \ No newline at end of file diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/apps.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/apps.hbs index 97820c9f0e61e..dc5413e048e11 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/apps.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/apps.hbs @@ -17,7 +17,7 @@ }}

    -
    +
    {{#if (eq model.queues.firstObject.type "capacity")}} {{yarn-queue.capacity-queue-info model=model}} @@ -28,7 +28,7 @@ {{/if}}
    -
    +
    {{#if model.apps}} {{em-table columns=columns rows=model.apps definition=tableDefinitio}} {{else}} @@ -38,4 +38,47 @@
    {{/if}}
    + {{!-- Active User Info --}} + {{#if model.selectedQueue.users}} +
    +
    +
    + Active User Info: {{ model.selected }} +
    +
    + + + + + + + + + + + + + + + + {{!-- Fix to start odd row background color from third row --}} + + {{#each model.selectedQueue.users as |user|}} + + + + + + + + + + + {{/each}} + +
    User NameMax ResourceUsed ResourceMax AM ResourceUsed AM ResourceActive AppsPending AppsWeight
    {{ user.name }}<memory: {{user.maxMemoryMB}} MB, vCores: {{user.maxVCore}}><memory: {{user.usedMemoryMB}} MB, vCores: {{user.usedVCore}}><memory: {{user.maxAMMemoryMB}} MB, vCores: {{user.maxAMVCore}}><memory: {{user.amUsedMemoryMB}} MB, vCores: {{user.amUsedVCore}}>{{user.activeApps}}{{user.pendingApps}}{{user.userWeight}}
    +
    +
    +
    + {{/if}}
    From 59719dc560cf67f485d8e5b4a6f0f38ef97d536b Mon Sep 17 00:00:00 2001 From: Sunil G Date: Mon, 3 Jun 2019 15:53:23 +0530 Subject: [PATCH 0095/1308] YARN-8906. [UI2] NM hostnames not displayed correctly in Node Heatmap Chart. Contributed by Akhil PB. --- .../src/main/webapp/app/components/nodes-heatmap.js | 6 ++++-- .../main/webapp/app/templates/components/nodes-heatmap.hbs | 1 - 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js index 1f772defcd26b..7eac266505aca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js @@ -230,12 +230,14 @@ export default BaseChartComponent.extend({ var node_id = data.get("id"), node_addr = encodeURIComponent(data.get("nodeHTTPAddress")), href = `#/yarn-node/${node_id}/${node_addr}/info`; + var nodeHostName = data.get("nodeHostName"); var a = g.append("a") .attr("href", href); a.append("text") - .text(data.get("nodeHostName")) + .text(nodeHostName.length > 30 ? nodeHostName.substr(0, 30) + '...' : nodeHostName) .attr("y", yOffset + this.CELL_HEIGHT / 2 + 5) - .attr("x", xOffset + this.CELL_WIDTH / 2) + .attr("x", nodeHostName.length > 30 ? xOffset + 10 : xOffset + this.CELL_WIDTH / 2) + .style("text-anchor", nodeHostName.length > 30 ? "start" : "middle") .attr("class", this.isNodeSelected(data) ? "heatmap-cell" : "heatmap-cell-notselected"); if (this.isNodeSelected(data)) { this.bindTP(a, rect); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/nodes-heatmap.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/nodes-heatmap.hbs index f68bba6b8bcef..d1ac8e7a30f39 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/nodes-heatmap.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/nodes-heatmap.hbs @@ -25,4 +25,3 @@
    -

    \ No newline at end of file From bd2590d71ba1f3db1c686f7afeaf51382f8d8a2f Mon Sep 17 00:00:00 2001 From: Weiwei Yang Date: Mon, 3 Jun 2019 22:59:02 +0800 Subject: [PATCH 0096/1308] YARN-9580. Fulfilled reservation information in assignment is lost when transferring in ParentQueue#assignContainers. Contributed by Tao Yang. --- .../scheduler/capacity/ParentQueue.java | 4 ++ .../TestCapacitySchedulerMultiNodes.java | 57 +++++++++++++++++++ 2 files changed, 61 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 8a7acd643878f..c56369c3e1c25 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -631,6 +631,10 @@ public CSAssignment assignContainers(Resource clusterResource, assignedToChild.getRequestLocalityType()); assignment.setExcessReservation(assignedToChild.getExcessReservation()); assignment.setContainersToKill(assignedToChild.getContainersToKill()); + assignment.setFulfilledReservation( + assignedToChild.isFulfilledReservation()); + assignment.setFulfilledReservedContainer( + assignedToChild.getFulfilledReservedContainer()); // Done if no child-queue assigned anything if (Resources.greaterThan(resourceCalculator, clusterResource, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java index 6c9faa6785d55..0e295765e8414 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java @@ -245,4 +245,61 @@ public void testExcessReservationWillBeUnreserved() throws Exception { rm1.close(); } + + @Test(timeout=30000) + public void testAllocateForReservedContainer() throws Exception { + CapacitySchedulerConfiguration newConf = + new CapacitySchedulerConfiguration(conf); + newConf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER, + YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER); + newConf.setInt(CapacitySchedulerConfiguration.MULTI_NODE_SORTING_POLICY_NAME + + ".resource-based.sorting-interval.ms", 0); + newConf.setMaximumApplicationMasterResourcePerQueuePercent("root.default", + 1.0f); + MockRM rm1 = new MockRM(newConf); + + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); + MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB); + + // launch an app to queue, AM container should be 
launched in nm1 + RMApp app1 = rm1.submitApp(5 * GB, "app", "user", null, "default"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // launch another app to queue, AM container should be launched in nm2 + RMApp app2 = rm1.submitApp(5 * GB, "app", "user", null, "default"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2); + + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + FiCaSchedulerApp schedulerApp1 = + cs.getApplicationAttempt(am1.getApplicationAttemptId()); + FiCaSchedulerApp schedulerApp2 = + cs.getApplicationAttempt(am2.getApplicationAttemptId()); + + /* + * Verify that reserved container will be allocated + * after node has sufficient resource. + */ + // Ask a container with 6GB memory size for app2, + // nm1 will reserve a container for app2 + am2.allocate("*", 6 * GB, 1, new ArrayList<>()); + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // Check containers of app1 and app2. + Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); + Assert.assertEquals(1, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(1, schedulerApp2.getLiveContainers().size()); + Assert.assertEquals(1, schedulerApp2.getReservedContainers().size()); + + // Kill app1 to release resource on nm1. + rm1.killApp(app1.getApplicationId()); + + // Trigger scheduling to allocate for reserved container on nm1. + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); + Assert.assertEquals(2, schedulerApp2.getLiveContainers().size()); + + rm1.close(); + } } From f3271126fc9a3ad178b7dadd8edf851e16cf76d0 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Tue, 4 Jun 2019 00:59:02 +0530 Subject: [PATCH 0097/1308] HDDS-1558. IllegalArgumentException while processing container Reports. 
Signed-off-by: Nanda kumar --- .../container/common/impl/HddsDispatcher.java | 15 +++- .../container/common/interfaces/Handler.java | 9 ++ .../container/keyvalue/KeyValueContainer.java | 6 +- .../container/keyvalue/KeyValueHandler.java | 14 +++ .../TestContainerStateMachineFailures.java | 85 +++++++++++++++++++ 5 files changed, 125 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 4e8d5b914e3fa..6f56b3c13d27c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -67,6 +67,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -299,8 +300,18 @@ private ContainerCommandResponseProto dispatchRequest( State containerState = container.getContainerData().getState(); Preconditions.checkState( containerState == State.OPEN || containerState == State.CLOSING); - container.getContainerData() - .setState(ContainerDataProto.State.UNHEALTHY); + // mark and persist the container state to be unhealthy + try { + handler.markContainerUhealthy(container); + } catch (IOException ioe) { + // just log the error here in case marking the container fails, + // Return the actual failure response to the client + LOG.error("Failed to mark container " + containerID + " UNHEALTHY. ", + ioe); + } + // in any case, the in memory state of the container should be unhealthy + Preconditions.checkArgument( + container.getContainerData().getState() == State.UNHEALTHY); sendCloseContainerActionIfNeeded(container); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java index a3bb34b565291..52d14dbbb4747 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java @@ -129,6 +129,15 @@ public abstract Container importContainer( public abstract void markContainerForClose(Container container) throws IOException; + /** + * Marks the container Unhealthy. Moves the container to UHEALTHY state. + * + * @param container container to update + * @throws IOException in case of exception + */ + public abstract void markContainerUhealthy(Container container) + throws IOException; + /** * Moves the Container to QUASI_CLOSED state. 
* diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 38257c39f2389..6a1ca8682188c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -339,8 +339,10 @@ private void updateContainerData(Runnable update) updateContainerFile(containerFile); } catch (StorageContainerException ex) { - if (oldState != null) { - // Failed to update .container file. Reset the state to CLOSING + if (oldState != null + && containerData.getState() != ContainerDataProto.State.UNHEALTHY) { + // Failed to update .container file. Reset the state to old state only + // if the current state is not unhealthy. containerData.setState(oldState); } throw ex; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 531fb02830658..72f48fa3b5ec6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -892,6 +892,14 @@ public void markContainerForClose(Container container) } } + @Override + public void markContainerUhealthy(Container container) + throws IOException { + // this will mark the container unhealthy and a close container action will + // be sent from the dispatcher ton SCM to close down this container. + container.markContainerUnhealthy(); + } + @Override public void quasiCloseContainer(Container container) throws IOException { @@ -920,6 +928,12 @@ public void closeContainer(Container container) if (state == State.CLOSED) { return; } + if (state == State.UNHEALTHY) { + throw new StorageContainerException( + "Cannot close container #" + container.getContainerData() + .getContainerID() + " while in " + state + " state.", + ContainerProtos.Result.CONTAINER_UNHEALTHY); + } // The container has to be either in CLOSING or in QUASI_CLOSED state. if (state != State.CLOSING && state != State.QUASI_CLOSED) { ContainerProtos.Result error = state == State.INVALID ? 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 5739d48c652cc..744f687286a8c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -24,12 +24,16 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.container.common.impl.ContainerData; +import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -49,10 +53,13 @@ HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys. HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys. HDDS_SCM_WATCHER_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys. OZONE_SCM_STALENODE_INTERVAL; +import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertThat; /** * Tests the containerStateMachine failure handling. @@ -185,4 +192,82 @@ public void testContainerStateMachineFailures() throws Exception { Assert.assertEquals(ContainerProtos.Result.CONTAINER_MISSING, dispatcher.dispatch(request.build(), null).getResult()); } + + @Test + public void testUnhealthyContainer() throws Exception { + OzoneOutputStream key = + objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("ratis", 1024, ReplicationType.RATIS, + ReplicationFactor.ONE, new HashMap<>()); + // First write and flush creates a container in the datanode + key.write("ratis".getBytes()); + key.flush(); + key.write("ratis".getBytes()); + + //get the name of a valid container + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName). 
+ setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) + .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName("ratis") + .build(); + KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); + List locationInfoList = + groupOutputStream.getLocationInfoList(); + Assert.assertEquals(1, locationInfoList.size()); + OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); + ContainerData containerData = + cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() + .getContainer().getContainerSet() + .getContainer(omKeyLocationInfo.getContainerID()) + .getContainerData(); + Assert.assertTrue(containerData instanceof KeyValueContainerData); + KeyValueContainerData keyValueContainerData = + (KeyValueContainerData) containerData; + // delete the container db file + FileUtil.fullyDelete(new File(keyValueContainerData.getChunksPath())); + try { + key.close(); + Assert.fail(); + } catch (IOException ioe) { + Assert.assertTrue(ioe.getMessage().contains( + "Requested operation not allowed as ContainerState is UNHEALTHY")); + } + long containerID = omKeyLocationInfo.getContainerID(); + + // Make sure the container is marked unhealthy + Assert.assertTrue( + cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() + .getContainer().getContainerSet().getContainer(containerID) + .getContainerState() + == ContainerProtos.ContainerDataProto.State.UNHEALTHY); + // Check metadata in the .container file + File containerFile = new File(keyValueContainerData.getMetadataPath(), + containerID + OzoneConsts.CONTAINER_EXTENSION); + + keyValueContainerData = (KeyValueContainerData) ContainerDataYaml + .readContainerFile(containerFile); + assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); + + // restart the hdds datanode and see if the container is listed in the + // in the missing container set and not in the regular set + cluster.restartHddsDatanode(0, true); + // make sure the container state is still marked unhealthy after restart + keyValueContainerData = (KeyValueContainerData) ContainerDataYaml + .readContainerFile(containerFile); + assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); + + OzoneContainer ozoneContainer; + ozoneContainer = cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() + .getContainer(); + HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher(); + ContainerProtos.ContainerCommandRequestProto.Builder request = + ContainerProtos.ContainerCommandRequestProto.newBuilder(); + request.setCmdType(ContainerProtos.Type.CloseContainer); + request.setContainerID(containerID); + request.setCloseContainer( + ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); + request.setDatanodeUuid( + cluster.getHddsDatanodes().get(0).getDatanodeDetails().getUuidString()); + Assert.assertEquals(ContainerProtos.Result.CONTAINER_UNHEALTHY, + dispatcher.dispatch(request.build(), null).getResult()); + } } \ No newline at end of file From 21de9af9038961e36e7335dc1f688f5f48056d1c Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Mon, 3 Jun 2019 12:45:04 -0700 Subject: [PATCH 0098/1308] HDDS-1625 : ConcurrentModificationException when SCM has containers of different owners. 
(#883) --- .../scm/container/SCMContainerManager.java | 9 ++++--- .../TestContainerStateManagerIntegration.java | 24 +++++++++++++++++++ 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java index 359731cfe4652..1c1ffe174ea4f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java @@ -43,6 +43,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.NavigableSet; @@ -469,15 +470,17 @@ private void addContainerToDB(ContainerInfo containerInfo) */ private NavigableSet getContainersForOwner( NavigableSet containerIDs, String owner) { - for (ContainerID cid : containerIDs) { + Iterator containerIDIterator = containerIDs.iterator(); + while (containerIDIterator.hasNext()) { + ContainerID cid = containerIDIterator.next(); try { if (!getContainer(cid).getOwner().equals(owner)) { - containerIDs.remove(cid); + containerIDIterator.remove(); } } catch (ContainerNotFoundException e) { LOG.error("Could not find container info for container id={} {}", cid, e); - containerIDs.remove(cid); + containerIDIterator.remove(); } } return containerIDs; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java index 9f90a2deac6b4..e4f1a37fd6235 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java @@ -122,6 +122,30 @@ public void testAllocateContainer() throws IOException { Assert.assertEquals(3, numContainers); } + @Test + public void testAllocateContainerWithDifferentOwner() throws IOException { + + // Allocate a container and verify the container info + ContainerWithPipeline container1 = scm.getClientProtocolServer() + .allocateContainer(xceiverClientManager.getType(), + xceiverClientManager.getFactor(), containerOwner); + ContainerInfo info = containerManager + .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, + container1.getPipeline()); + Assert.assertNotNull(info); + + String newContainerOwner = "OZONE_NEW"; + ContainerWithPipeline container2 = scm.getClientProtocolServer() + .allocateContainer(xceiverClientManager.getType(), + xceiverClientManager.getFactor(), newContainerOwner); + ContainerInfo info2 = containerManager + .getMatchingContainer(OzoneConsts.GB * 3, newContainerOwner, + container1.getPipeline()); + Assert.assertNotNull(info2); + + Assert.assertNotEquals(info.containerID(), info2.containerID()); + } + @Test public void testContainerStateManagerRestart() throws IOException, TimeoutException, InterruptedException, AuthenticationException { From 277e9a835b5b45af8df70b0dca52c03074f0d6b5 Mon Sep 17 00:00:00 2001 From: Mukul Kumar Singh Date: Tue, 4 Jun 2019 02:12:44 +0530 Subject: [PATCH 0099/1308] Opening of rocksDB in datanode fails with "No locks available" Signed-off-by: Nanda kumar --- 
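The getContainersForOwner() change above is the standard fix for this class of bug: removing elements from a collection inside an enhanced for loop fails fast with ConcurrentModificationException, while removing through the explicit Iterator does not. A small self-contained illustration (the set contents are arbitrary example values, not real container IDs):

    import java.util.Iterator;
    import java.util.NavigableSet;
    import java.util.TreeSet;

    public final class IteratorRemovalExample {
      public static void main(String[] args) {
        NavigableSet<Long> ids = new TreeSet<>();
        for (long i = 1; i <= 5; i++) {
          ids.add(i);
        }

        // for (Long id : ids) { if (id % 2 == 0) { ids.remove(id); } }
        // would throw ConcurrentModificationException on the next iteration.

        Iterator<Long> it = ids.iterator();
        while (it.hasNext()) {
          long id = it.next();
          if (id % 2 == 0) {
            it.remove();   // safe: the removal goes through the iterator itself
          }
        }
        System.out.println(ids);   // prints [1, 3, 5]
      }
    }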
.../common/utils/ContainerCache.java | 14 +- .../common/utils/ReferenceCountedDB.java | 28 ++-- .../container/common/TestContainerCache.java | 128 ++++++++++++++++++ 3 files changed, 145 insertions(+), 25 deletions(-) create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java index ef75ec13db4d9..d25e53b74f6c3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java @@ -77,7 +77,8 @@ public void shutdownCache() { while (iterator.hasNext()) { iterator.next(); ReferenceCountedDB db = (ReferenceCountedDB) iterator.getValue(); - db.setEvicted(true); + Preconditions.checkArgument(db.cleanup(), "refCount:", + db.getReferenceCount()); } // reset the cache cache.clear(); @@ -92,14 +93,9 @@ public void shutdownCache() { @Override protected boolean removeLRU(LinkEntry entry) { ReferenceCountedDB db = (ReferenceCountedDB) entry.getValue(); - String dbFile = (String)entry.getKey(); lock.lock(); try { - db.setEvicted(false); - return true; - } catch (Exception e) { - LOG.error("Eviction for db:{} failed", dbFile, e); - return false; + return db.cleanup(); } finally { lock.unlock(); } @@ -156,8 +152,8 @@ public void removeDB(String containerDBPath) { try { ReferenceCountedDB db = (ReferenceCountedDB)this.get(containerDBPath); if (db != null) { - // marking it as evicted will close the db as well. 
- db.setEvicted(true); + Preconditions.checkArgument(db.cleanup(), "refCount:", + db.getReferenceCount()); } this.remove(containerDBPath); } finally { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java index 31aca64c5fbd6..81cde5bddc41f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java @@ -24,7 +24,6 @@ import org.slf4j.LoggerFactory; import java.io.Closeable; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; /** @@ -38,17 +37,19 @@ public class ReferenceCountedDB implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(ReferenceCountedDB.class); private final AtomicInteger referenceCount; - private final AtomicBoolean isEvicted; private final MetadataStore store; private final String containerDBPath; public ReferenceCountedDB(MetadataStore store, String containerDBPath) { this.referenceCount = new AtomicInteger(0); - this.isEvicted = new AtomicBoolean(false); this.store = store; this.containerDBPath = containerDBPath; } + public long getReferenceCount() { + return referenceCount.get(); + } + public void incrementReference() { this.referenceCount.incrementAndGet(); if (LOG.isDebugEnabled()) { @@ -59,35 +60,30 @@ public void incrementReference() { } public void decrementReference() { - this.referenceCount.decrementAndGet(); + int refCount = this.referenceCount.decrementAndGet(); + Preconditions.checkArgument(refCount >= 0, "refCount:", refCount); if (LOG.isDebugEnabled()) { LOG.debug("DecRef {} to refCnt {} \n", containerDBPath, referenceCount.get()); new Exception().printStackTrace(); } - cleanup(); - } - - public void setEvicted(boolean checkNoReferences) { - Preconditions.checkState(!checkNoReferences || - (referenceCount.get() == 0), - "checkNoReferences:%b, referencount:%d, dbPath:%s", - checkNoReferences, referenceCount.get(), containerDBPath); - isEvicted.set(true); - cleanup(); } - private void cleanup() { - if (referenceCount.get() == 0 && isEvicted.get() && store != null) { + public boolean cleanup() { + if (referenceCount.get() == 0 && store != null) { if (LOG.isDebugEnabled()) { LOG.debug("Close {} refCnt {}", containerDBPath, referenceCount.get()); } try { store.close(); + return true; } catch (Exception e) { LOG.error("Error closing DB. Container: " + containerDBPath, e); + return false; } + } else { + return false; } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java new file mode 100644 index 0000000000000..83a3812b47327 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
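The reworked ReferenceCountedDB above drops the separate eviction flag: cleanup() closes the store only when the reference count has reached zero and reports the outcome to the caller, and decrementReference() asserts the count never goes negative. A stripped-down sketch of that contract, with a plain AutoCloseable standing in for the RocksDB-backed MetadataStore:

    import java.util.concurrent.atomic.AtomicInteger;

    final class RefCountedResource {
      private final AtomicInteger refCount = new AtomicInteger(0);
      private final AutoCloseable resource;

      RefCountedResource(AutoCloseable resource) {
        this.resource = resource;
      }

      void incrementReference() {
        refCount.incrementAndGet();
      }

      void decrementReference() {
        int current = refCount.decrementAndGet();
        if (current < 0) {
          throw new IllegalArgumentException("refCount went negative: " + current);
        }
      }

      /** Close the resource only if nobody holds a reference; report the outcome. */
      boolean cleanup() {
        if (refCount.get() != 0) {
          return false;          // caller (e.g. a cache evictor) must keep the entry
        }
        try {
          resource.close();
          return true;
        } catch (Exception e) {
          return false;
        }
      }
    }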
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.common; + +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; +import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.utils.MetadataStoreBuilder; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.io.File; + + +/** + * Test ContainerCache with evictions. + */ +public class TestContainerCache { + private static String testRoot = new FileSystemTestHelper().getTestRootDir(); + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + private void createContainerDB(OzoneConfiguration conf, File dbFile) + throws Exception { + MetadataStore store = MetadataStoreBuilder.newBuilder().setConf(conf) + .setCreateIfMissing(true).setDbFile(dbFile).build(); + + // we close since the SCM pre-creates containers. + // we will open and put Db handle into a cache when keys are being created + // in a container. + + store.close(); + } + + @Test + public void testContainerCacheEviction() throws Exception { + File root = new File(testRoot); + root.mkdirs(); + + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); + + ContainerCache cache = ContainerCache.getInstance(conf); + File containerDir1 = new File(root, "cont1"); + File containerDir2 = new File(root, "cont2"); + File containerDir3 = new File(root, "cont3"); + File containerDir4 = new File(root, "cont4"); + + + createContainerDB(conf, containerDir1); + createContainerDB(conf, containerDir2); + createContainerDB(conf, containerDir3); + createContainerDB(conf, containerDir4); + + // Get 2 references out of the same db and verify the objects are same. + ReferenceCountedDB db1 = cache.getDB(1, "RocksDB", + containerDir1.getPath(), conf); + Assert.assertEquals(1, db1.getReferenceCount()); + ReferenceCountedDB db2 = cache.getDB(1, "RocksDB", + containerDir1.getPath(), conf); + Assert.assertEquals(2, db2.getReferenceCount()); + Assert.assertEquals(2, db1.getReferenceCount()); + Assert.assertEquals(db1, db2); + + // add one more references to ContainerCache. + ReferenceCountedDB db3 = cache.getDB(2, "RocksDB", + containerDir2.getPath(), conf); + Assert.assertEquals(1, db3.getReferenceCount()); + + // and close the reference + db3.close(); + Assert.assertEquals(0, db3.getReferenceCount()); + + Assert.assertTrue(cache.isFull()); + + // add one more reference to ContainerCache and verify that it will not + // evict the least recent entry as it has reference. 
+ ReferenceCountedDB db4 = cache.getDB(3, "RocksDB", + containerDir3.getPath(), conf); + Assert.assertEquals(1, db4.getReferenceCount()); + + Assert.assertEquals(2, cache.size()); + Assert.assertNotNull(cache.get(containerDir1.getPath())); + Assert.assertNull(cache.get(containerDir2.getPath())); + + // Now close both the references for container1 + db1.close(); + db2.close(); + Assert.assertEquals(0, db1.getReferenceCount()); + Assert.assertEquals(0, db2.getReferenceCount()); + + + // The reference count for container1 is 0 but it is not evicted. + ReferenceCountedDB db5 = cache.getDB(1, "RocksDB", + containerDir1.getPath(), conf); + Assert.assertEquals(1, db5.getReferenceCount()); + Assert.assertEquals(db1, db5); + db5.close(); + db4.close(); + + + // Decrementing reference count below zero should fail. + thrown.expect(IllegalArgumentException.class); + db5.close(); + } +} From 606061aa147dc6d619d6240b7ea31d8f8f220e5d Mon Sep 17 00:00:00 2001 From: Zhankun Tang Date: Tue, 4 Jun 2019 09:56:59 +0800 Subject: [PATCH 0100/1308] YARN-9595. FPGA plugin: NullPointerException in FpgaNodeResourceUpdateHandler.updateConfiguredResource(). Contributed by Peter Bacsko. --- .../resourceplugin/fpga/FpgaDiscoverer.java | 5 +-- .../fpga/TestFpgaDiscoverer.java | 33 +++++++++++++++++++ 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java index 185effaa6c17b..180a011b61f59 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java @@ -124,6 +124,7 @@ public List discover() if (allowed == null || allowed.equalsIgnoreCase( YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES)) { + currentFpgaInfo = ImmutableList.copyOf(list); return list; } else if (allowed.matches("(\\d,)*\\d")){ Set minors = Sets.newHashSet(allowed.split(",")); @@ -134,6 +135,8 @@ public List discover() .filter(dev -> minors.contains(String.valueOf(dev.getMinor()))) .collect(Collectors.toList()); + currentFpgaInfo = ImmutableList.copyOf(list); + // if the count of user configured is still larger than actual if (list.size() != minors.size()) { LOG.warn("We continue although there're mistakes in user's configuration " + @@ -145,8 +148,6 @@ public List discover() YarnConfiguration.NM_FPGA_ALLOWED_DEVICES + ":\"" + allowed + "\""); } - currentFpgaInfo = ImmutableList.copyOf(list); - return list; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestFpgaDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestFpgaDiscoverer.java index 92e9db2070466..6f570c6f63d63 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestFpgaDiscoverer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestFpgaDiscoverer.java @@ -288,6 +288,39 @@ public void testDiscoveryWhenExternalScriptCannotBeExecuted() } } + @Test + public void testCurrentFpgaInfoWhenAllDevicesAreAllowed() + throws YarnException { + conf.set(YarnConfiguration.NM_FPGA_AVAILABLE_DEVICES, + "acl0/243:0,acl1/244:1"); + + fpgaDiscoverer.initialize(conf); + List devices = fpgaDiscoverer.discover(); + List currentFpgaInfo = fpgaDiscoverer.getCurrentFpgaInfo(); + + assertEquals("Devices", devices, currentFpgaInfo); + } + + @Test + public void testCurrentFpgaInfoWhenAllowedDevicesDefined() + throws YarnException { + conf.set(YarnConfiguration.NM_FPGA_AVAILABLE_DEVICES, + "acl0/243:0,acl1/244:1"); + conf.set(YarnConfiguration.NM_FPGA_ALLOWED_DEVICES, "0"); + + fpgaDiscoverer.initialize(conf); + List devices = fpgaDiscoverer.discover(); + List currentFpgaInfo = fpgaDiscoverer.getCurrentFpgaInfo(); + + assertEquals("Devices", devices, currentFpgaInfo); + assertEquals("List of devices", 1, currentFpgaInfo.size()); + + FpgaDevice device = currentFpgaInfo.get(0); + assertEquals("Device id", "acl0", device.getAliasDevName()); + assertEquals("Minor number", 0, device.getMinor()); + assertEquals("Major", 243, device.getMajor()); + } + private IntelFpgaOpenclPlugin.InnerShellExecutor mockPuginShell() { IntelFpgaOpenclPlugin.InnerShellExecutor shell = mock(IntelFpgaOpenclPlugin.InnerShellExecutor.class); when(shell.runDiagnose(anyString(),anyInt())).thenReturn(""); From 1fc359fc101b3ff90c95d22a3f4cfa78b65ae47d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Tue, 4 Jun 2019 08:18:02 +0200 Subject: [PATCH 0101/1308] HDDS-1607. Create smoketest for non-secure mapreduce example (#869) * HDDS-1607. Create smoketest for non-secure mapreduce example. * remove hardcoded project version --- .../dist/src/main/compose/ozone-mr/.env | 19 +++ .../main/compose/ozone-mr/docker-compose.yaml | 95 +++++++++++++ .../src/main/compose/ozone-mr/docker-config | 130 ++++++++++++++++++ .../dist/src/main/compose/ozone-mr/test.sh | 36 +++++ .../dist/src/main/smoketest/createmrenv.robot | 48 +++++++ .../dist/src/main/smoketest/mapreduce.robot | 37 +++++ 6 files changed, 365 insertions(+) create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/.env create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/docker-compose.yaml create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/docker-config create mode 100755 hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh create mode 100644 hadoop-ozone/dist/src/main/smoketest/createmrenv.robot create mode 100644 hadoop-ozone/dist/src/main/smoketest/mapreduce.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/.env new file mode 100644 index 0000000000000..ba24fed5f9d7c --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/.env @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
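The FpgaDiscoverer change above moves the currentFpgaInfo assignment so it happens on every path out of discover(), including the early return taken when all devices are allowed; previously that path left the field unset and updateConfiguredResource() later hit a NullPointerException. The shape of the fix, shown with hypothetical simplified types (plain device-name strings instead of FpgaDevice):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    final class DiscovererSketch {
      private List<String> currentDevices = Collections.emptyList();

      List<String> discover(List<String> detected, String allowed) {
        if (allowed == null || allowed.equalsIgnoreCase("auto")) {
          // Record the result before the early return; skipping this is the
          // kind of gap that produced the NullPointerException fixed above.
          currentDevices = Collections.unmodifiableList(new ArrayList<>(detected));
          return detected;
        }
        Set<String> wanted = new HashSet<>(Arrays.asList(allowed.split(",")));
        List<String> filtered = new ArrayList<>();
        for (String dev : detected) {
          if (wanted.contains(dev)) {
            filtered.add(dev);
          }
        }
        currentDevices = Collections.unmodifiableList(filtered);
        return filtered;
      }

      List<String> getCurrentDevices() {
        return currentDevices;
      }
    }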
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +HDDS_VERSION=${hdds.version} +HADOOP_IMAGE=apache/hadoop +HADOOP_VERSION=3 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-compose.yaml new file mode 100644 index 0000000000000..1a7f87263d72e --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-compose.yaml @@ -0,0 +1,95 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version: "3" +services: + datanode: + image: apache/hadoop-runner + volumes: + - ../..:/opt/hadoop + ports: + - 9864 + command: ["/opt/hadoop/bin/ozone","datanode"] + env_file: + - docker-config + om: + image: apache/hadoop-runner + hostname: om + volumes: + - ../..:/opt/hadoop + ports: + - 9874:9874 + environment: + WAITFOR: scm:9876 + ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + env_file: + - docker-config + command: ["/opt/hadoop/bin/ozone","om"] + s3g: + image: apache/hadoop-runner + hostname: s3g + volumes: + - ../..:/opt/hadoop + ports: + - 9878:9878 + env_file: + - ./docker-config + command: ["/opt/hadoop/bin/ozone","s3g"] + scm: + image: apache/hadoop-runner:latest + hostname: scm + volumes: + - ../..:/opt/hadoop + ports: + - 9876:9876 + env_file: + - docker-config + environment: + ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + command: ["/opt/hadoop/bin/ozone","scm"] + rm: + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + hostname: rm + volumes: + - ../..:/opt/ozone + ports: + - 8088:8088 + env_file: + - ./docker-config + environment: + HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar + command: ["yarn", "resourcemanager"] + nm: + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + hostname: nm + volumes: + - ../..:/opt/ozone + env_file: + - ./docker-config + environment: + HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar + WAIT_FOR: rm:8088 + command: ["yarn","nodemanager"] + dns: + image: andyshinn/dnsmasq:2.76 + ports: + - 53:53/udp + - 53:53/tcp + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" + command: + - "-k" + - "-d" diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-config new file mode 100644 index 0000000000000..216e2314f4d89 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-config @@ -0,0 +1,130 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +OZONE-SITE.XML_ozone.om.address=om +OZONE-SITE.XML_ozone.om.http-address=om:9874 +OZONE-SITE.XML_ozone.scm.names=scm +OZONE-SITE.XML_ozone.enabled=true +OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id +OZONE-SITE.XML_ozone.scm.block.client.address=scm +OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata +OZONE-SITE.XML_ozone.scm.client.address=scm +OZONE-SITE.XML_ozone.replication=1 + +OZONE-SITE.XML_hdds.datanode.dir=/data/hdds +HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019 +HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012 + +CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem +CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs +CORE-SITE.xml_fs.defaultFS=o3fs://bucket1.vol1/ + +MAPRED-SITE.XML_mapreduce.framework.name=yarn +MAPRED-SITE.XML_yarn.app.mapreduce.am.env=HADOOP_MAPRED_HOME=$HADOOP_HOME +MAPRED-SITE.XML_mapreduce.map.env=HADOOP_MAPRED_HOME=$HADOOP_HOME +MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME +MAPRED-SITE.XML_mapreduce.map.memory.mb=4096 +MAPRED-SITE.XML_mapreduce.reduce.memory.mb=4096 +MAPRED-SITE.XML_mapred.child.java.opts=-Xmx2g +MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-0.5.0-SNAPSHOT.jar + +YARN-SITE.XML_yarn.app.mapreduce.am.staging-dir=/user +YARN_SITE.XML_yarn.timeline-service.enabled=true +#YARN_SITE.XML_yarn.timeline-service.generic.application.history.enabled=true +#YARN_SITE.XML_yarn.timeline-service.hostname=jhs +#YARN_SITE.XML_yarn.log.server.url=http://jhs:8188/applicationhistory/logs/ + +YARN-SITE.XML_yarn.nodemanager.pmem-check-enabled=false +YARN-SITE.XML_yarn.nodemanager.delete.debug-delay-sec=6000 +YARN-SITE.XML_yarn.nodemanager.vmem-check-enabled=false +YARN-SITE.XML_yarn.nodemanager.aux-services=mapreduce_shuffle +YARN-SITE.XML_yarn.nodemanager.disk-health-checker.enable=false + +YARN-SITE.XML_yarn.resourcemanager.hostname=rm +YARN_SITE_XML_yarn.resourcemanager.system.metrics.publisher.enabled=true + +#YARN-SITE.XML_yarn.log-aggregation-enable=true +#YARN-SITE.XML_yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds=3600 + +#YARN-SITE.yarn.nodemanager.container-executor.class=org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor +#YARN-SITE.XML_yarn.nodemanager.linux-container-executor.path=/opt/hadoop/bin/container-executor +#YARN-SITE.XML_yarn.nodemanager.linux-container-executor.group=hadoop 
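The CORE-SITE entries above point fs.defaultFS at o3fs://bucket1.vol1/ and register OzoneFileSystem as the o3fs implementation, so the MapReduce jobs in this compose cluster read and write Ozone keys through the ordinary Hadoop FileSystem API. A hypothetical client-side sketch of what that wiring gives you, assuming the hadoop-ozone-filesystem jar is on the classpath as the HADOOP_CLASSPATH settings above arrange (class name and paths are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class O3fsClientExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same settings the docker-config above injects into core-site.xml.
        conf.set("fs.defaultFS", "o3fs://bucket1.vol1/");
        conf.set("fs.o3fs.impl", "org.apache.hadoop.fs.ozone.OzoneFileSystem");

        try (FileSystem fs = FileSystem.get(conf)) {
          // Relative to the bucket, e.g. the MR staging/user directory.
          fs.mkdirs(new Path("/user/hadoop"));
          System.out.println(fs.exists(new Path("/user/hadoop")));
        }
      }
    }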
+YARN-SITE.XML_yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage=99 +YARN-SITE.XML_yarn.nodemanager.disk-health-checker.enable=false + +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-applications=10000 +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-am-resource-percent=0.1 +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.resource-calculator=org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.queues=default +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.capacity=100 +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.user-limit-factor=1 +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.maximum-capacity=100 +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.state=RUNNING +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_submit_applications=* +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_administer_queue=* +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.node-locality-delay=40 +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings= +CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings-override.enable=false + +LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout +LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender +LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR +LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN +LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop=INFO +LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR + +#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
+#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm + +#LOG4J2.PROPERTIES_* are for Ozone Audit Logging +LOG4J2.PROPERTIES_monitorInterval=30 +LOG4J2.PROPERTIES_filter=read,write +LOG4J2.PROPERTIES_filter.read.type=MarkerFilter +LOG4J2.PROPERTIES_filter.read.marker=READ +LOG4J2.PROPERTIES_filter.read.onMatch=DENY +LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL +LOG4J2.PROPERTIES_filter.write.type=MarkerFilter +LOG4J2.PROPERTIES_filter.write.marker=WRITE +LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL +LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL +LOG4J2.PROPERTIES_appenders=console, rolling +LOG4J2.PROPERTIES_appender.console.type=Console +LOG4J2.PROPERTIES_appender.console.name=STDOUT +LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout +LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n +LOG4J2.PROPERTIES_appender.rolling.type=RollingFile +LOG4J2.PROPERTIES_appender.rolling.name=RollingFile +LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log +LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz +LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout +LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n +LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies +LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy +LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 +LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy +LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB +LOG4J2.PROPERTIES_loggers=audit +LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger +LOG4J2.PROPERTIES_logger.audit.name=OMAudit +LOG4J2.PROPERTIES_logger.audit.level=INFO +LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling +LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile +LOG4J2.PROPERTIES_rootLogger.level=INFO +LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout +LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT + diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh new file mode 100755 index 0000000000000..892a540b46741 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +start_docker_env + +execute_robot_test scm createmrenv.robot + +# reinitialize the directories to use +export OZONE_DIR=/opt/ozone +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" +execute_robot_test rm mapreduce.robot + +stop_docker_env + +generate_report diff --git a/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot b/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot new file mode 100644 index 0000000000000..49d06aa3c669c --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot @@ -0,0 +1,48 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Create directories required for MR test +Library OperatingSystem +Resource commonlib.robot +Test Timeout 2 minute + + +*** Variables *** +${volume} vol1 +${bucket} bucket1 + + +*** Keywords *** +Create volume + ${result} = Execute ozone sh volume create /${volume} --user hadoop --quota 100TB --root + Should not contain ${result} Failed + Should contain ${result} Creating Volume: ${volume} +Create bucket + Execute ozone sh bucket create /${volume}/${bucket} + +*** Test Cases *** +Create test volume, bucket and key + ${result} = Execute And Ignore Error ozone sh bucket info /${volume}/${bucket} + Run Keyword if "VOLUME_NOT_FOUND" in """${result}""" Create volume + Run Keyword if "VOLUME_NOT_FOUND" in """${result}""" Create bucket + Run Keyword if "BUCKET_NOT_FOUND" in """${result}""" Create bucket + ${result} = Execute ozone sh bucket info /${volume}/${bucket} + Should not contain ${result} NOT_FOUND + Execute ozone sh key put /vol1/bucket1/key1 LICENSE.txt + +Create user dir for hadoop + Execute ozone fs -mkdir /user + Execute ozone fs -mkdir /user/hadoop diff --git a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot new file mode 100644 index 0000000000000..a608677489054 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Execute MR jobs +Library OperatingSystem +Resource commonlib.robot +Test Timeout 2 minute + + +*** Variables *** +${volume} vol1 +${bucket} bucket1 +${hadoop.version} 3.2.0 + + +*** Test cases *** +Execute PI calculation + ${output} = Execute yarn jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-${hadoop.version}.jar pi 3 3 + Should Contain ${output} completed successfully + +Execute WordCount + ${random} Generate Random String 2 [NUMBERS] + ${output} = Execute yarn jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-${hadoop.version}.jar wordcount o3fs://bucket1.vol1/key1 o3fs://bucket1.vol1/key1-${random}.count + Should Contain ${output} completed successfully From e140a450465c903217c73942f1d9200ea7f27570 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Tue, 4 Jun 2019 08:20:45 +0200 Subject: [PATCH 0102/1308] HDDS-1629. Tar file creation can be optional for non-dist builds. Contributed by Elek, Marton. (#887) --- hadoop-ozone/dist/pom.xml | 49 +++++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 046f89c346c06..855fab8eb2153 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -225,24 +225,6 @@ - - tar-ozone - package - - exec - - - ${shell-executable} - ${project.build.directory} - - - ${basedir}/dev-support/bin/dist-tar-stitching - - ${hdds.version} - ${project.build.directory} - - - + + hadoop-src + + tar.gz + + true + + + . + + LICENCE.txt + README.txt + NOTICE.txt + + + + . + true + + .git/** + **/.gitignore + **/.svn + **/*.iws + **/*.ipr + **/*.iml + **/.classpath + **/.project + **/.settings + **/target/** + + **/*.log + **/build/** + **/file:/** + **/SecurityAuth.audit* + + + + diff --git a/pom.xml b/pom.xml index c77ec4baeac39..165052e5903c4 100644 --- a/pom.xml +++ b/pom.xml @@ -641,6 +641,63 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x + + submarine-src + + false + + + + + org.apache.maven.plugins + maven-assembly-plugin + false + + + src-dist + package + + single + + + false + false + hadoop-submarine-${project.version}-src + hadoop-dist/target + + + + hadoop-assemblies/src/main/resources/assemblies/hadoop-src-submarine.xml + + + + + + + org.apache.maven.plugins + maven-antrun-plugin + false + + + src-dist-msg + package + + run + + + + + Hadoop source tar with Submarine source is available at: ${basedir}/hadoop-dist/target/hadoop-${project.version}-src-with-submarine.tar.gz + + + + + + + + + + sign From ef2da40a703b642b835e054326b989187a4dbfd3 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Wed, 5 Jun 2019 07:51:23 +0200 Subject: [PATCH 0116/1308] HDDS-1640. 
Reduce the size of recon jar file Closes #907 --- hadoop-ozone/pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index baefe34152345..2d80c3a284c79 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -342,6 +342,12 @@ org.apache.maven.plugins maven-jar-plugin + + + **/node_modules/* + **/ozone-recon-web/** + + From 433e97cd34f0569f8ae7d165a27449d0b7f8bac8 Mon Sep 17 00:00:00 2001 From: Weiwei Yang Date: Wed, 5 Jun 2019 13:55:30 +0800 Subject: [PATCH 0117/1308] YARN-9600. Support self-adaption width for columns of containers table on app attempt page. Contributed by Tao Yang. --- .../org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java index 2d53dc9557814..77ee9e9dde65b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java @@ -155,7 +155,8 @@ public Collection run() throws Exception { // Container Table TBODY> tbody = - html.table("#containers").thead().tr().th(".id", "Container ID") + html.table("#containers").$style("width:100%") + .thead().tr().th(".id", "Container ID") .th(".node", "Node").th(".exitstatus", "Container Exit Status") .th(".logs", "Logs").__().__().tbody(); From 42cd861be08767f2388d9efdc5047c4840312c2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Wed, 5 Jun 2019 14:04:17 +0200 Subject: [PATCH 0118/1308] HDDS-1628. Fix the execution and return code of smoketest executor shell script Closes #902 --- hadoop-ozone/dev-support/checks/acceptance.sh | 3 ++- hadoop-ozone/dist/src/main/compose/test-all.sh | 2 +- hadoop-ozone/dist/src/main/smoketest/test.sh | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh index 0a4c5d6f6fe00..8de920ff9e556 100755 --- a/hadoop-ozone/dev-support/checks/acceptance.sh +++ b/hadoop-ozone/dev-support/checks/acceptance.sh @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" export HADOOP_VERSION=3 -hadoop-ozone/dist/target/ozone-*-SNAPSHOT/smoketest/test.sh +"$DIR/../../../hadoop-ozone/dist/target/ozone-*-SNAPSHOT/compose/test-all.sh" exit $? diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh index 225acecd4705c..a17ef4d83e7c1 100755 --- a/hadoop-ozone/dist/src/main/compose/test-all.sh +++ b/hadoop-ozone/dist/src/main/compose/test-all.sh @@ -34,7 +34,7 @@ for test in $(find $SCRIPT_DIR -name test.sh); do #required to read the .env file from the right location cd "$(dirname "$test")" || continue - $test + ./test.sh ret=$? 
if [[ $ret -ne 0 ]]; then RESULT=-1 diff --git a/hadoop-ozone/dist/src/main/smoketest/test.sh b/hadoop-ozone/dist/src/main/smoketest/test.sh index b2cdfc31bc828..e0a26b0793002 100755 --- a/hadoop-ozone/dist/src/main/smoketest/test.sh +++ b/hadoop-ozone/dist/src/main/smoketest/test.sh @@ -23,5 +23,6 @@ REPLACEMENT="$DIR/../compose/test-all.sh" echo "THIS SCRIPT IS DEPRECATED. Please use $REPLACEMENT instead." ${REPLACEMENT} - +RESULT=$? cp -r "$DIR/../compose/result" "$DIR" +exit $RESULT From 7724d8031b3b8cf499c9777c837b5000db12ecee Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Wed, 5 Jun 2019 12:42:45 +0100 Subject: [PATCH 0119/1308] Revert "HADOOP-16321: ITestS3ASSL+TestOpenSSLSocketFactory failing with java.lang.UnsatisfiedLinkErrors" This reverts commit 5906268f0dd63a93eb591ddccf70d23b15e5c2ed. --- .../hadoop/security/ssl/TestOpenSSLSocketFactory.java | 8 ++------ .../test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java | 5 +---- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java index 41ec3e4516f0e..ea881e990b934 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java @@ -35,10 +35,7 @@ public class TestOpenSSLSocketFactory { @Test public void testOpenSSL() throws IOException { - assumeTrue("Unable to load native libraries", - NativeCodeLoader.isNativeCodeLoaded()); - assumeTrue("Build was not compiled with support for OpenSSL", - NativeCodeLoader.buildSupportsOpenssl()); + assumeTrue(NativeCodeLoader.buildSupportsOpenssl()); OpenSSLSocketFactory.initializeDefaultFactory( OpenSSLSocketFactory.SSLChannelMode.OpenSSL); assertThat(OpenSSLSocketFactory.getDefaultFactory() @@ -47,8 +44,7 @@ public void testOpenSSL() throws IOException { @Test public void testJSEEJava8() throws IOException { - assumeTrue("Not running on Java 8", - System.getProperty("java.version").startsWith("1.8")); + assumeTrue(System.getProperty("java.version").startsWith("1.8")); OpenSSLSocketFactory.initializeDefaultFactory( OpenSSLSocketFactory.SSLChannelMode.Default_JSSE); assertThat(Arrays.stream(OpenSSLSocketFactory.getDefaultFactory() diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java index 4232b0f270eb3..794bf80826650 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java @@ -40,10 +40,7 @@ public class ITestS3ASSL extends AbstractS3ATestBase { @Test public void testOpenSSL() throws IOException { - assumeTrue("Unable to load native libraries", - NativeCodeLoader.isNativeCodeLoaded()); - assumeTrue("Build was not compiled with support for OpenSSL", - NativeCodeLoader.buildSupportsOpenssl()); + assumeTrue(NativeCodeLoader.buildSupportsOpenssl()); Configuration conf = new Configuration(getConfiguration()); conf.setEnum(Constants.SSL_CHANNEL_MODE, OpenSSLSocketFactory.SSLChannelMode.OpenSSL); From 309501c6fa1073f3cfd7e535a4207dbfb21165f9 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Wed, 5 Jun 2019 12:43:36 +0100 Subject: [PATCH 0120/1308] Revert 
"HADOOP-16050: s3a SSL connections should use OpenSSL" This reverts commit b067f8acaa79b1230336900a5c62ba465b2adb28. Change-Id: I584b050a56c0e6f70b11fa3f7db00d5ac46e7dd8 --- hadoop-common-project/hadoop-common/pom.xml | 10 --- .../ssl/TestOpenSSLSocketFactory.java | 53 -------------- hadoop-tools/hadoop-aws/pom.xml | 5 -- .../org/apache/hadoop/fs/s3a/Constants.java | 6 -- .../org/apache/hadoop/fs/s3a/S3AUtils.java | 38 ++-------- .../org/apache/hadoop/fs/s3a/ITestS3ASSL.java | 72 ------------------- hadoop-tools/hadoop-azure/pom.xml | 2 +- .../hadoop/fs/azurebfs/AbfsConfiguration.java | 4 +- .../constants/FileSystemConfigurations.java | 6 +- .../fs/azurebfs/services/AbfsClient.java | 8 +-- .../azurebfs/services/AbfsHttpOperation.java | 4 +- .../fs/azurebfs/utils/SSLSocketFactoryEx.java | 62 ++++++++-------- ...TestAbfsConfigurationFieldsValidation.java | 16 ++--- .../fs/azurebfs/services/TestAbfsClient.java | 6 +- 14 files changed, 57 insertions(+), 235 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java delete mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java rename hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/OpenSSLSocketFactory.java => hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/SSLSocketFactoryEx.java (82%) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 6d15958e353a7..64e4d044191cf 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -343,16 +343,6 @@ dnsjava compile - - org.wildfly.openssl - wildfly-openssl - provided - - - org.assertj - assertj-core - test - diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java deleted file mode 100644 index ea881e990b934..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestOpenSSLSocketFactory.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.security.ssl; - -import java.io.IOException; -import java.util.Arrays; - -import org.junit.Test; - -import org.apache.hadoop.util.NativeCodeLoader; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.Assume.assumeTrue; - -/** - * Tests for {@link OpenSSLSocketFactory}. 
- */ -public class TestOpenSSLSocketFactory { - - @Test - public void testOpenSSL() throws IOException { - assumeTrue(NativeCodeLoader.buildSupportsOpenssl()); - OpenSSLSocketFactory.initializeDefaultFactory( - OpenSSLSocketFactory.SSLChannelMode.OpenSSL); - assertThat(OpenSSLSocketFactory.getDefaultFactory() - .getProviderName()).contains("openssl"); - } - - @Test - public void testJSEEJava8() throws IOException { - assumeTrue(System.getProperty("java.version").startsWith("1.8")); - OpenSSLSocketFactory.initializeDefaultFactory( - OpenSSLSocketFactory.SSLChannelMode.Default_JSSE); - assertThat(Arrays.stream(OpenSSLSocketFactory.getDefaultFactory() - .getSupportedCipherSuites())).noneMatch("GCM"::contains); - } -} diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index 880ae832e5b8d..9dc0acc2d2d96 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -417,11 +417,6 @@ aws-java-sdk-bundle compile - - org.wildfly.openssl - wildfly-openssl - runtime - junit junit diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index 7a687943cfb7b..18ed7b44027dc 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -20,7 +20,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.security.ssl.OpenSSLSocketFactory; import java.util.concurrent.TimeUnit; @@ -148,11 +147,6 @@ private Constants() { "fs.s3a.connection.ssl.enabled"; public static final boolean DEFAULT_SECURE_CONNECTIONS = true; - // use OpenSSL or JSEE for secure connections - public static final String SSL_CHANNEL_MODE = "fs.s3a.ssl.channel.mode"; - public static final OpenSSLSocketFactory.SSLChannelMode - DEFAULT_SSL_CHANNEL_MODE = OpenSSLSocketFactory.SSLChannelMode.Default; - //use a custom endpoint? 
public static final String ENDPOINT = "fs.s3a.endpoint"; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java index 4d9fc3292f0b2..fe4fd0ffd6a70 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java @@ -34,7 +34,6 @@ import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.MultiObjectDeleteException; import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.amazonaws.thirdparty.apache.http.conn.ssl.SSLConnectionSocketFactory; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -52,7 +51,6 @@ import org.apache.hadoop.fs.s3native.S3xLoginHelper; import org.apache.hadoop.net.ConnectTimeoutException; import org.apache.hadoop.security.ProviderUtils; -import org.apache.hadoop.security.ssl.OpenSSLSocketFactory; import org.apache.hadoop.util.VersionInfo; import com.google.common.collect.Lists; @@ -60,7 +58,6 @@ import org.slf4j.LoggerFactory; import javax.annotation.Nullable; -import javax.net.ssl.HostnameVerifier; import java.io.Closeable; import java.io.EOFException; import java.io.FileNotFoundException; @@ -1254,15 +1251,14 @@ public static ClientConfiguration createAwsConf(Configuration conf, * * @param conf Hadoop configuration * @param awsConf AWS SDK configuration - * - * @throws IOException if there was an error initializing the protocol - * settings */ public static void initConnectionSettings(Configuration conf, - ClientConfiguration awsConf) throws IOException { + ClientConfiguration awsConf) { awsConf.setMaxConnections(intOption(conf, MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS, 1)); - initProtocolSettings(conf, awsConf); + boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS, + DEFAULT_SECURE_CONNECTIONS); + awsConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP); awsConf.setMaxErrorRetry(intOption(conf, MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES, 0)); awsConf.setConnectionTimeout(intOption(conf, ESTABLISH_TIMEOUT, @@ -1281,32 +1277,6 @@ public static void initConnectionSettings(Configuration conf, } } - /** - * Initializes the connection protocol settings when connecting to S3 (e.g. - * either HTTP or HTTPS). If secure connections are enabled, this method - * will load the configured SSL providers. - * - * @param conf Hadoop configuration - * @param awsConf AWS SDK configuration - * - * @throws IOException if there is an error initializing the configured - * {@link javax.net.ssl.SSLSocketFactory} - */ - private static void initProtocolSettings(Configuration conf, - ClientConfiguration awsConf) throws IOException { - boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS, - DEFAULT_SECURE_CONNECTIONS); - awsConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP); - if (secureConnections) { - OpenSSLSocketFactory.initializeDefaultFactory( - conf.getEnum(SSL_CHANNEL_MODE, DEFAULT_SSL_CHANNEL_MODE)); - awsConf.getApacheHttpClientConfig().setSslSocketFactory( - new SSLConnectionSocketFactory( - OpenSSLSocketFactory.getDefaultFactory(), - (HostnameVerifier) null)); - } - } - /** * Initializes AWS SDK proxy support in the AWS client configuration * if the S3A settings enable it. 
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java deleted file mode 100644 index 794bf80826650..0000000000000 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.s3a; - -import java.io.IOException; - -import org.junit.Test; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.apache.hadoop.security.ssl.OpenSSLSocketFactory; -import org.apache.hadoop.util.NativeCodeLoader; - -import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; -import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; -import static org.junit.Assume.assumeTrue; - -/** - * Tests non-default values for {@link Constants#SSL_CHANNEL_MODE}. - */ -public class ITestS3ASSL extends AbstractS3ATestBase { - - @Test - public void testOpenSSL() throws IOException { - assumeTrue(NativeCodeLoader.buildSupportsOpenssl()); - Configuration conf = new Configuration(getConfiguration()); - conf.setEnum(Constants.SSL_CHANNEL_MODE, - OpenSSLSocketFactory.SSLChannelMode.OpenSSL); - try (S3AFileSystem fs = S3ATestUtils.createTestFileSystem(conf)) { - writeThenReadFile(fs, path("ITestS3ASSL/testOpenSSL")); - } - } - - @Test - public void testJSEE() throws IOException { - Configuration conf = new Configuration(getConfiguration()); - conf.setEnum(Constants.SSL_CHANNEL_MODE, - OpenSSLSocketFactory.SSLChannelMode.Default_JSSE); - try (S3AFileSystem fs = S3ATestUtils.createTestFileSystem(conf)) { - writeThenReadFile(fs, path("ITestS3ASSL/testJSEE")); - } - } - - /** - * Helper function that writes and then reads a file. Unlike - * {@link #writeThenReadFile(Path, int)} it takes a {@link FileSystem} as a - * parameter. 
- */ - private void writeThenReadFile(FileSystem fs, Path path) throws IOException { - byte[] data = dataset(1024, 'a', 'z'); - writeDataset(fs, path, data, data.length, 1024, true); - ContractTestUtils.verifyFileContents(fs, path, data); - } -} diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index 26d37b852ee38..1a4250f6667ba 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -194,7 +194,7 @@ org.wildfly.openssl wildfly-openssl - runtime + compile diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index 36e6237cd45be..5c348b839a2d1 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -56,7 +56,7 @@ import org.apache.hadoop.fs.azurebfs.services.AuthType; import org.apache.hadoop.fs.azurebfs.services.KeyProvider; import org.apache.hadoop.fs.azurebfs.services.SimpleKeyProvider; -import org.apache.hadoop.security.ssl.OpenSSLSocketFactory; +import org.apache.hadoop.fs.azurebfs.utils.SSLSocketFactoryEx; import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.util.ReflectionUtils; @@ -435,7 +435,7 @@ public String getCustomUserAgentPrefix() { return this.userAgentId; } - public OpenSSLSocketFactory.SSLChannelMode getPreferredSSLFactoryOption() { + public SSLSocketFactoryEx.SSLChannelMode getPreferredSSLFactoryOption() { return getEnum(FS_AZURE_SSL_CHANNEL_MODE_KEY, DEFAULT_FS_AZURE_SSL_CHANNEL_MODE); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java index 5964f90bd13a7..97443079b1b48 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java @@ -20,7 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.security.ssl.OpenSSLSocketFactory; +import org.apache.hadoop.fs.azurebfs.utils.SSLSocketFactoryEx; /** * Responsible to keep all the Azure Blob File System related configurations. 
@@ -59,8 +59,8 @@ public final class FileSystemConfigurations { public static final boolean DEFAULT_ENABLE_FLUSH = true; public static final boolean DEFAULT_ENABLE_AUTOTHROTTLING = true; - public static final OpenSSLSocketFactory.SSLChannelMode DEFAULT_FS_AZURE_SSL_CHANNEL_MODE - = OpenSSLSocketFactory.SSLChannelMode.Default; + public static final SSLSocketFactoryEx.SSLChannelMode DEFAULT_FS_AZURE_SSL_CHANNEL_MODE + = SSLSocketFactoryEx.SSLChannelMode.Default; public static final boolean DEFAULT_ENABLE_DELEGATION_TOKEN = false; public static final boolean DEFAULT_ENABLE_HTTPS = true; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index 7f71ae3d1534b..c29543fb799a6 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -29,7 +29,7 @@ import java.util.Locale; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.security.ssl.OpenSSLSocketFactory; +import org.apache.hadoop.fs.azurebfs.utils.SSLSocketFactoryEx; import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; import org.apache.hadoop.fs.azurebfs.constants.HttpQueryParams; @@ -79,10 +79,10 @@ public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredent if (this.baseUrl.toString().startsWith(HTTPS_SCHEME)) { try { - OpenSSLSocketFactory.initializeDefaultFactory(this.abfsConfiguration.getPreferredSSLFactoryOption()); - sslProviderName = OpenSSLSocketFactory.getDefaultFactory().getProviderName(); + SSLSocketFactoryEx.initializeDefaultFactory(this.abfsConfiguration.getPreferredSSLFactoryOption()); + sslProviderName = SSLSocketFactoryEx.getDefaultFactory().getProviderName(); } catch (IOException e) { - // Suppress exception. Failure to init OpenSSLSocketFactory would have only performance impact. + // Suppress exception. Failure to init SSLSocketFactoryEx would have only performance impact. } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java index bbdd9533fa681..78e1afd6b739b 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java @@ -29,7 +29,7 @@ import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLSocketFactory; -import org.apache.hadoop.security.ssl.OpenSSLSocketFactory; +import org.apache.hadoop.fs.azurebfs.utils.SSLSocketFactoryEx; import org.codehaus.jackson.JsonFactory; import org.codehaus.jackson.JsonParser; import org.codehaus.jackson.JsonToken; @@ -180,7 +180,7 @@ public AbfsHttpOperation(final URL url, final String method, final List Date: Wed, 5 Jun 2019 21:33:00 +0800 Subject: [PATCH 0121/1308] HDFS-14356. Implement HDFS cache on SCM with native PMDK libs. Contributed by Feilong He. 
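
The change below adds a JNI wrapper, NativeIO.POSIX.Pmem, around libpmem so the
DataNode can memory-map cached block files onto persistent memory. A minimal
usage sketch follows; it is illustrative only and not part of the patch. The
class name PmemCacheSketch and the error handling are assumptions, while the
mapBlock / memCopy / memSync / unmapBlock calls and the isPmdkAvailable() check
mirror the methods introduced in NativeIO.java further down (mapBlock uses
pmem_map_file underneath), assuming the target path lives on a DAX-mounted
persistent memory device such as the /mnt/pmem0 volume used by the new tests.

    import java.io.IOException;

    import org.apache.hadoop.io.nativeio.NativeIO;
    import org.apache.hadoop.io.nativeio.NativeIO.POSIX.PmemMappedRegion;

    public class PmemCacheSketch {
      /**
       * Copy the given bytes into a new file on persistent memory and make
       * them durable, using the Pmem JNI wrappers added by this patch.
       */
      public static void cache(String path, byte[] data) throws IOException {
        if (!NativeIO.isAvailable() || !NativeIO.POSIX.isPmdkAvailable()) {
          // Native code or PMDK libs missing: callers fall back to the
          // existing non-native cache loaders.
          return;
        }
        // Create the file and memory-map it (pmem_map_file underneath).
        PmemMappedRegion region =
            NativeIO.POSIX.Pmem.mapBlock(path, data.length);
        if (region == null) {
          throw new IOException("Failed to map " + path
              + " to persistent memory");
        }
        try {
          // Copy without drain, then flush the content to persistent storage.
          NativeIO.POSIX.Pmem.memCopy(data, region.getAddress(),
              region.isPmem(), data.length);
          NativeIO.POSIX.Pmem.memSync(region);
        } finally {
          NativeIO.POSIX.Pmem.unmapBlock(region.getAddress(),
              region.getLength());
        }
      }
    }

NativePmemMappableBlockLoader below follows the same map, copy, sync sequence
while verifying the block checksum chunk by chunk, and on the read path
FsDatasetUtil.getDirectInputStream(address, length) wraps the mapped address
in a ByteBuffer-backed InputStream.
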
--- BUILDING.txt | 28 ++ dev-support/bin/dist-copynativelibs | 8 + hadoop-common-project/hadoop-common/pom.xml | 2 + .../hadoop-common/src/CMakeLists.txt | 21 ++ .../hadoop-common/src/config.h.cmake | 1 + .../apache/hadoop/io/nativeio/NativeIO.java | 135 +++++++++- .../org/apache/hadoop/io/nativeio/NativeIO.c | 252 ++++++++++++++++++ .../org/apache/hadoop/io/nativeio/pmdk_load.c | 106 ++++++++ .../org/apache/hadoop/io/nativeio/pmdk_load.h | 95 +++++++ .../hadoop/io/nativeio/TestNativeIO.java | 153 +++++++++++ .../fsdataset/impl/FsDatasetCache.java | 22 ++ .../fsdataset/impl/FsDatasetImpl.java | 8 + .../fsdataset/impl/FsDatasetUtil.java | 22 ++ .../fsdataset/impl/MappableBlock.java | 6 + .../fsdataset/impl/MappableBlockLoader.java | 11 +- .../impl/MappableBlockLoaderFactory.java | 4 + .../impl/MemoryMappableBlockLoader.java | 8 +- .../fsdataset/impl/MemoryMappedBlock.java | 5 + .../impl/NativePmemMappableBlockLoader.java | 191 +++++++++++++ .../fsdataset/impl/NativePmemMappedBlock.java | 85 ++++++ .../impl/PmemMappableBlockLoader.java | 10 +- .../fsdataset/impl/PmemMappedBlock.java | 5 + 22 files changed, 1166 insertions(+), 12 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.c create mode 100644 hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.h create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappedBlock.java diff --git a/BUILDING.txt b/BUILDING.txt index cc9ac177ca298..8c57a1d8e2d6e 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -78,6 +78,8 @@ Optional packages: $ sudo apt-get install fuse libfuse-dev * ZStandard compression $ sudo apt-get install zstd +* PMDK library for storage class memory(SCM) as HDFS cache backend + Please refer to http://pmem.io/ and https://github.com/pmem/pmdk ---------------------------------------------------------------------------------- Maven main modules: @@ -262,6 +264,32 @@ Maven build goals: invoke, run 'mvn dependency-check:aggregate'. Note that this plugin requires maven 3.1.1 or greater. + PMDK library build options: + + The Persistent Memory Development Kit (PMDK), formerly known as NVML, is a growing + collection of libraries which have been developed for various use cases, tuned, + validated to production quality, and thoroughly documented. These libraries are built + on the Direct Access (DAX) feature available in both Linux and Windows, which allows + applications directly load/store access to persistent memory by memory-mapping files + on a persistent memory aware file system. + + It is currently an optional component, meaning that Hadoop can be built without + this dependency. Please Note the library is used via dynamic module. For getting + more details please refer to the official sites: + http://pmem.io/ and https://github.com/pmem/pmdk. + + * -Drequire.pmdk is used to build the project with PMDK libraries forcibly. With this + option provided, the build will fail if libpmem library is not found. If this option + is not given, the build will generate a version of Hadoop with libhadoop.so. + And storage class memory(SCM) backed HDFS cache is still supported without PMDK involved. 
+ Because PMDK can bring better caching write/read performance, it is recommended to build + the project with this option if user plans to use SCM backed HDFS cache. + * -Dpmdk.lib is used to specify a nonstandard location for PMDK libraries if they are not + under /usr/lib or /usr/lib64. + * -Dbundle.pmdk is used to copy the specified libpmem libraries into the distribution tar + package. This option requires that -Dpmdk.lib is specified. With -Dbundle.pmdk provided, + the build will fail if -Dpmdk.lib is not specified. + ---------------------------------------------------------------------------------- Building components separately diff --git a/dev-support/bin/dist-copynativelibs b/dev-support/bin/dist-copynativelibs index 67d2edf22d304..4a783f086a4dc 100755 --- a/dev-support/bin/dist-copynativelibs +++ b/dev-support/bin/dist-copynativelibs @@ -96,6 +96,12 @@ for i in "$@"; do --isalbundle=*) ISALBUNDLE=${i#*=} ;; + --pmdklib=*) + PMDKLIB=${i#*=} + ;; + --pmdkbundle=*) + PMDKBUNDLE=${i#*=} + ;; --opensslbinbundle=*) OPENSSLBINBUNDLE=${i#*=} ;; @@ -153,6 +159,8 @@ if [[ -d "${LIB_DIR}" ]]; then bundle_native_lib "${OPENSSLLIBBUNDLE}" "openssl.lib" "crypto" "${OPENSSLLIB}" bundle_native_lib "${ISALBUNDLE}" "isal.lib" "isa" "${ISALLIB}" + + bundle_native_lib "${PMDKBUNDLE}" "pmdk.lib" "pmdk" "${PMDKLIB}" fi # Windows diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 64e4d044191cf..5b600538d6ce0 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -682,6 +682,8 @@ ${require.isal} ${isal.prefix} ${isal.lib} + ${require.pmdk} + ${pmdk.lib} ${require.openssl} ${openssl.prefix} ${openssl.lib} diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt index b9287c0f4b5ad..771c685c7026b 100644 --- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt +++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt @@ -121,6 +121,7 @@ else () ENDIF(REQUIRE_ZSTD) endif () +#Require ISA-L set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) hadoop_set_find_shared_library_version("2") find_library(ISAL_LIBRARY @@ -159,6 +160,25 @@ else (ISAL_LIBRARY) ENDIF(REQUIRE_ISAL) endif (ISAL_LIBRARY) +# Build with PMDK library if -Drequire.pmdk option is specified. +if(REQUIRE_PMDK) + set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) + hadoop_set_find_shared_library_version("1") + find_library(PMDK_LIBRARY + NAMES pmem + PATHS ${CUSTOM_PMDK_LIB} /usr/lib /usr/lib64) + set(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES}) + + if(PMDK_LIBRARY) + GET_FILENAME_COMPONENT(HADOOP_PMDK_LIBRARY ${PMDK_LIBRARY} NAME) + set(PMDK_SOURCE_FILES ${SRC}/io/nativeio/pmdk_load.c) + else(PMDK_LIBRARY) + MESSAGE(FATAL_ERROR "The required PMDK library is NOT found. PMDK_LIBRARY=${PMDK_LIBRARY}") + endif(PMDK_LIBRARY) +else(REQUIRE_PMDK) + MESSAGE(STATUS "Build without PMDK support.") +endif(REQUIRE_PMDK) + # Build hardware CRC32 acceleration, if supported on the platform. 
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^i.86$" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64") set(BULK_CRC_ARCH_SOURCE_FIlE "${SRC}/util/bulk_crc32_x86.c") @@ -256,6 +276,7 @@ hadoop_add_dual_library(hadoop ${SRC}/io/compress/zlib/ZlibDecompressor.c ${BZIP2_SOURCE_FILES} ${SRC}/io/nativeio/NativeIO.c + ${PMDK_SOURCE_FILES} ${SRC}/io/nativeio/errno_enum.c ${SRC}/io/nativeio/file_descriptor.c ${SRC}/io/nativeio/SharedFileDescriptorFactory.c diff --git a/hadoop-common-project/hadoop-common/src/config.h.cmake b/hadoop-common-project/hadoop-common/src/config.h.cmake index 40aa467373c8d..7e23a5df3281c 100644 --- a/hadoop-common-project/hadoop-common/src/config.h.cmake +++ b/hadoop-common-project/hadoop-common/src/config.h.cmake @@ -24,6 +24,7 @@ #cmakedefine HADOOP_ZSTD_LIBRARY "@HADOOP_ZSTD_LIBRARY@" #cmakedefine HADOOP_OPENSSL_LIBRARY "@HADOOP_OPENSSL_LIBRARY@" #cmakedefine HADOOP_ISAL_LIBRARY "@HADOOP_ISAL_LIBRARY@" +#cmakedefine HADOOP_PMDK_LIBRARY "@HADOOP_PMDK_LIBRARY@" #cmakedefine HAVE_SYNC_FILE_RANGE #cmakedefine HAVE_POSIX_FADVISE diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java index 4e0cd8fdd865c..1d0eab7f5c1b6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java @@ -100,6 +100,48 @@ public static class POSIX { write. */ public static int SYNC_FILE_RANGE_WAIT_AFTER = 4; + /** + * Keeps the support state of PMDK. + */ + public enum SupportState { + UNSUPPORTED(-1), + PMDK_LIB_NOT_FOUND(1), + SUPPORTED(0); + + private byte stateCode; + SupportState(int stateCode) { + this.stateCode = (byte) stateCode; + } + + public int getStateCode() { + return stateCode; + } + + public String getMessage() { + String msg; + switch (stateCode) { + case -1: + msg = "The native code is built without PMDK support."; + break; + case 1: + msg = "The native code is built with PMDK support, but PMDK libs " + + "are NOT found in execution environment or failed to be loaded."; + break; + case 0: + msg = "The native code is built with PMDK support, and PMDK libs " + + "are loaded successfully."; + break; + default: + msg = "The state code: " + stateCode + " is unrecognized!"; + } + return msg; + } + } + + // Denotes the state of supporting PMDK. The value is set by JNI. + private static SupportState pmdkSupportState = + SupportState.PMDK_LIB_NOT_FOUND; + private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class); // Set to true via JNI if possible @@ -124,6 +166,93 @@ public static void setCacheManipulator(CacheManipulator cacheManipulator) { POSIX.cacheManipulator = cacheManipulator; } + // This method is invoked by JNI. + public static void setPmdkSupportState(int stateCode) { + for (SupportState state : SupportState.values()) { + if (state.getStateCode() == stateCode) { + pmdkSupportState = state; + return; + } + } + LOG.error("The state code: " + stateCode + " is unrecognized!"); + } + + public static boolean isPmdkAvailable() { + LOG.info(pmdkSupportState.getMessage()); + return pmdkSupportState == SupportState.SUPPORTED; + } + + /** + * Denote memory region for a file mapped. 
+ */ + public static class PmemMappedRegion { + private long address; + private long length; + private boolean isPmem; + + public PmemMappedRegion(long address, long length, boolean isPmem) { + this.address = address; + this.length = length; + this.isPmem = isPmem; + } + + public boolean isPmem() { + return this.isPmem; + } + + public long getAddress() { + return this.address; + } + + public long getLength() { + return this.length; + } + } + + /** + * JNI wrapper of persist memory operations. + */ + public static class Pmem { + // check whether the address is a Pmem address or DIMM address + public static boolean isPmem(long address, long length) { + return NativeIO.POSIX.isPmemCheck(address, length); + } + + // create a pmem file and memory map it + public static PmemMappedRegion mapBlock(String path, long length) { + return NativeIO.POSIX.pmemCreateMapFile(path, length); + } + + // unmap a pmem file + public static boolean unmapBlock(long address, long length) { + return NativeIO.POSIX.pmemUnMap(address, length); + } + + // copy data from disk file(src) to pmem file(dest), without flush + public static void memCopy(byte[] src, long dest, boolean isPmem, + long length) { + NativeIO.POSIX.pmemCopy(src, dest, isPmem, length); + } + + // flush the memory content to persistent storage + public static void memSync(PmemMappedRegion region) { + if (region.isPmem()) { + NativeIO.POSIX.pmemDrain(); + } else { + NativeIO.POSIX.pmemSync(region.getAddress(), region.getLength()); + } + } + } + + private static native boolean isPmemCheck(long address, long length); + private static native PmemMappedRegion pmemCreateMapFile(String path, + long length); + private static native boolean pmemUnMap(long address, long length); + private static native void pmemCopy(byte[] src, long dest, boolean isPmem, + long length); + private static native void pmemDrain(); + private static native void pmemSync(long address, long length); + /** * Used to manipulate the operating system cache. */ @@ -143,8 +272,8 @@ public long getOperatingSystemPageSize() { } public void posixFadviseIfPossible(String identifier, - FileDescriptor fd, long offset, long len, int flags) - throws NativeIOException { + FileDescriptor fd, long offset, long len, int flags) + throws NativeIOException { NativeIO.POSIX.posixFadviseIfPossible(identifier, fd, offset, len, flags); } @@ -748,7 +877,7 @@ public CachedUid(String username, long timestamp) { * user account name, of the format DOMAIN\UserName. This method * will remove the domain part of the full logon name. 
* - * @param Fthe full principal name containing the domain + * @param name the full principal name containing the domain * @return name with domain removed */ private static String stripDomain(String name) { diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c index 2274d57ca9dd7..3a0641ba26c1f 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c @@ -36,6 +36,10 @@ #include #include #include +#ifdef HADOOP_PMDK_LIBRARY +#include +#include "pmdk_load.h" +#endif #if !(defined(__FreeBSD__) || defined(__MACH__)) #include #endif @@ -60,6 +64,7 @@ #define NATIVE_IO_POSIX_CLASS "org/apache/hadoop/io/nativeio/NativeIO$POSIX" #define NATIVE_IO_STAT_CLASS "org/apache/hadoop/io/nativeio/NativeIO$POSIX$Stat" +#define NATIVE_IO_POSIX_PMEMREGION_CLASS "org/apache/hadoop/io/nativeio/NativeIO$POSIX$PmemMappedRegion" #define SET_INT_OR_RETURN(E, C, F) \ { \ @@ -81,6 +86,12 @@ static jmethodID nioe_ctor; // Please see HADOOP-7156 for details. jobject pw_lock_object; +#ifdef HADOOP_PMDK_LIBRARY +// the NativeIO$POSIX$PmemMappedRegion inner class and its constructor +static jclass pmem_region_clazz = NULL; +static jmethodID pmem_region_ctor = NULL; +#endif + /* * Throw a java.IO.IOException, generating the message from errno. * NB. this is also used form windows_secure_container_executor.c @@ -269,6 +280,63 @@ static void nioe_deinit(JNIEnv *env) { nioe_ctor = NULL; } +#ifdef HADOOP_PMDK_LIBRARY +static int loadPmdkLib(JNIEnv *env) { + char errMsg[1024]; + jclass clazz = (*env)->FindClass(env, NATIVE_IO_POSIX_CLASS); + if (clazz == NULL) { + return 0; // exception has been raised + } + load_pmdk_lib(errMsg, sizeof(errMsg)); + jmethodID mid = (*env)->GetStaticMethodID(env, clazz, "setPmdkSupportState", "(I)V"); + if (mid == 0) { + return 0; + } + if (strlen(errMsg) > 0) { + (*env)->CallStaticVoidMethod(env, clazz, mid, 1); + return 0; + } + (*env)->CallStaticVoidMethod(env, clazz, mid, 0); + return 1; +} + +static void pmem_region_init(JNIEnv *env, jclass nativeio_class) { + + jclass clazz = NULL; + // Init Stat + clazz = (*env)->FindClass(env, NATIVE_IO_POSIX_PMEMREGION_CLASS); + if (!clazz) { + THROW(env, "java/io/IOException", "Failed to get PmemMappedRegion class"); + return; // exception has been raised + } + + // Init PmemMappedRegion class + pmem_region_clazz = (*env)->NewGlobalRef(env, clazz); + if (!pmem_region_clazz) { + THROW(env, "java/io/IOException", "Failed to new global reference of PmemMappedRegion class"); + return; // exception has been raised + } + + pmem_region_ctor = (*env)->GetMethodID(env, pmem_region_clazz, "", "(JJZ)V"); + if (!pmem_region_ctor) { + THROW(env, "java/io/IOException", "Failed to get PmemMappedRegion constructor"); + return; // exception has been raised + } +} + +static void pmem_region_deinit(JNIEnv *env) { + if (pmem_region_ctor != NULL) { + (*env)->DeleteGlobalRef(env, pmem_region_ctor); + pmem_region_ctor = NULL; + } + + if (pmem_region_clazz != NULL) { + (*env)->DeleteGlobalRef(env, pmem_region_clazz); + pmem_region_clazz = NULL; + } + } +#endif + /* * private static native void initNative(); * @@ -292,6 +360,11 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_initNative( #ifdef UNIX errno_enum_init(env); PASS_EXCEPTIONS_GOTO(env, error); +#ifdef 
HADOOP_PMDK_LIBRARY + if (loadPmdkLib(env)) { + pmem_region_init(env, clazz); + } +#endif #endif return; error: @@ -299,6 +372,9 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_initNative( // class wasn't initted yet #ifdef UNIX stat_deinit(env); +#ifdef HADOOP_PMDK_LIBRARY + pmem_region_deinit(env); +#endif #endif nioe_deinit(env); fd_deinit(env); @@ -1383,3 +1459,179 @@ JNIEnv *env, jclass clazz, jstring jsrc, jstring jdst) /** * vim: sw=2: ts=2: et: */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Class: org_apache_hadoop_io_nativeio_NativeIO_POSIX + * Method: isPmemCheck + * Signature: (JJ)Z + */ +JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_isPmemCheck( +JNIEnv *env, jclass thisClass, jlong address, jlong length) { + #if (defined UNIX) && (defined HADOOP_PMDK_LIBRARY) + jint is_pmem = pmdkLoader->pmem_is_pmem(address, length); + return (is_pmem) ? JNI_TRUE : JNI_FALSE; + #else + THROW(env, "java/lang/UnsupportedOperationException", + "The function isPmemCheck is not supported."); + return JNI_FALSE; + #endif + } + +/* + * Class: org_apache_hadoop_io_nativeio_NativeIO_POSIX + * Method: pmemCreateMapFile + * Signature: (Ljava/lang/String;J)Lorg/apache/hadoop/io/nativeio/NativeIO/POSIX/PmemMappedRegion; + */ +JNIEXPORT jobject JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_pmemCreateMapFile( +JNIEnv *env, jclass thisClass, jstring filePath, jlong fileLength) { + #if (defined UNIX) && (defined HADOOP_PMDK_LIBRARY) + /* create a pmem file and memory map it */ + const char * path = NULL; + void * pmemaddr = NULL; + size_t mapped_len = 0; + int is_pmem = 1; + char msg[1000]; + + path = (*env)->GetStringUTFChars(env, filePath, NULL); + if (!path) { + THROW(env, "java/lang/IllegalArgumentException", "File Path cannot be null"); + return NULL; + } + + if (fileLength <= 0) { + (*env)->ReleaseStringUTFChars(env, filePath, path); + THROW(env, "java/lang/IllegalArgumentException", "File length should be positive"); + return NULL; + } + + pmemaddr = pmdkLoader->pmem_map_file(path, fileLength, PMEM_FILE_CREATE|PMEM_FILE_EXCL, + 0666, &mapped_len, &is_pmem); + + if (!pmemaddr) { + snprintf(msg, sizeof(msg), "Failed to create pmem file. file: %s, length: %x, error msg: %s", path, fileLength, pmem_errormsg()); + THROW(env, "java/io/IOException", msg); + (*env)->ReleaseStringUTFChars(env, filePath, path); + return NULL; + } + + if (fileLength != mapped_len) { + snprintf(msg, sizeof(msg), "Mapped length doesn't match the request length. 
file :%s, request length:%x, returned length:%x, error msg:%s", path, fileLength, mapped_len, pmem_errormsg()); + THROW(env, "java/io/IOException", msg); + (*env)->ReleaseStringUTFChars(env, filePath, path); + return NULL; + } + + (*env)->ReleaseStringUTFChars(env, filePath, path); + + if ((!pmem_region_clazz) || (!pmem_region_ctor)) { + THROW(env, "java/io/IOException", "PmemMappedRegion class or constructor is NULL"); + return NULL; + } + + jobject ret = (*env)->NewObject(env, pmem_region_clazz, pmem_region_ctor, pmemaddr, mapped_len, (jboolean)is_pmem); + return ret; + + #else + THROW(env, "java/lang/UnsupportedOperationException", + "The function pmemCreateMapFile is not supported."); + return NULL; + #endif + } + +/* + * Class: org_apache_hadoop_io_nativeio_NativeIO_POSIX + * Method: pmemUnMap + * Signature: (JJ)V + */ +JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_pmemUnMap( +JNIEnv *env, jclass thisClass, jlong address, jlong length) { + #if (defined UNIX) && (defined HADOOP_PMDK_LIBRARY) + int succeed = 0; + char msg[1000]; + succeed = pmdkLoader->pmem_unmap(address, length); + // succeed = -1 failure; succeed = 0 success + if (succeed != 0) { + snprintf(msg, sizeof(msg), "Failed to unmap region. address: %x, length: %x, error msg: %s", address, length, pmem_errormsg()); + THROW(env, "java/io/IOException", msg); + return JNI_FALSE; + } else { + return JNI_TRUE; + } + #else + THROW(env, "java/lang/UnsupportedOperationException", + "The function pmemUnMap is not supported."); + return JNI_FALSE; + #endif + } + +/* + * Class: org_apache_hadoop_io_nativeio_NativeIO_POSIX + * Method: pmemCopy + * Signature: ([BJJ)V + */ +JNIEXPORT void JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_pmemCopy( +JNIEnv *env, jclass thisClass, jbyteArray buf, jlong address, jboolean is_pmem, jlong length) { + #if (defined UNIX) && (defined HADOOP_PMDK_LIBRARY) + char msg[1000]; + jbyte* srcBuf = (*env)->GetByteArrayElements(env, buf, 0); + snprintf(msg, sizeof(msg), "Pmem copy content. dest: %x, length: %x, src: %x ", address, length, srcBuf); + if (is_pmem) { + pmdkLoader->pmem_memcpy_nodrain(address, srcBuf, length); + } else { + memcpy(address, srcBuf, length); + } + (*env)->ReleaseByteArrayElements(env, buf, srcBuf, 0); + return; + #else + THROW(env, "java/lang/UnsupportedOperationException", + "The function pmemCopy is not supported."); + #endif + } + +/* + * Class: org_apache_hadoop_io_nativeio_NativeIO + * Method: pmemDrain + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_pmemDrain( +JNIEnv *env, jclass thisClass) { + #if (defined UNIX) && (defined HADOOP_PMDK_LIBRARY) + pmdkLoader->pmem_drain(); + #else + THROW(env, "java/lang/UnsupportedOperationException", + "The function pmemDrain is not supported."); + #endif + } + + /* + * Class: org_apache_hadoop_io_nativeio_NativeIO_POSIX + * Method: pmemSync + * Signature: (JJ)V + */ +JNIEXPORT void JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_pmemSync + (JNIEnv * env, jclass thisClass, jlong address, jlong length) { + + #if (defined UNIX) && (defined HADOOP_PMDK_LIBRARY) + int succeed = 0; + char msg[1000]; + succeed = pmdkLoader->pmem_msync(address, length); + // succeed = -1 failure + if (succeed = -1) { + snprintf(msg, sizeof(msg), "Failed to msync region. 
address: %x, length: %x, error msg: %s", address, length, pmem_errormsg()); + THROW(env, "java/io/IOException", msg); + return; + } + #else + THROW(env, "java/lang/UnsupportedOperationException", + "The function pmemSync is not supported."); + #endif + } + + +#ifdef __cplusplus +} +#endif diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.c new file mode 100644 index 0000000000000..f7d6cfba27075 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.c @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include "org_apache_hadoop.h" +#include "pmdk_load.h" +#include "org_apache_hadoop_io_nativeio_NativeIO.h" +#include "org_apache_hadoop_io_nativeio_NativeIO_POSIX.h" + +#ifdef UNIX +#include +#include +#include +#include + +#include "config.h" +#endif + +PmdkLibLoader * pmdkLoader; + +/** + * pmdk_load.c + * Utility of loading the libpmem library and the required functions. + * Building of this codes won't rely on any libpmem source codes, but running + * into this will rely on successfully loading of the dynamic library. 
+ * + */ + +static const char* load_functions() { +#ifdef UNIX + PMDK_LOAD_DYNAMIC_SYMBOL((pmdkLoader->pmem_map_file), "pmem_map_file"); + PMDK_LOAD_DYNAMIC_SYMBOL((pmdkLoader->pmem_unmap), "pmem_unmap"); + PMDK_LOAD_DYNAMIC_SYMBOL((pmdkLoader->pmem_is_pmem), "pmem_is_pmem"); + PMDK_LOAD_DYNAMIC_SYMBOL((pmdkLoader->pmem_drain), "pmem_drain"); + PMDK_LOAD_DYNAMIC_SYMBOL((pmdkLoader->pmem_memcpy_nodrain), "pmem_memcpy_nodrain"); + PMDK_LOAD_DYNAMIC_SYMBOL((pmdkLoader->pmem_msync), "pmem_msync"); +#endif + return NULL; +} + +void load_pmdk_lib(char* err, size_t err_len) { + const char* errMsg; + const char* library = NULL; +#ifdef UNIX + Dl_info dl_info; +#else + LPTSTR filename = NULL; +#endif + + err[0] = '\0'; + + if (pmdkLoader != NULL) { + return; + } + pmdkLoader = calloc(1, sizeof(PmdkLibLoader)); + + // Load PMDK library + #ifdef UNIX + pmdkLoader->libec = dlopen(HADOOP_PMDK_LIBRARY, RTLD_LAZY | RTLD_GLOBAL); + if (pmdkLoader->libec == NULL) { + snprintf(err, err_len, "Failed to load %s (%s)", + HADOOP_PMDK_LIBRARY, dlerror()); + return; + } + // Clear any existing error + dlerror(); + #endif + errMsg = load_functions(pmdkLoader->libec); + if (errMsg != NULL) { + snprintf(err, err_len, "Loading functions from PMDK failed: %s", errMsg); + } + +#ifdef UNIX + if(dladdr(pmdkLoader->pmem_map_file, &dl_info)) { + library = dl_info.dli_fname; + } +#else + if (GetModuleFileName(pmdkLoader->libec, filename, 256) > 0) { + library = filename; + } +#endif + + if (library == NULL) { + library = HADOOP_PMDK_LIBRARY; + } + + pmdkLoader->libname = strdup(library); +} diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.h new file mode 100644 index 0000000000000..c93a076fc0e5f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.h @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +#include "org_apache_hadoop.h" + +#ifdef UNIX +#include +#include +#include +#include +#endif + +#ifndef _PMDK_LOAD_H_ +#define _PMDK_LOAD_H_ + + +#ifdef UNIX +// For libpmem.h +typedef void * (*__d_pmem_map_file)(const char *path, size_t len, int flags, mode_t mode, + size_t *mapped_lenp, int *is_pmemp); +typedef int (* __d_pmem_unmap)(void *addr, size_t len); +typedef int (*__d_pmem_is_pmem)(const void *addr, size_t len); +typedef void (*__d_pmem_drain)(void); +typedef void * (*__d_pmem_memcpy_nodrain)(void *pmemdest, const void *src, size_t len); +typedef int (* __d_pmem_msync)(const void *addr, size_t len); + +#endif + +typedef struct __PmdkLibLoader { + // The loaded library handle + void* libec; + char* libname; + __d_pmem_map_file pmem_map_file; + __d_pmem_unmap pmem_unmap; + __d_pmem_is_pmem pmem_is_pmem; + __d_pmem_drain pmem_drain; + __d_pmem_memcpy_nodrain pmem_memcpy_nodrain; + __d_pmem_msync pmem_msync; +} PmdkLibLoader; + +extern PmdkLibLoader * pmdkLoader; + +/** + * A helper function to dlsym a 'symbol' from a given library-handle. + */ + +#ifdef UNIX + +static __attribute__ ((unused)) +void *myDlsym(void *handle, const char *symbol) { + void *func_ptr = dlsym(handle, symbol); + return func_ptr; +} + +/* A helper macro to dlsym the requisite dynamic symbol in NON-JNI env. */ +#define PMDK_LOAD_DYNAMIC_SYMBOL(func_ptr, symbol) \ + if ((func_ptr = myDlsym(pmdkLoader->libec, symbol)) == NULL) { \ + return "Failed to load symbol" symbol; \ + } + +#endif + +/** + * Return 0 if not support, 1 otherwise. + */ +int build_support_pmdk(); + +/** + * Initialize and load PMDK library, returning error message if any. + * + * @param err The err message buffer. + * @param err_len The length of the message buffer. 
+ */ +void load_pmdk_lib(char* err, size_t err_len); + +#endif //_PMDK_LOAD_H_ \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java index 6b3c2325d8ff6..a14928c7b4e24 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java @@ -25,6 +25,8 @@ import java.io.FileWriter; import java.io.IOException; import java.io.RandomAccessFile; +import java.nio.file.Files; +import java.nio.file.Paths; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; import java.nio.channels.FileChannel.MapMode; @@ -782,4 +784,155 @@ public void testNativeFadviseConsts() { assertTrue("Native POSIX_FADV_NOREUSE const not set", POSIX_FADV_NOREUSE >= 0); } + + + @Test (timeout=10000) + public void testPmemCheckParameters() { + assumeNotWindows("Native PMDK not supported on Windows"); + // Skip testing while the build or environment does not support PMDK + assumeTrue(NativeIO.POSIX.isPmdkAvailable()); + + // Please make sure /mnt/pmem0 is a persistent memory device with total + // volume size 'volumeSize' + String filePath = "/$:"; + long length = 0; + long volumnSize = 16 * 1024 * 1024 * 1024L; + + // Incorrect file length + try { + NativeIO.POSIX.Pmem.mapBlock(filePath, length); + fail("Illegal length parameter should be detected"); + } catch (Exception e) { + LOG.info(e.getMessage()); + } + + // Incorrect file length + filePath = "/mnt/pmem0/test_native_io"; + length = -1L; + try { + NativeIO.POSIX.Pmem.mapBlock(filePath, length); + fail("Illegal length parameter should be detected"); + }catch (Exception e) { + LOG.info(e.getMessage()); + } + } + + @Test (timeout=10000) + public void testPmemMapMultipleFiles() { + assumeNotWindows("Native PMDK not supported on Windows"); + // Skip testing while the build or environment does not support PMDK + assumeTrue(NativeIO.POSIX.isPmdkAvailable()); + + // Please make sure /mnt/pmem0 is a persistent memory device with total + // volume size 'volumeSize' + String filePath = "/mnt/pmem0/test_native_io"; + long length = 0; + long volumnSize = 16 * 1024 * 1024 * 1024L; + + // Multiple files, each with 128MB size, aggregated size exceeds volume + // limit 16GB + length = 128 * 1024 * 1024L; + long fileNumber = volumnSize / length; + LOG.info("File number = " + fileNumber); + for (int i = 0; i < fileNumber; i++) { + String path = filePath + i; + LOG.info("File path = " + path); + NativeIO.POSIX.Pmem.mapBlock(path, length); + } + try { + NativeIO.POSIX.Pmem.mapBlock(filePath, length); + fail("Request map extra file when persistent memory is all occupied"); + } catch (Exception e) { + LOG.info(e.getMessage()); + } + } + + @Test (timeout=10000) + public void testPmemMapBigFile() { + assumeNotWindows("Native PMDK not supported on Windows"); + // Skip testing while the build or environment does not support PMDK + assumeTrue(NativeIO.POSIX.isPmdkAvailable()); + + // Please make sure /mnt/pmem0 is a persistent memory device with total + // volume size 'volumeSize' + String filePath = "/mnt/pmem0/test_native_io_big"; + long length = 0; + long volumeSize = 16 * 1024 * 1024 * 1024L; + + // One file length exceeds persistent memory volume 16GB. 
+ length = volumeSize + 1024L; + try { + LOG.info("File length = " + length); + NativeIO.POSIX.Pmem.mapBlock(filePath, length); + fail("File length exceeds persistent memory total volume size"); + }catch (Exception e) { + LOG.info(e.getMessage()); + deletePmemMappedFile(filePath); + } + } + + @Test (timeout=10000) + public void testPmemCopy() throws IOException { + assumeNotWindows("Native PMDK not supported on Windows"); + // Skip testing while the build or environment does not support PMDK + assumeTrue(NativeIO.POSIX.isPmdkAvailable()); + + // Create and map a block file. Please make sure /mnt/pmem0 is a persistent + // memory device. + String filePath = "/mnt/pmem0/copy"; + long length = 4096; + PmemMappedRegion region = NativeIO.POSIX.Pmem.mapBlock(filePath, length); + assertTrue(NativeIO.POSIX.Pmem.isPmem(region.getAddress(), length)); + assertFalse(NativeIO.POSIX.Pmem.isPmem(region.getAddress(), length + 100)); + assertFalse(NativeIO.POSIX.Pmem.isPmem(region.getAddress() + 100, length)); + assertFalse(NativeIO.POSIX.Pmem.isPmem(region.getAddress() - 100, length)); + + // Copy content to mapped file + byte[] data = generateSequentialBytes(0, (int) length); + NativeIO.POSIX.Pmem.memCopy(data, region.getAddress(), region.isPmem(), + length); + + // Read content before pmemSync + byte[] readBuf1 = new byte[(int)length]; + IOUtils.readFully(new FileInputStream(filePath), readBuf1, 0, (int)length); + assertArrayEquals(data, readBuf1); + + byte[] readBuf2 = new byte[(int)length]; + // Sync content to persistent memory twice + NativeIO.POSIX.Pmem.memSync(region); + NativeIO.POSIX.Pmem.memSync(region); + // Read content after pmemSync twice + IOUtils.readFully(new FileInputStream(filePath), readBuf2, 0, (int)length); + assertArrayEquals(data, readBuf2); + + //Read content after unmap twice + NativeIO.POSIX.Pmem.unmapBlock(region.getAddress(), length); + NativeIO.POSIX.Pmem.unmapBlock(region.getAddress(), length); + byte[] readBuf3 = new byte[(int)length]; + IOUtils.readFully(new FileInputStream(filePath), readBuf3, 0, (int)length); + assertArrayEquals(data, readBuf3); + } + + private static byte[] generateSequentialBytes(int start, int length) { + byte[] result = new byte[length]; + + for (int i = 0; i < length; i++) { + result[i] = (byte) ((start + i) % 127); + } + return result; + } + + private static void deletePmemMappedFile(String filePath) { + try { + if (filePath != null) { + boolean result = Files.deleteIfExists(Paths.get(filePath)); + if (!result) { + throw new IOException(); + } + } + } catch (Throwable e) { + LOG.error("Failed to delete the mapped file " + filePath + + " from persistent memory", e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java index 4fab214a05e43..37e548e22049f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -214,6 +214,28 @@ String getReplicaCachePath(String bpid, long blockId) { return PmemVolumeManager.getInstance().getCachePath(key); } + /** + * Get cache address on persistent memory for read operation. + * The cache address comes from PMDK lib function when mapping + * block to persistent memory. 
+ * + * @param bpid blockPoolId + * @param blockId blockId + * @return address + */ + long getCacheAddress(String bpid, long blockId) { + if (cacheLoader.isTransientCache() || + !isCached(bpid, blockId)) { + return -1; + } + if (!(cacheLoader.isNativeLoader())) { + return -1; + } + ExtendedBlockId key = new ExtendedBlockId(blockId, bpid); + MappableBlock mappableBlock = mappableBlockMap.get(key).mappableBlock; + return mappableBlock.getAddress(); + } + /** * @return List of cached blocks suitable for translation into a * {@link BlockListAsLongs} for a cache report. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 80738d3dcf268..76110d68b88a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -803,6 +803,14 @@ private InputStream getBlockInputStreamWithCheckingPmemCache( String cachePath = cacheManager.getReplicaCachePath( b.getBlockPoolId(), b.getBlockId()); if (cachePath != null) { + long addr = cacheManager.getCacheAddress( + b.getBlockPoolId(), b.getBlockId()); + if (addr != -1) { + LOG.debug("Get InputStream by cache address."); + return FsDatasetUtil.getDirectInputStream( + addr, info.getBlockDataLength()); + } + LOG.debug("Get InputStream by cache file path."); return FsDatasetUtil.getInputStreamAndSeek( new File(cachePath), seekOffset); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java index 5308b60b594f5..fbd02c76820cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java @@ -25,7 +25,10 @@ import java.io.IOException; import java.io.InputStream; import java.io.RandomAccessFile; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; import java.net.URI; +import java.nio.ByteBuffer; import java.nio.channels.Channels; import java.nio.file.Files; import java.nio.file.Paths; @@ -42,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.DataChecksum; +import org.apache.htrace.shaded.fasterxml.jackson.databind.util.ByteBufferBackedInputStream; /** Utility methods. 
*/ @InterfaceAudience.Private @@ -131,6 +135,24 @@ public static InputStream getInputStreamAndSeek(File file, long offset) } } + public static InputStream getDirectInputStream(long addr, long length) + throws IOException { + try { + Class directByteBufferClass = + Class.forName("java.nio.DirectByteBuffer"); + Constructor constructor = + directByteBufferClass.getDeclaredConstructor(long.class, int.class); + constructor.setAccessible(true); + ByteBuffer byteBuffer = + (ByteBuffer) constructor.newInstance(addr, (int)length); + return new ByteBufferBackedInputStream(byteBuffer); + } catch (ClassNotFoundException | NoSuchMethodException | + IllegalAccessException | InvocationTargetException | + InstantiationException e) { + throw new IOException(e); + } + } + /** * Find the meta-file for the specified block file and then return the * generation stamp from the name of the meta-file. Generally meta file will diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java index 0fff32741c658..a00c442b834d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java @@ -35,4 +35,10 @@ public interface MappableBlock extends Closeable { * @return the number of bytes that have been cached. */ long getLength(); + + /** + * Get cache address if applicable. + * Return -1 if not applicable. + */ + long getAddress(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java index 3ec84164c879e..5b9ba3a1d6a25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java @@ -64,8 +64,7 @@ public abstract class MappableBlockLoader { * @return The Mappable block. */ abstract MappableBlock load(long length, FileInputStream blockIn, - FileInputStream metaIn, String blockFileName, - ExtendedBlockId key) + FileInputStream metaIn, String blockFileName, ExtendedBlockId key) throws IOException; /** @@ -106,6 +105,11 @@ abstract MappableBlock load(long length, FileInputStream blockIn, */ abstract boolean isTransientCache(); + /** + * Check whether this is a native pmem cache loader. + */ + abstract boolean isNativeLoader(); + /** * Clean up cache, can be used during DataNode shutdown. */ @@ -117,8 +121,7 @@ void shutdown() { * Verifies the block's checksum. This is an I/O intensive operation. 
*/ protected void verifyChecksum(long length, FileInputStream metaIn, - FileChannel blockChannel, String blockFileName) - throws IOException { + FileChannel blockChannel, String blockFileName) throws IOException { // Verify the checksum from the block's meta file // Get the DataChecksum from the meta file header BlockMetadataHeader header = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoaderFactory.java index 43b1b531afe9a..65693735b2808 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoaderFactory.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.datanode.DNConf; +import org.apache.hadoop.io.nativeio.NativeIO; /** * Creates MappableBlockLoader. @@ -42,6 +43,9 @@ public static MappableBlockLoader createCacheLoader(DNConf conf) { if (conf.getPmemVolumes() == null || conf.getPmemVolumes().length == 0) { return new MemoryMappableBlockLoader(); } + if (NativeIO.isAvailable() && NativeIO.POSIX.isPmdkAvailable()) { + return new NativePmemMappableBlockLoader(); + } return new PmemMappableBlockLoader(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java index 52d8d931c0490..dd4188c0b12bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java @@ -66,8 +66,7 @@ void initialize(FsDatasetCache cacheManager) throws IOException { */ @Override MappableBlock load(long length, FileInputStream blockIn, - FileInputStream metaIn, String blockFileName, - ExtendedBlockId key) + FileInputStream metaIn, String blockFileName, ExtendedBlockId key) throws IOException { MemoryMappedBlock mappableBlock = null; MappedByteBuffer mmap = null; @@ -116,4 +115,9 @@ long release(ExtendedBlockId key, long bytesCount) { public boolean isTransientCache() { return true; } + + @Override + public boolean isNativeLoader() { + return false; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappedBlock.java index c09ad1a588731..47dfeae32621b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappedBlock.java @@ -44,6 +44,11 @@ public long getLength() { return length; } + @Override + public long getAddress() { + return -1L; + } + @Override public void close() { if (mmap != null) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java new file mode 100644 index 0000000000000..09e9454e764cf --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java @@ -0,0 +1,191 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; + +import com.google.common.base.Preconditions; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.ExtendedBlockId; +import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; +import org.apache.hadoop.io.nativeio.NativeIO; +import org.apache.hadoop.io.nativeio.NativeIO.POSIX; +import org.apache.hadoop.util.DataChecksum; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedInputStream; +import java.io.DataInputStream; +import java.io.FileInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; + +/** + * Map block to persistent memory with native PMDK libs. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class NativePmemMappableBlockLoader extends PmemMappableBlockLoader { + private static final Logger LOG = + LoggerFactory.getLogger(NativePmemMappableBlockLoader.class); + + @Override + void initialize(FsDatasetCache cacheManager) throws IOException { + super.initialize(cacheManager); + } + + /** + * Load the block. + * + * Map the block and verify its checksum. + * + * The block will be mapped to PmemDir/BlockPoolId-BlockId, in which PmemDir + * is a persistent memory volume chosen by PmemVolumeManager. + * + * @param length The current length of the block. + * @param blockIn The block input stream. Should be positioned at the + * start. The caller must close this. + * @param metaIn The meta file input stream. Should be positioned at + * the start. The caller must close this. + * @param blockFileName The block file name, for logging purposes. + * @param key The extended block ID. + * + * @throws IOException If mapping block to persistent memory fails or + * checksum fails. + * + * @return The Mappable block. 
+ */ + @Override + public MappableBlock load(long length, FileInputStream blockIn, + FileInputStream metaIn, String blockFileName, + ExtendedBlockId key) + throws IOException { + NativePmemMappedBlock mappableBlock = null; + POSIX.PmemMappedRegion region = null; + String filePath = null; + + FileChannel blockChannel = null; + try { + blockChannel = blockIn.getChannel(); + if (blockChannel == null) { + throw new IOException("Block InputStream has no FileChannel."); + } + + assert NativeIO.isAvailable(); + filePath = PmemVolumeManager.getInstance().getCachePath(key); + region = POSIX.Pmem.mapBlock(filePath, length); + if (region == null) { + throw new IOException("Failed to map the block " + blockFileName + + " to persistent storage."); + } + verifyChecksumAndMapBlock(region, length, metaIn, blockChannel, + blockFileName); + mappableBlock = new NativePmemMappedBlock(region.getAddress(), + region.getLength(), key); + LOG.info("Successfully cached one replica:{} into persistent memory" + + ", [cached path={}, address={}, length={}]", key, filePath, + region.getAddress(), length); + } finally { + IOUtils.closeQuietly(blockChannel); + if (mappableBlock == null) { + if (region != null) { + // unmap content from persistent memory + POSIX.Pmem.unmapBlock(region.getAddress(), + region.getLength()); + FsDatasetUtil.deleteMappedFile(filePath); + } + } + } + return mappableBlock; + } + + /** + * Verifies the block's checksum meanwhile map block to persistent memory. + * This is an I/O intensive operation. + */ + private void verifyChecksumAndMapBlock(POSIX.PmemMappedRegion region, + long length, FileInputStream metaIn, FileChannel blockChannel, + String blockFileName) throws IOException { + // Verify the checksum from the block's meta file + // Get the DataChecksum from the meta file header + BlockMetadataHeader header = + BlockMetadataHeader.readHeader(new DataInputStream( + new BufferedInputStream(metaIn, BlockMetadataHeader + .getHeaderSize()))); + FileChannel metaChannel = null; + try { + metaChannel = metaIn.getChannel(); + if (metaChannel == null) { + throw new IOException("Cannot get FileChannel" + + " from Block InputStream meta file."); + } + DataChecksum checksum = header.getChecksum(); + final int bytesPerChecksum = checksum.getBytesPerChecksum(); + final int checksumSize = checksum.getChecksumSize(); + final int numChunks = (8 * 1024 * 1024) / bytesPerChecksum; + ByteBuffer blockBuf = ByteBuffer.allocate(numChunks * bytesPerChecksum); + ByteBuffer checksumBuf = ByteBuffer.allocate(numChunks * checksumSize); + // Verify the checksum + int bytesVerified = 0; + long mappedAddress = -1L; + if (region != null) { + mappedAddress = region.getAddress(); + } + while (bytesVerified < length) { + Preconditions.checkState(bytesVerified % bytesPerChecksum == 0, + "Unexpected partial chunk before EOF."); + assert bytesVerified % bytesPerChecksum == 0; + int bytesRead = fillBuffer(blockChannel, blockBuf); + if (bytesRead == -1) { + throw new IOException( + "Checksum verification failed for the block " + blockFileName + + ": premature EOF"); + } + blockBuf.flip(); + // Number of read chunks, including partial chunk at end + int chunks = (bytesRead + bytesPerChecksum - 1) / bytesPerChecksum; + checksumBuf.limit(chunks * checksumSize); + fillBuffer(metaChannel, checksumBuf); + checksumBuf.flip(); + checksum.verifyChunkedSums(blockBuf, checksumBuf, blockFileName, + bytesVerified); + // Success + bytesVerified += bytesRead; + // Copy data to persistent file + POSIX.Pmem.memCopy(blockBuf.array(), mappedAddress, 
+ region.isPmem(), bytesRead); + mappedAddress += bytesRead; + // Clear buffer + blockBuf.clear(); + checksumBuf.clear(); + } + if (region != null) { + POSIX.Pmem.memSync(region); + } + } finally { + IOUtils.closeQuietly(metaChannel); + } + } + + @Override + public boolean isNativeLoader() { + return true; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappedBlock.java new file mode 100644 index 0000000000000..92012b2d930e2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappedBlock.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.ExtendedBlockId; +import org.apache.hadoop.io.nativeio.NativeIO; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +/** + * Represents an HDFS block that is mapped to persistent memory by the DataNode. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class NativePmemMappedBlock implements MappableBlock { + private static final Logger LOG = + LoggerFactory.getLogger(NativePmemMappedBlock.class); + + private long pmemMappedAddress = -1L; + private long length; + private ExtendedBlockId key; + + NativePmemMappedBlock(long pmemMappedAddress, long length, + ExtendedBlockId key) { + assert length > 0; + this.pmemMappedAddress = pmemMappedAddress; + this.length = length; + this.key = key; + } + + @Override + public long getLength() { + return length; + } + + @Override + public long getAddress() { + return pmemMappedAddress; + } + + @Override + public void close() { + if (pmemMappedAddress != -1L) { + String cacheFilePath = + PmemVolumeManager.getInstance().getCachePath(key); + try { + // Current libpmem will report error when pmem_unmap is called with + // length not aligned with page size, although the length is returned + // by pmem_map_file. 
+ boolean success = + NativeIO.POSIX.Pmem.unmapBlock(pmemMappedAddress, length); + if (!success) { + throw new IOException("Failed to unmap the mapped file from " + + "pmem address: " + pmemMappedAddress); + } + pmemMappedAddress = -1L; + FsDatasetUtil.deleteMappedFile(cacheFilePath); + LOG.info("Successfully uncached one replica:{} from persistent memory" + + ", [cached path={}, length={}]", key, cacheFilePath, length); + } catch (IOException e) { + LOG.warn("IOException occurred for block {}!", key, e); + } + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java index 239fff815b1bd..70a42c41f2875 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java @@ -43,7 +43,7 @@ public class PmemMappableBlockLoader extends MappableBlockLoader { @Override void initialize(FsDatasetCache cacheManager) throws IOException { - LOG.info("Initializing cache loader: PmemMappableBlockLoader."); + LOG.info("Initializing cache loader: " + this.getClass().getName()); DNConf dnConf = cacheManager.getDnConf(); PmemVolumeManager.init(dnConf.getPmemVolumes()); pmemVolumeManager = PmemVolumeManager.getInstance(); @@ -71,8 +71,7 @@ void initialize(FsDatasetCache cacheManager) throws IOException { */ @Override MappableBlock load(long length, FileInputStream blockIn, - FileInputStream metaIn, String blockFileName, - ExtendedBlockId key) + FileInputStream metaIn, String blockFileName, ExtendedBlockId key) throws IOException { PmemMappedBlock mappableBlock = null; String cachePath = null; @@ -132,6 +131,11 @@ public boolean isTransientCache() { return false; } + @Override + public boolean isNativeLoader() { + return false; + } + @Override void shutdown() { LOG.info("Clean up cache on persistent memory during shutdown."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappedBlock.java index 25c3d400bd2aa..a49626a321b25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappedBlock.java @@ -49,6 +49,11 @@ public long getLength() { return length; } + @Override + public long getAddress() { + return -1L; + } + @Override public void close() { String cacheFilePath = From 0b1e288deb2c330521b9bb1d1803481afe49168b Mon Sep 17 00:00:00 2001 From: ChenSammi Date: Thu, 6 Jun 2019 00:09:36 +0800 Subject: [PATCH 0122/1308] HDDS-1637. Fix random test failure TestSCMContainerPlacementRackAware. Contributed by Sammi Chen. 
(#904) --- .../algorithms/SCMContainerPlacementRackAware.java | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index ffebb84c4a7a0..e126f27c1f1dd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -237,6 +237,7 @@ private Node chooseNode(List excludedNodes, Node affinityNode, long sizeRequired) throws SCMException { int ancestorGen = RACK_LEVEL; int maxRetry = MAX_RETRY; + List excludedNodesForCapacity = null; while(true) { Node node = networkTopology.chooseRandom(NetConstants.ROOT, null, excludedNodes, affinityNode, ancestorGen); @@ -265,6 +266,9 @@ private Node chooseNode(List excludedNodes, Node affinityNode, if (hasEnoughSpace((DatanodeDetails)node, sizeRequired)) { LOG.debug("Datanode {} is chosen. Required size is {}", node.toString(), sizeRequired); + if (excludedNodes != null && excludedNodesForCapacity != null) { + excludedNodes.removeAll(excludedNodesForCapacity); + } return node; } else { maxRetry--; @@ -275,6 +279,15 @@ private Node chooseNode(List excludedNodes, Node affinityNode, LOG.info(errMsg); throw new SCMException(errMsg, null); } + if (excludedNodesForCapacity == null) { + excludedNodesForCapacity = new ArrayList<>(); + } + excludedNodesForCapacity.add(node); + if (excludedNodes == null) { + excludedNodes = excludedNodesForCapacity; + } else { + excludedNodes.add(node); + } } } } From 3b1c2577d773ab42578033721c39822965092e56 Mon Sep 17 00:00:00 2001 From: Ajay Yadav <7813154+ajayydv@users.noreply.github.com> Date: Wed, 5 Jun 2019 14:42:10 -0700 Subject: [PATCH 0123/1308] HDDS-1541. Implement addAcl,removeAcl,setAcl,getAcl for Key. Contributed by Ajay Kumat. 
(#885) --- .../hadoop/ozone/client/rpc/RpcClient.java | 29 +- .../org/apache/hadoop/ozone/OzoneAcl.java | 3 +- .../hadoop/ozone/om/helpers/OmKeyArgs.java | 18 +- .../hadoop/ozone/om/helpers/OmKeyInfo.java | 26 +- .../ozone/om/helpers/OmOzoneAclMap.java | 34 +- ...ManagerProtocolClientSideTranslatorPB.java | 11 + .../ozone/security/acl/OzoneObjInfo.java | 57 +-- .../hadoop/ozone/web/utils/OzoneUtils.java | 29 ++ .../src/main/proto/OzoneManagerProtocol.proto | 2 + .../rpc/TestOzoneRpcClientAbstract.java | 284 ++++++++------ .../om/TestMultipleContainerReadWrite.java | 2 + .../ozone/om/TestOmBlockVersioning.java | 3 + .../hadoop/ozone/om/TestOzoneManager.java | 4 + .../hadoop/ozone/om/TestScmSafeMode.java | 2 + .../storage/DistributedStorageHandler.java | 7 + .../hadoop/ozone/om/BucketManagerImpl.java | 116 +++--- .../apache/hadoop/ozone/om/KeyManager.java | 40 ++ .../hadoop/ozone/om/KeyManagerImpl.java | 359 +++++++++++++++++- .../apache/hadoop/ozone/om/OzoneManager.java | 11 +- .../hadoop/ozone/om/VolumeManagerImpl.java | 4 +- .../om/ratis/OzoneManagerRatisServer.java | 2 + .../OzoneManagerRequestHandler.java | 12 + .../ozone/om/TestKeyDeletingService.java | 2 + .../hadoop/ozone/om/TestKeyManagerImpl.java | 13 +- 24 files changed, 837 insertions(+), 233 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index cb6ac539a9a14..48968a4647f80 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -405,15 +405,7 @@ public void createBucket( .setKeyName(bucketArgs.getEncryptionKey()).build(); } - List listOfAcls = new ArrayList<>(); - //User ACL - listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, - ugi.getUserName(), userRights)); - //Group ACLs of the User - List userGroups = Arrays.asList(UserGroupInformation - .createRemoteUser(ugi.getUserName()).getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(ACLIdentityType.GROUP, group, groupRights))); + List listOfAcls = getAclList(); //ACLs from BucketArgs if(bucketArgs.getAcls() != null) { listOfAcls.addAll(bucketArgs.getAcls()); @@ -437,6 +429,16 @@ public void createBucket( ozoneManagerClient.createBucket(builder.build()); } + /** + * Helper function to get default acl list for current user. 
+ * + * @return listOfAcls + * */ + private List getAclList() { + return OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), + userRights, groupRights); + } + @Override public void addBucketAcls( String volumeName, String bucketName, List addAcls) @@ -629,6 +631,7 @@ public OzoneOutputStream createKey( .setType(HddsProtos.ReplicationType.valueOf(type.toString())) .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) .addAllMetadata(metadata) + .setAcls(getAclList()) .build(); OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); @@ -819,6 +822,7 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName, .setKeyName(keyName) .setType(HddsProtos.ReplicationType.valueOf(type.toString())) .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) + .setAcls(getAclList()) .build(); OmMultipartInfo multipartInfo = ozoneManagerClient .initiateMultipartUpload(keyArgs); @@ -848,6 +852,7 @@ public OzoneOutputStream createMultipartKey(String volumeName, .setIsMultipartKey(true) .setMultipartUploadID(uploadID) .setMultipartUploadPartNumber(partNumber) + .setAcls(getAclList()) .build(); OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); @@ -963,7 +968,10 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, public void createDirectory(String volumeName, String bucketName, String keyName) throws IOException { OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setKeyName(keyName).build(); + .setBucketName(bucketName) + .setKeyName(keyName) + .setAcls(getAclList()) + .build(); ozoneManagerClient.createDirectory(keyArgs); } @@ -990,6 +998,7 @@ public OzoneOutputStream createFile(String volumeName, String bucketName, .setDataSize(size) .setType(HddsProtos.ReplicationType.valueOf(type.name())) .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) + .setAcls(getAclList()) .build(); OpenKeySession keySession = ozoneManagerClient.createFile(keyArgs, overWrite, recursive); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 9a50ee03c1351..8ee33b4e9990b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -46,9 +46,10 @@ public class OzoneAcl { private ACLIdentityType type; private String name; private BitSet aclBitSet; + public static final BitSet ZERO_BITSET = new BitSet(0); /** - * Constructor for OzoneAcl. + * Default constructor. 
*/ public OzoneAcl() { } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index d90345cc00742..de07d08fb2d19 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -19,6 +19,7 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.Auditable; @@ -45,13 +46,15 @@ public final class OmKeyArgs implements Auditable { private final int multipartUploadPartNumber; private Map metadata; private boolean refreshPipeline; + private List acls; @SuppressWarnings("parameternumber") private OmKeyArgs(String volumeName, String bucketName, String keyName, long dataSize, ReplicationType type, ReplicationFactor factor, List locationInfoList, boolean isMultipart, String uploadID, int partNumber, - Map metadataMap, boolean refreshPipeline) { + Map metadataMap, boolean refreshPipeline, + List acls) { this.volumeName = volumeName; this.bucketName = bucketName; this.keyName = keyName; @@ -64,6 +67,7 @@ private OmKeyArgs(String volumeName, String bucketName, String keyName, this.multipartUploadPartNumber = partNumber; this.metadata = metadataMap; this.refreshPipeline = refreshPipeline; + this.acls = acls; } public boolean getIsMultipartKey() { @@ -86,6 +90,10 @@ public ReplicationFactor getFactor() { return factor; } + public List getAcls() { + return acls; + } + public String getVolumeName() { return volumeName; } @@ -166,6 +174,7 @@ public static class Builder { private int multipartUploadPartNumber; private Map metadata = new HashMap<>(); private boolean refreshPipeline; + private List acls; public Builder setVolumeName(String volume) { this.volumeName = volume; @@ -202,6 +211,11 @@ public Builder setLocationInfoList(List locationInfos) { return this; } + public Builder setAcls(List listOfAcls) { + this.acls = listOfAcls; + return this; + } + public Builder setIsMultipartKey(boolean isMultipart) { this.isMultipartKey = isMultipart; return this; @@ -235,7 +249,7 @@ public Builder setRefreshPipeline(boolean refresh) { public OmKeyArgs build() { return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, type, factor, locationInfoList, isMultipartKey, multipartUploadID, - multipartUploadPartNumber, metadata, refreshPipeline); + multipartUploadPartNumber, metadata, refreshPipeline, acls); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 07f7909f5dd8f..4e3862de721cc 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.util.Time; @@ -50,6 
+51,10 @@ public final class OmKeyInfo extends WithMetadata { private HddsProtos.ReplicationType type; private HddsProtos.ReplicationFactor factor; private FileEncryptionInfo encInfo; + /** + * ACL Information. + */ + private List acls; @SuppressWarnings("parameternumber") OmKeyInfo(String volumeName, String bucketName, String keyName, @@ -58,7 +63,7 @@ public final class OmKeyInfo extends WithMetadata { HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, Map metadata, - FileEncryptionInfo encInfo) { + FileEncryptionInfo encInfo, List acls) { this.volumeName = volumeName; this.bucketName = bucketName; this.keyName = keyName; @@ -81,6 +86,7 @@ public final class OmKeyInfo extends WithMetadata { this.type = type; this.metadata = metadata; this.encInfo = encInfo; + this.acls = acls; } public String getVolumeName() { @@ -216,6 +222,10 @@ public FileEncryptionInfo getFileEncryptionInfo() { return encInfo; } + public List getAcls() { + return acls; + } + /** * Builder of OmKeyInfo. */ @@ -232,6 +242,7 @@ public static class Builder { private HddsProtos.ReplicationFactor factor; private Map metadata; private FileEncryptionInfo encInfo; + private List acls; public Builder() { this.metadata = new HashMap<>(); @@ -299,11 +310,16 @@ public Builder setFileEncryptionInfo(FileEncryptionInfo feInfo) { return this; } + public Builder setAcls(List listOfAcls) { + this.acls = listOfAcls; + return this; + } + public OmKeyInfo build() { return new OmKeyInfo( volumeName, bucketName, keyName, omKeyLocationInfoGroups, dataSize, creationTime, modificationTime, type, factor, metadata, - encInfo); + encInfo, acls); } } @@ -327,6 +343,9 @@ public KeyInfo getProtobuf() { if (encInfo != null) { kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo)); } + if(acls != null) { + kb.addAllAcls(acls); + } return kb.build(); } @@ -345,7 +364,8 @@ public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) { keyInfo.getFactor(), KeyValueUtil.getFromProtobuf(keyInfo.getMetadataList()), keyInfo.hasFileEncryptionInfo() ? OMPBHelper.convert(keyInfo - .getFileEncryptionInfo()): null); + .getFileEncryptionInfo()): null, + keyInfo.getAclsList()); } @Override diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java index cc181f7549afb..0484f4b00e862 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java @@ -36,6 +36,7 @@ import java.util.HashMap; import java.util.Objects; +import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights.ALL; @@ -81,8 +82,17 @@ public void addAcl(OzoneAcl acl) throws OMException { if (!getMap(aclType).containsKey(acl.getName())) { getMap(aclType).put(acl.getName(), acl.getAclBitSet()); } else { - // throw exception if acl is already added. - throw new OMException("Acl " + acl + " already exist.", INVALID_REQUEST); + // Check if we are adding new rights to existing acl. + BitSet temp = (BitSet) acl.getAclBitSet().clone(); + BitSet curRights = (BitSet) getMap(aclType).get(acl.getName()).clone(); + temp.or(curRights); + + if (temp.equals(curRights)) { + // throw exception if acl is already added. 
+ throw new OMException("Acl " + acl + " already exist.", + INVALID_REQUEST); + } + getMap(aclType).get(acl.getName()).or(acl.getAclBitSet()); } } @@ -105,9 +115,25 @@ public void removeAcl(OzoneAcl acl) throws OMException { Objects.requireNonNull(acl, "Acl should not be null."); OzoneAclType aclType = OzoneAclType.valueOf(acl.getType().name()); if (getMap(aclType).containsKey(acl.getName())) { - getMap(aclType).remove(acl.getName()); + BitSet aclRights = getMap(aclType).get(acl.getName()); + BitSet bits = (BitSet) acl.getAclBitSet().clone(); + bits.and(aclRights); + + if (bits.equals(ZERO_BITSET)) { + // throw exception if acl doesn't exist. + throw new OMException("Acl [" + acl + "] doesn't exist.", + INVALID_REQUEST); + } + + acl.getAclBitSet().and(aclRights); + aclRights.xor(acl.getAclBitSet()); + + // Remove the acl as all rights are already set to 0. + if (aclRights.equals(ZERO_BITSET)) { + getMap(aclType).remove(acl.getName()); + } } else { - // throw exception if acl is already added. + // throw exception if acl doesn't exist. throw new OMException("Acl [" + acl + "] doesn't exist.", INVALID_REQUEST); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 7d1e4151a7110..c93ed3cabac83 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -668,6 +668,11 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()); + if(args.getAcls() != null) { + keyArgs.addAllAcls(args.getAcls().stream().distinct().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } + if (args.getFactor() != null) { keyArgs.setFactor(args.getFactor()); } @@ -991,6 +996,8 @@ public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws .setBucketName(omKeyArgs.getBucketName()) .setKeyName(omKeyArgs.getKeyName()) .setFactor(omKeyArgs.getFactor()) + .addAllAcls(omKeyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .setType(omKeyArgs.getType()); multipartInfoInitiateRequest.setKeyArgs(keyArgs.build()); @@ -1276,6 +1283,8 @@ public void createDirectory(OmKeyArgs args) throws IOException { .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) + .addAllAcls(args.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .build(); CreateDirectoryRequest request = CreateDirectoryRequest.newBuilder() .setKeyArgs(keyArgs) @@ -1412,6 +1421,8 @@ public OpenKeySession createFile(OmKeyArgs args, .setDataSize(args.getDataSize()) .setType(args.getType()) .setFactor(args.getFactor()) + .addAllAcls(args.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .build(); CreateFileRequest createFileRequest = CreateFileRequest.newBuilder() .setKeyArgs(keyArgs) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index 16df10fb274aa..cbb9fb8e21a6d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java 
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -16,10 +16,10 @@ */ package org.apache.hadoop.ozone.security.acl; -import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import java.util.StringTokenizer; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; /** * Class representing an ozone object. @@ -45,16 +45,14 @@ public String getPath() { case VOLUME: return getVolumeName(); case BUCKET: - return getVolumeName() + OzoneConsts.OZONE_URI_DELIMITER - + getBucketName(); + return getVolumeName() + OZONE_URI_DELIMITER + getBucketName(); case KEY: - return getVolumeName() + OzoneConsts.OZONE_URI_DELIMITER - + getBucketName() + OzoneConsts.OZONE_URI_DELIMITER + getKeyName(); + return getVolumeName() + OZONE_URI_DELIMITER + getBucketName() + + OZONE_URI_DELIMITER + getKeyName(); default: throw new IllegalArgumentException("Unknown resource " + "type" + getResourceType()); } - } @Override @@ -77,25 +75,36 @@ public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj Builder builder = new Builder() .setResType(ResourceType.valueOf(proto.getResType().name())) .setStoreType(StoreType.valueOf(proto.getStoreType().name())); - StringTokenizer tokenizer = new StringTokenizer(proto.getPath(), - OzoneConsts.OZONE_URI_DELIMITER); - // Set volume name. - if (tokenizer.hasMoreTokens()) { - builder.setVolumeName(tokenizer.nextToken()); - } - // Set bucket name. - if (tokenizer.hasMoreTokens()) { - builder.setBucketName(tokenizer.nextToken()); + String[] tokens = StringUtils.splitPreserveAllTokens(proto.getPath(), + OZONE_URI_DELIMITER); + if(tokens == null) { + throw new IllegalArgumentException("Unexpected path:" + proto.getPath()); } - // Set key name - if (tokenizer.hasMoreTokens()) { - StringBuffer sb = new StringBuffer(); - while (tokenizer.hasMoreTokens()) { - sb.append(OzoneConsts.OZONE_URI_DELIMITER); - sb.append(tokenizer.nextToken()); - sb.append(OzoneConsts.OZONE_URI_DELIMITER); + // Set volume name. + switch (proto.getResType()) { + case VOLUME: + builder.setVolumeName(tokens[0]); + break; + case BUCKET: + if (tokens.length < 2) { + throw new IllegalArgumentException("Unexpected argument for " + + "Ozone key. Path:" + proto.getPath()); + } + builder.setVolumeName(tokens[0]); + builder.setBucketName(tokens[1]); + break; + case KEY: + if (tokens.length != 3) { + throw new IllegalArgumentException("Unexpected argument for " + + "Ozone key. Path:" + proto.getPath()); } - builder.setKeyName(sb.toString()); + builder.setVolumeName(tokens[0]); + builder.setBucketName(tokens[1]); + builder.setKeyName(tokens[2]); + break; + default: + throw new IllegalArgumentException("Unexpected type for " + + "Ozone key. 
Type:" + proto.getResType()); } return builder.build(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java index 24840910300e3..70bc3ed3e2394 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java @@ -23,6 +23,8 @@ import java.nio.charset.Charset; import java.text.ParseException; import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.List; import java.util.Locale; import java.util.TimeZone; import java.util.UUID; @@ -31,11 +33,16 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.ratis.util.TimeDuration; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; + /** * Set of Utility functions used in ozone. */ @@ -236,4 +243,26 @@ public static long getTimeDurationInMS(Configuration conf, String key, return getTimeDuration(conf, key, defaultValue) .toLong(TimeUnit.MILLISECONDS); } + + /** + * Helper function to get deafult acl list for current user. + * + * @param userName + * @param userGroups + * @return listOfAcls + * */ + public static List getAclList(String userName, + List userGroups, ACLType userRights, ACLType groupRights) { + + List listOfAcls = new ArrayList<>(); + + // User ACL. + listOfAcls.add(new OzoneAcl(USER, userName, userRights)); + if(userGroups != null) { + // Group ACLs of the User. 
+ userGroups.stream().forEach((group) -> listOfAcls.add( + new OzoneAcl(GROUP, group, groupRights))); + } + return listOfAcls; + } } diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index 694c641194f2e..303241e27e2ed 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -615,6 +615,7 @@ message KeyArgs { optional string multipartUploadID = 9; optional uint32 multipartNumber = 10; repeated hadoop.hdds.KeyValue metadata = 11; + repeated OzoneAclInfo acls = 12; } message KeyLocation { @@ -652,6 +653,7 @@ message KeyInfo { optional uint64 latestVersion = 10; repeated hadoop.hdds.KeyValue metadata = 11; optional FileEncryptionInfoProto fileEncryptionInfo = 12; + repeated OzoneAclInfo acls = 13; } message OzoneFileStatusProto { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 3810270a7ca1d..2a03107254be1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -84,8 +84,10 @@ import org.apache.hadoop.ozone.s3.util.OzoneS3Util; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Time; @@ -94,6 +96,9 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; + +import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; +import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.either; import org.junit.Assert; @@ -607,8 +612,8 @@ public void testPutKey() String keyName = UUID.randomUUID().toString(); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); + value.getBytes().length, STAND_ALONE, + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); OzoneKey key = bucket.getKey(keyName); @@ -617,8 +622,8 @@ public void testPutKey() byte[] fileContent = new byte[value.getBytes().length]; is.read(fileContent); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE)); + keyName, STAND_ALONE, + ONE)); Assert.assertEquals(value, new String(fileContent)); Assert.assertTrue(key.getCreationTime() >= currentTime); Assert.assertTrue(key.getModificationTime() >= currentTime); @@ -639,7 +644,7 @@ public void testValidateBlockLengthWithCommitKey() throws IOException { // create the initial key with size 0, write will allocate the first block. 
OzoneOutputStream out = bucket.createKey(keyName, 0, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>()); + STAND_ALONE, ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); @@ -677,7 +682,7 @@ public void testPutKeyRatisOneNode() OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); OzoneKey key = bucket.getKey(keyName); @@ -687,7 +692,7 @@ public void testPutKeyRatisOneNode() is.read(fileContent); is.close(); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, ReplicationType.RATIS, ReplicationFactor.ONE)); + keyName, ReplicationType.RATIS, ONE)); Assert.assertEquals(value, new String(fileContent)); Assert.assertTrue(key.getCreationTime() >= currentTime); Assert.assertTrue(key.getModificationTime() >= currentTime); @@ -832,7 +837,7 @@ private void createAndCorruptKey(String volumeName, String bucketName, // Write data into a key OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); @@ -904,8 +909,8 @@ public void testGetKeyDetails() throws IOException, OzoneException { //String keyValue = "this is a test value.glx"; // create the initial key with size 0, write will allocate the first block. OzoneOutputStream out = bucket.createKey(keyName, - keyValue.getBytes().length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); + keyValue.getBytes().length, STAND_ALONE, + ONE, new HashMap<>()); out.write(keyValue.getBytes()); out.close(); @@ -993,7 +998,7 @@ public void testReadKeyWithCorruptedData() throws IOException { // Write data into a key OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); @@ -1161,8 +1166,8 @@ public void testDeleteKey() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); + value.getBytes().length, STAND_ALONE, + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); OzoneKey key = bucket.getKey(keyName); @@ -1185,8 +1190,8 @@ public void testRenameKey() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OzoneOutputStream out = bucket.createKey(fromKeyName, - value.getBytes().length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); + value.getBytes().length, STAND_ALONE, + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); OzoneKey key = bucket.getKey(fromKeyName); @@ -1380,25 +1385,25 @@ public void testListKey() byte[] value = RandomStringUtils.randomAscii(10240).getBytes(); OzoneOutputStream one = volAbucketA.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); one.write(value); one.close(); OzoneOutputStream two = volAbucketB.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new 
HashMap<>()); two.write(value); two.close(); OzoneOutputStream three = volBbucketA.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); three.write(value); three.close(); OzoneOutputStream four = volBbucketB.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); four.write(value); four.close(); @@ -1413,25 +1418,25 @@ public void testListKey() byte[] value = RandomStringUtils.randomAscii(10240).getBytes(); OzoneOutputStream one = volAbucketA.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); one.write(value); one.close(); OzoneOutputStream two = volAbucketB.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); two.write(value); two.close(); OzoneOutputStream three = volBbucketA.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); three.write(value); three.close(); OzoneOutputStream four = volBbucketB.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); four.write(value); four.close(); @@ -1512,7 +1517,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE); + STAND_ALONE, ONE); assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -1524,7 +1529,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws // Call initiate multipart upload for the same key again, this should // generate a new uploadID. 
multipartInfo = bucket.initiateMultipartUpload(keyName, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE); + STAND_ALONE, ONE); assertNotNull(multipartInfo); Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); @@ -1580,7 +1585,7 @@ public void testUploadPartWithNoOverride() throws IOException { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE); + STAND_ALONE, ONE); assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -1618,7 +1623,7 @@ public void testUploadPartOverrideWithStandAlone() throws IOException { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE); + STAND_ALONE, ONE); assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -1746,7 +1751,6 @@ public void testMultipartUpload() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); doMultipartUpload(bucket, keyName, (byte)98); - } @@ -1782,18 +1786,18 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); // Initiate multipart upload - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); // Upload Parts Map partsMap = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, "data".getBytes( - UTF_8)); + String partName = uploadPart(bucket, keyName, uploadID, 1, + "data".getBytes(UTF_8)); partsMap.put(1, partName); - partName = uploadPart(bucket, keyName, uploadID, 2, "data".getBytes( - UTF_8)); + partName = uploadPart(bucket, keyName, uploadID, 2, + "data".getBytes(UTF_8)); partsMap.put(2, partName); @@ -1815,8 +1819,8 @@ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); // We have not uploaded any parts, but passing some list it should throw // error. 
@@ -1840,8 +1844,8 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw @@ -1865,8 +1869,8 @@ public void testMultipartUploadWithMissingParts() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw @@ -1905,8 +1909,8 @@ public void testAbortUploadSuccessWithOutAnyParts() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); bucket.abortMultipartUpload(keyName, uploadID); } @@ -1921,8 +1925,8 @@ public void testAbortUploadSuccessWithParts() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); bucket.abortMultipartUpload(keyName, uploadID); } @@ -1939,8 +1943,8 @@ public void testListMultipartUploadParts() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); String partName1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); @@ -1956,7 +1960,7 @@ public void testListMultipartUploadParts() throws Exception { OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); - Assert.assertEquals(ReplicationType.STAND_ALONE, + Assert.assertEquals(STAND_ALONE, ozoneMultipartUploadPartListParts.getReplicationType()); Assert.assertEquals(3, ozoneMultipartUploadPartListParts.getPartInfoList().size()); @@ -1990,8 +1994,8 @@ public void testListMultipartUploadPartsWithContinuation() OzoneBucket bucket = volume.getBucket(bucketName); Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); String partName1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); @@ -2007,7 +2011,7 @@ public void testListMultipartUploadPartsWithContinuation() OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = 
bucket.listParts(keyName, uploadID, 0, 2); - Assert.assertEquals(ReplicationType.STAND_ALONE, + Assert.assertEquals(STAND_ALONE, ozoneMultipartUploadPartListParts.getReplicationType()); Assert.assertEquals(2, @@ -2095,8 +2099,8 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); @@ -2108,7 +2112,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() Assert.assertEquals(0, ozoneMultipartUploadPartListParts.getPartInfoList().size()); - Assert.assertEquals(ReplicationType.STAND_ALONE, + Assert.assertEquals(STAND_ALONE, ozoneMultipartUploadPartListParts.getReplicationType()); // As we don't have any parts with greater than partNumberMarker and list @@ -2138,54 +2142,43 @@ public void testListPartsWithInvalidUploadID() throws Exception { public void testNativeAclsForVolume() throws Exception { String volumeName = UUID.randomUUID().toString(); store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); + OzoneObj ozObj = new OzoneObjInfo.Builder() .setVolumeName(volumeName) .setResType(OzoneObj.ResourceType.VOLUME) .setStoreType(OzoneObj.StoreType.OZONE) .build(); - // Get acls for volume. - List volAcls = store.getAcl(ozObj); - volAcls.forEach(a -> assertTrue(volume.getAcls().contains(a))); - // Remove all acl's. - for (OzoneAcl a : volAcls) { - store.removeAcl(ozObj, a); - } - List newAcls = store.getAcl(ozObj); - OzoneVolume finalVolume = store.getVolume(volumeName); - assertTrue(finalVolume.getAcls().size() == 0); - assertTrue(newAcls.size() == 0); - - // Add acl's and then call getAcl. - for (OzoneAcl a : volAcls) { - // Try removing an acl which doesn't exist, it should return false. - assertFalse(finalVolume.getAcls().contains(a)); - assertFalse(store.removeAcl(ozObj, a)); + validateOzoneAcl(ozObj); + } - assertTrue(store.addAcl(ozObj, a)); - finalVolume = store.getVolume(volumeName); - assertTrue(finalVolume.getAcls().contains(a)); + @Test + public void testNativeAclsForBucket() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); - // Call addAcl again, this time operation will fail as - // acl is already added. - assertFalse(store.addAcl(ozObj, a)); - } - assertTrue(finalVolume.getAcls().size() == volAcls.size()); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + assertNotNull("Bucket creation failed", bucket); + OzoneObj ozObj = new OzoneObjInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setResType(OzoneObj.ResourceType.BUCKET) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); - // Reset acl's. 
- store.setAcl(ozObj, newAcls); - finalVolume = store.getVolume(volumeName); - newAcls = store.getAcl(ozObj); - assertTrue(newAcls.size() == 0); - assertTrue(finalVolume.getAcls().size() == 0); + validateOzoneAcl(ozObj); } @Test - public void testNativeAclsForBucket() throws Exception { + public void testNativeAclsForKey() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); + String key1 = UUID.randomUUID().toString(); + String key2 = UUID.randomUUID().toString(); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); @@ -2193,48 +2186,121 @@ public void testNativeAclsForBucket() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); assertNotNull("Bucket creation failed", bucket); + writeKey(key1, bucket); + writeKey(key2, bucket); + OzoneObj ozObj = new OzoneObjInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) - .setResType(OzoneObj.ResourceType.BUCKET) + .setKeyName(key1) + .setResType(OzoneObj.ResourceType.KEY) .setStoreType(OzoneObj.StoreType.OZONE) .build(); + + validateOzoneAcl(ozObj); + } + + /** + * Helper function to get default acl list for current user. + * + * @return list of default Acls. + * @throws IOException + * */ + private List getAclList(OzoneConfiguration conf) + throws IOException { + List listOfAcls = new ArrayList<>(); + //User ACL + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); + ACLType userRights = aclConfig.getUserDefaultRights(); + ACLType groupRights = aclConfig.getGroupDefaultRights(); + + listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, + ugi.getUserName(), userRights)); + //Group ACLs of the User + List userGroups = Arrays.asList(UserGroupInformation + .createRemoteUser(ugi.getUserName()).getGroupNames()); + userGroups.stream().forEach((group) -> listOfAcls.add( + new OzoneAcl(ACLIdentityType.GROUP, group, groupRights))); + return listOfAcls; + } + + /** + * Helper function to validate ozone Acl for given object. + * @param ozObj + * */ + private void validateOzoneAcl(OzoneObj ozObj) throws IOException { // Get acls for volume. - List volAcls = store.getAcl(ozObj); - volAcls.forEach(a -> assertTrue(bucket.getAcls().contains(a))); + List expectedAcls = getAclList(new OzoneConfiguration()); + + // Case:1 Add new acl permission to existing acl. + if(expectedAcls.size()>0) { + OzoneAcl oldAcl = expectedAcls.get(0); + OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), + ACLType.READ_ACL); + // Verify that operation successful. + assertTrue(store.addAcl(ozObj, newAcl)); + List acls = store.getAcl(ozObj); + + assertTrue(acls.size() == expectedAcls.size()); + boolean aclVerified = false; + for(OzoneAcl acl: acls) { + if(acl.getName().equals(newAcl.getName())) { + assertTrue(acl.getAclList().contains(ACLType.READ_ACL)); + aclVerified = true; + } + } + assertTrue("New acl expected but not found.", aclVerified); + aclVerified = false; + + // Case:2 Remove newly added acl permission. 
+ assertTrue(store.removeAcl(ozObj, newAcl)); + acls = store.getAcl(ozObj); + assertTrue(acls.size() == expectedAcls.size()); + for(OzoneAcl acl: acls) { + if(acl.getName().equals(newAcl.getName())) { + assertFalse(acl.getAclList().contains(ACLType.READ_ACL)); + aclVerified = true; + } + } + assertTrue("New acl expected but not found.", aclVerified); + } else { + fail("Default acl should not be empty."); + } + + List keyAcls = store.getAcl(ozObj); + expectedAcls.forEach(a -> assertTrue(keyAcls.contains(a))); // Remove all acl's. - for (OzoneAcl a : volAcls) { - assertTrue(store.removeAcl(ozObj, a)); + for (OzoneAcl a : expectedAcls) { + store.removeAcl(ozObj, a); } List newAcls = store.getAcl(ozObj); - OzoneBucket finalBuck = volume.getBucket(bucketName); - assertTrue(finalBuck.getAcls().size() == 0); assertTrue(newAcls.size() == 0); // Add acl's and then call getAcl. - for (OzoneAcl a : volAcls) { - // Try removing an acl which doesn't exist, it should return false. - assertFalse(finalBuck.getAcls().contains(a)); - assertFalse(store.removeAcl(ozObj, a)); - - // Add acl should succeed. + int aclCount = 0; + for (OzoneAcl a : expectedAcls) { + aclCount++; assertTrue(store.addAcl(ozObj, a)); - finalBuck = volume.getBucket(bucketName); - assertTrue(finalBuck.getAcls().contains(a)); - - // Call addAcl again, this time operation will return false as - // acl is already added. - assertFalse(store.addAcl(ozObj, a)); + assertTrue(store.getAcl(ozObj).size() == aclCount); } - assertTrue(finalBuck.getAcls().size() == volAcls.size()); + newAcls = store.getAcl(ozObj); + assertTrue(newAcls.size() == expectedAcls.size()); + List finalNewAcls = newAcls; + expectedAcls.forEach(a -> assertTrue(finalNewAcls.contains(a))); // Reset acl's. - store.setAcl(ozObj, newAcls); - finalBuck = volume.getBucket(bucketName); + store.setAcl(ozObj, new ArrayList<>()); newAcls = store.getAcl(ozObj); assertTrue(newAcls.size() == 0); - assertTrue(finalBuck.getAcls().size() == 0); + } + + private void writeKey(String key1, OzoneBucket bucket) throws IOException { + OzoneOutputStream out = bucket.createKey(key1, 1024, STAND_ALONE, + ONE, new HashMap<>()); + out.write(RandomStringUtils.random(1024).getBytes()); + out.close(); } private byte[] generateData(int size, byte val) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java index 529d476e615ff..08e4130486659 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java @@ -112,6 +112,7 @@ public void testWriteRead() throws Exception { String dataString = RandomStringUtils.randomAscii(3 * (int)OzoneConsts.MB); KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); keyArgs.setSize(3 * (int)OzoneConsts.MB); + keyArgs.setUserName(userName); try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) { outputStream.write(dataString.getBytes()); @@ -190,6 +191,7 @@ public void testPartialRead() throws Exception { String dataString = RandomStringUtils.randomAscii(500); KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); keyArgs.setSize(500); + keyArgs.setUserName(userName); try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) { outputStream.write(dataString.getBytes()); 
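A recurring change in the client and test diffs above is that every OmKeyArgs handed to openKey/createFile now carries the caller's ACL list (and, through the REST handler path, a user name). The following is a minimal sketch of how a caller can assemble such args with the new OzoneUtils.getAclList helper; the class name KeyArgsWithAclsSketch and the volume/bucket/key parameters are illustrative only and are not part of the patch.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.security.UserGroupInformation;

// Sketch only: shows the shape of the new requirement, not a complete client.
final class KeyArgsWithAclsSketch {

  // Builds OmKeyArgs that carry the caller's default ACLs, as openKey/createFile
  // expect after this patch.
  static OmKeyArgs newKeyArgs(String volume, String bucket, String key, long size)
      throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    List<OzoneAcl> acls = OzoneUtils.getAclList(
        ugi.getUserName(), ugi.getGroups(), ACLType.ALL, ACLType.ALL);
    return new OmKeyArgs.Builder()
        .setVolumeName(volume)
        .setBucketName(bucket)
        .setKeyName(key)
        .setDataSize(size)
        .setAcls(acls)
        .build();
  }
}

RpcClient.getAclList() in this patch does essentially the same thing with the client's own UserGroupInformation and the configured default user/group rights.
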
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index a1b20bd5881f9..25314db554e87 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -44,6 +44,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.ArrayList; import java.util.LinkedList; import java.util.List; @@ -116,6 +117,7 @@ public void testAllocateCommit() throws Exception { .setKeyName(keyName) .setDataSize(1000) .setRefreshPipeline(true) + .setAcls(new ArrayList<>()) .build(); // 1st update, version 0 @@ -220,6 +222,7 @@ public void testReadLatestVersion() throws Exception { String dataString = RandomStringUtils.randomAlphabetic(100); KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); + keyArgs.setUserName(userName); // this write will create 1st version with one block try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { stream.write(dataString.getBytes()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java index 6d4702fa9340b..1057e7a23ba28 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java @@ -486,6 +486,7 @@ public void testDeleteNonEmptyBucket() throws Exception { String dataString = RandomStringUtils.randomAscii(100); KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); keyArgs.setSize(100); + keyArgs.setUserName(userName); try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { stream.write(dataString.getBytes()); } @@ -525,6 +526,7 @@ public void testGetKeyWriterReader() throws IOException, OzoneException { String dataString = RandomStringUtils.randomAscii(100); KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); keyArgs.setSize(100); + keyArgs.setUserName(userName); try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { stream.write(dataString.getBytes()); } @@ -567,6 +569,7 @@ public void testKeyOverwrite() throws IOException, OzoneException { KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); keyArgs.setSize(100); + keyArgs.setUserName(userName); String dataString = RandomStringUtils.randomAscii(100); try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { stream.write(dataString.getBytes()); @@ -577,6 +580,7 @@ public void testKeyOverwrite() throws IOException, OzoneException { // That is this overwrite only overwrites the keys on OM. We need to // garbage collect those blocks from datanode. 
KeyArgs keyArgs2 = new KeyArgs(volumeName, bucketName, keyName, userArgs); + keyArgs2.setUserName(userName); storageHandler.newKeyWriter(keyArgs2); Assert .assertEquals(numKeyAllocateFails, omMetrics.getNumKeyAllocateFails()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 4b6118715e7eb..400286855cea5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om; import java.io.IOException; +import java.util.Collections; import java.util.List; import java.util.Map; import org.apache.commons.lang3.RandomStringUtils; @@ -134,6 +135,7 @@ public void testSafeModeOperations() throws Exception { .setBucketName(bucketName) .setKeyName(keyName) .setDataSize(1000) + .setAcls(Collections.emptyList()) .build(); OmVolumeArgs volArgs = new OmVolumeArgs.Builder() .setAdminName(adminName) diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java index 42c7238427db3..a4aa361e39862 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java @@ -66,7 +66,9 @@ import java.io.IOException; import java.io.OutputStream; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import java.util.Objects; import java.util.concurrent.TimeUnit; /** @@ -444,6 +446,8 @@ public BucketInfo getBucketInfo(BucketArgs args) @Override public OutputStream newKeyWriter(KeyArgs args) throws IOException, OzoneException { + Objects.requireNonNull(args.getUserName(), + "Username should not be null"); OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) @@ -451,6 +455,9 @@ public OutputStream newKeyWriter(KeyArgs args) throws IOException, .setDataSize(args.getSize()) .setType(xceiverClientManager.getType()) .setFactor(xceiverClientManager.getFactor()) + .setAcls(OzoneUtils.getAclList(args.getUserName(), + args.getGroups() != null ? Arrays.asList(args.getGroups()) : null, + ACLType.ALL, ACLType.ALL)) .build(); // contact OM to allocate a block for key. 
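The newKeyWriter change above now insists on a non-null user name and derives the key's default ACLs from it. As a minimal sketch of the same pattern from the caller's side (the class and method names are hypothetical; the getAclList call mirrors the ones used elsewhere in this patch), assuming the current UGI supplies the user and group names:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.ozone.OzoneAcl;
    import org.apache.hadoop.ozone.web.utils.OzoneUtils;
    import org.apache.hadoop.security.UserGroupInformation;
    import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;

    final class DefaultAclSketch {
      // Full rights for the current user and its groups, matching what
      // newKeyWriter above passes into OmKeyArgs.Builder#setAcls.
      static List<OzoneAcl> defaultAcls() throws IOException {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        return OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), ALL, ALL);
      }
    }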
OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 18cc266a11617..ea8f5f052171f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -17,6 +17,8 @@ package org.apache.hadoop.ozone.om; import java.io.IOException; +import java.util.ArrayList; +import java.util.BitSet; import java.util.List; import java.util.Objects; @@ -40,6 +42,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; /** @@ -404,27 +407,44 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { throw new OMException("Bucket " + bucket + " is not found", BUCKET_NOT_FOUND); } - List list = bucketInfo.getAcls(); - if(!validateAddAcl(acl, list)) { - // New acl can't be added as it is not consistent with existing ACLs. - LOG.info("New acl:{} can't be added as it is not consistent with " + - "existing ACLs:{}.", acl, StringUtils.join(",", list)); - return false; + + // Case 1: When we are adding more rights to existing user/group. + boolean addToExistingAcl = false; + for(OzoneAcl a: bucketInfo.getAcls()) { + if(a.getName().equals(acl.getName()) && + a.getType().equals(acl.getType())) { + BitSet bits = (BitSet) acl.getAclBitSet().clone(); + bits.or(a.getAclBitSet()); + + if (bits.equals(a.getAclBitSet())) { + return false; + } + a.getAclBitSet().or(acl.getAclBitSet()); + addToExistingAcl = true; + break; + } } - list.add(acl); - OmBucketInfo updatedBucket = OmBucketInfo.newBuilder() - .setVolumeName(bucketInfo.getVolumeName()) - .setBucketName(bucketInfo.getBucketName()) - .setStorageType(bucketInfo.getStorageType()) - .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) - .setCreationTime(bucketInfo.getCreationTime()) - .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) - .addAllMetadata(bucketInfo.getMetadata()) - .setAcls(list) - .build(); - // TODO:HDDS-1619 OM HA changes required for all acl operations. - metadataManager.getBucketTable().put(dbBucketKey, updatedBucket); + // Case 2: When a completely new acl is added. + if(!addToExistingAcl) { + List newAcls = bucketInfo.getAcls(); + if(newAcls == null) { + newAcls = new ArrayList<>(); + } + newAcls.add(acl); + bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(bucketInfo.getVolumeName()) + .setBucketName(bucketInfo.getBucketName()) + .setStorageType(bucketInfo.getStorageType()) + .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) + .setCreationTime(bucketInfo.getCreationTime()) + .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) + .addAllMetadata(bucketInfo.getMetadata()) + .setAcls(newAcls) + .build(); + } + + metadataManager.getBucketTable().put(dbBucketKey, bucketInfo); } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Add acl operation failed for bucket:{}/{} acl:{}", @@ -466,26 +486,31 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { throw new OMException("Bucket " + bucket + " is not found", BUCKET_NOT_FOUND); } - List list = bucketInfo.getAcls(); - if (!list.contains(acl)) { - // Return false if acl doesn't exist in current ACLs. 
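The rewritten addAcl above folds new rights into an existing name/type entry with a bitwise OR and treats an unchanged bit set as a duplicate request. A self-contained sketch of that check with plain java.util.BitSet, using bit indices as stand-ins for ACL rights:

    import java.util.BitSet;

    final class AclOrMergeSketch {
      // Returns false when every requested right is already present
      // (OR leaves the existing bits unchanged); otherwise grants the
      // extra rights in place and returns true.
      static boolean addRights(BitSet existing, BitSet requested) {
        BitSet merged = (BitSet) requested.clone();
        merged.or(existing);
        if (merged.equals(existing)) {
          return false;
        }
        existing.or(requested);
        return true;
      }
    }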
- LOG.info("Acl:{} not found in existing ACLs:{}.", acl, - StringUtils.join(",", list)); - return false; + + // When we are removing subset of rights from existing acl. + for(OzoneAcl a: bucketInfo.getAcls()) { + if(a.getName().equals(acl.getName()) && + a.getType().equals(acl.getType())) { + BitSet bits = (BitSet) acl.getAclBitSet().clone(); + bits.and(a.getAclBitSet()); + + if (bits.equals(ZERO_BITSET)) { + return false; + } + bits = (BitSet) acl.getAclBitSet().clone(); + bits.and(a.getAclBitSet()); + a.getAclBitSet().xor(bits); + + if(a.getAclBitSet().equals(ZERO_BITSET)) { + bucketInfo.getAcls().remove(a); + } + break; + } else { + return false; + } } - list.remove(acl); - OmBucketInfo updatedBucket = OmBucketInfo.newBuilder() - .setVolumeName(bucketInfo.getVolumeName()) - .setBucketName(bucketInfo.getBucketName()) - .setStorageType(bucketInfo.getStorageType()) - .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) - .setCreationTime(bucketInfo.getCreationTime()) - .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) - .addAllMetadata(bucketInfo.getMetadata()) - .setAcls(list) - .build(); - metadataManager.getBucketTable().put(dbBucketKey, updatedBucket); + metadataManager.getBucketTable().put(dbBucketKey, bucketInfo); } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Remove acl operation failed for bucket:{}/{} acl:{}", @@ -552,23 +577,6 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { return true; } - /** - * Validates if a new acl addition is consistent with current ACL list. - * @param newAcl new acl to be added. - * @param currentAcls list of acls. - * - * @return true if newAcl addition to existing acls is valid, else false. - * */ - private boolean validateAddAcl(OzoneAcl newAcl, List currentAcls) { - - // Check 1: Check for duplicate. - if(currentAcls.contains(newAcl)) { - return false; - } - - return true; - } - /** * Returns list of ACLs for given Ozone object. * diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 0006e93fa9ff7..51c0cfae631b3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -18,6 +18,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -35,6 +36,7 @@ .KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .KeyLocation; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.utils.BackgroundService; import java.io.IOException; @@ -285,4 +287,42 @@ OmMultipartUploadListParts listParts(String volumeName, String bucketName, String keyName, String uploadID, int partNumberMarker, int maxParts) throws IOException; + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * + * @throws IOException if there is error. + * */ + boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. 
+ * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * + * @throws IOException if there is error. + * */ + boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Acls to be set for given Ozone object. This operations reset ACL for + * given object to list of ACLs provided in argument. + * @param obj Ozone object. + * @param acls List of acls. + * + * @throws IOException if there is error. + * */ + boolean setAcl(OzoneObj obj, List acls) throws IOException; + + /** + * Returns list of ACLs for given Ozone object. + * @param obj Ozone object. + * + * @throws IOException if there is error. + * */ + List getAcl(OzoneObj obj) throws IOException; + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 9a915d56a05f4..b7b4b12a2e6d1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -26,11 +26,13 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.TimeUnit; import java.security.GeneralSecurityException; import java.security.PrivilegedExceptionAction; +import java.util.stream.Collectors; import com.google.common.base.Strings; import org.apache.commons.codec.digest.DigestUtils; @@ -49,6 +51,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; @@ -74,7 +77,11 @@ .KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .KeyLocation; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.ozone.common.BlockGroup; @@ -108,6 +115,10 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; import static org.apache.hadoop.util.Time.monotonicNow; import org.slf4j.Logger; @@ -218,14 +229,14 @@ private void validateBucket(String volumeName, String bucketName) if (metadataManager.getVolumeTable().get(volumeKey) == null) { LOG.error("volume not found: {}", volumeName); throw new OMException("Volume not 
found", - OMException.ResultCodes.VOLUME_NOT_FOUND); + VOLUME_NOT_FOUND); } // if the volume exists but bucket does not exist, throw bucket not found // exception LOG.error("bucket not found: {}/{} ", volumeName, bucketName); throw new OMException("Bucket not found", - OMException.ResultCodes.BUCKET_NOT_FOUND); + BUCKET_NOT_FOUND); } } @@ -243,7 +254,7 @@ private void validateS3Bucket(String volumeName, String bucketName) if (metadataManager.getBucketTable().get(bucketKey) == null) { LOG.error("bucket not found: {}/{} ", volumeName, bucketName); throw new OMException("Bucket not found", - ResultCodes.BUCKET_NOT_FOUND); + BUCKET_NOT_FOUND); } } @@ -266,7 +277,7 @@ public OmKeyLocationInfo addAllocatedBlock(OmKeyArgs args, long clientID, LOG.error("Allocate block for a key not in open status in meta store" + " /{}/{}/{} with ID {}", volumeName, bucketName, keyName, clientID); throw new OMException("Open Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } OmKeyLocationInfo omKeyLocationInfo = @@ -295,7 +306,7 @@ public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, LOG.error("Allocate block for a key not in open status in meta store" + " /{}/{}/{} with ID {}", volumeName, bucketName, keyName, clientID); throw new OMException("Open Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } // current version not committed, so new blocks coming now are added to @@ -402,6 +413,9 @@ public EncryptedKeyVersion run() throws IOException { @Override public OpenKeySession openKey(OmKeyArgs args) throws IOException { Preconditions.checkNotNull(args); + Preconditions.checkNotNull(args.getAcls(), "Default acls " + + "should be set."); + String volumeName = args.getVolumeName(); String bucketName = args.getBucketName(); String keyName = args.getKeyName(); @@ -582,7 +596,7 @@ private OmKeyInfo createKeyInfo(OmKeyArgs keyArgs, ReplicationFactor factor, ReplicationType type, long size, FileEncryptionInfo encInfo) { - return new OmKeyInfo.Builder() + OmKeyInfo.Builder builder = new OmKeyInfo.Builder() .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) @@ -593,8 +607,12 @@ private OmKeyInfo createKeyInfo(OmKeyArgs keyArgs, .setDataSize(size) .setReplicationType(type) .setReplicationFactor(factor) - .setFileEncryptionInfo(encInfo) - .build(); + .setFileEncryptionInfo(encInfo); + if(keyArgs.getAcls() != null) { + builder.setAcls(keyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } + return builder.build(); } @Override @@ -615,7 +633,7 @@ public void commitKey(OmKeyArgs args, long clientID) throws IOException { OmKeyInfo keyInfo = metadataManager.getOpenKeyTable().get(openKey); if (keyInfo == null) { throw new OMException("Commit a key without corresponding entry " + - objectKey, ResultCodes.KEY_NOT_FOUND); + objectKey, KEY_NOT_FOUND); } keyInfo.setDataSize(args.getDataSize()); keyInfo.setModificationTime(Time.now()); @@ -655,7 +673,7 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { LOG.debug("volume:{} bucket:{} Key:{} not found", volumeName, bucketName, keyName); throw new OMException("Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } if (grpcBlockTokenEnabled) { String remoteUser = getRemoteUser().getShortUserName(); @@ -700,7 +718,7 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { LOG.debug("Get key failed for volume:{} bucket:{} key:{}", volumeName, bucketName, keyName, ex); throw 
new OMException(ex.getMessage(), - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } finally { metadataManager.getLock().releaseBucketLock(volumeName, bucketName); } @@ -733,7 +751,7 @@ public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { + "Key: {} not found.", volumeName, bucketName, fromKeyName, toKeyName, fromKeyName); throw new OMException("Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } // A rename is a no-op if the target and source name is same. @@ -790,7 +808,7 @@ public void deleteKey(OmKeyArgs args) throws IOException { OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); if (keyInfo == null) { throw new OMException("Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } else { // directly delete key with no blocks from db. This key need not be // moved to deleted table. @@ -922,6 +940,8 @@ public OmMultipartInfo applyInitiateMultipartUpload(OmKeyArgs keyArgs, .setReplicationFactor(keyArgs.getFactor()) .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, locations))) + .setAcls(keyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .build(); DBStore store = metadataManager.getStore(); try (BatchOperation batch = store.initBatchOperation()) { @@ -1155,13 +1175,13 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( .setDataSize(size) .setOmKeyLocationInfos( Collections.singletonList(keyLocationInfoGroup)) - .build(); + .setAcls(omKeyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())).build(); } else { // Already a version exists, so we should add it as a new version. // But now as versioning is not supported, just following the commit - // key approach. - // When versioning support comes, then we can uncomment below code - // keyInfo.addNewVersion(locations); + // key approach. When versioning support comes, then we can uncomment + // below code keyInfo.addNewVersion(locations); keyInfo.updateLocationInfoList(locations); } DBStore store = metadataManager.getStore(); @@ -1330,6 +1350,305 @@ public OmMultipartUploadListParts listParts(String volumeName, } } + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * @throws IOException if there is error. + */ + @Override + public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + validateOzoneObj(obj); + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + String keyName = obj.getKeyName(); + + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + validateBucket(volume, bucket); + String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); + OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); + Table keyTable; + if (keyInfo == null) { + keyInfo = metadataManager.getOpenKeyTable().get(objectKey); + if (keyInfo == null) { + throw new OMException("Key not found. 
Key:" + + objectKey, KEY_NOT_FOUND); + } + keyTable = metadataManager.getOpenKeyTable(); + } else { + keyTable = metadataManager.getKeyTable(); + } + List newAcls = new ArrayList<>(keyInfo.getAcls()); + OzoneAclInfo newAcl = null; + for(OzoneAclInfo a: keyInfo.getAcls()) { + if(a.getName().equals(acl.getName())) { + List rights = + new ArrayList<>(a.getRightsList()); + for (IAccessAuthorizer.ACLType aclType : acl.getAclList()) { + rights.add(OzoneAclRights.valueOf(aclType.name())); + } + newAcl = OzoneAclInfo.newBuilder() + .setType(a.getType()) + .setName(a.getName()) + .addAllRights(rights) + .build(); + newAcls.remove(a); + newAcls.add(newAcl); + break; + } + } + if(newAcl == null) { + newAcls.add(OzoneAcl.toProtobuf(acl)); + } + + OmKeyInfo newObj = new OmKeyInfo.Builder() + .setBucketName(keyInfo.getBucketName()) + .setKeyName(keyInfo.getKeyName()) + .setReplicationFactor(keyInfo.getFactor()) + .setReplicationType(keyInfo.getType()) + .setVolumeName(keyInfo.getVolumeName()) + .setOmKeyLocationInfos(keyInfo.getKeyLocationVersions()) + .setCreationTime(keyInfo.getCreationTime()) + .setModificationTime(keyInfo.getModificationTime()) + .setAcls(newAcls) + .setDataSize(keyInfo.getDataSize()) + .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo()) + .build(); + keyTable.put(objectKey, newObj); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Add acl operation failed for key:{}/{}/{}", volume, + bucket, keyName, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + return true; + } + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * @throws IOException if there is error. + */ + @Override + public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + validateOzoneObj(obj); + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + String keyName = obj.getKeyName(); + + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + validateBucket(volume, bucket); + String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); + OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); + Table keyTable; + if (keyInfo == null) { + keyInfo = metadataManager.getOpenKeyTable().get(objectKey); + if (keyInfo == null) { + throw new OMException("Key not found. Key:" + + objectKey, KEY_NOT_FOUND); + } + keyTable = metadataManager.getOpenKeyTable(); + } else { + keyTable = metadataManager.getKeyTable(); + } + + List newAcls = new ArrayList<>(keyInfo.getAcls()); + OzoneAclInfo newAcl = OzoneAcl.toProtobuf(acl); + + if(newAcls.contains(OzoneAcl.toProtobuf(acl))) { + newAcls.remove(newAcl); + } else { + // Acl to be removed might be a subset of existing acls. 
+ for(OzoneAclInfo a: keyInfo.getAcls()) { + if(a.getName().equals(acl.getName())) { + List rights = + new ArrayList<>(a.getRightsList()); + for (IAccessAuthorizer.ACLType aclType : acl.getAclList()) { + rights.remove(OzoneAclRights.valueOf(aclType.name())); + } + newAcl = OzoneAclInfo.newBuilder() + .setType(a.getType()) + .setName(a.getName()) + .addAllRights(rights) + .build(); + newAcls.remove(a); + newAcls.add(newAcl); + break; + } + } + if(newAcl == null) { + newAcls.add(OzoneAcl.toProtobuf(acl)); + } + } + + OmKeyInfo newObj = new OmKeyInfo.Builder() + .setBucketName(keyInfo.getBucketName()) + .setKeyName(keyInfo.getKeyName()) + .setReplicationFactor(keyInfo.getFactor()) + .setReplicationType(keyInfo.getType()) + .setVolumeName(keyInfo.getVolumeName()) + .setOmKeyLocationInfos(keyInfo.getKeyLocationVersions()) + .setCreationTime(keyInfo.getCreationTime()) + .setModificationTime(keyInfo.getModificationTime()) + .setAcls(newAcls) + .setDataSize(keyInfo.getDataSize()) + .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo()) + .build(); + + keyTable.put(objectKey, newObj); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Remove acl operation failed for key:{}/{}/{}", volume, + bucket, keyName, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + return true; + } + + /** + * Acls to be set for given Ozone object. This operations reset ACL for given + * object to list of ACLs provided in argument. + * + * @param obj Ozone object. + * @param acls List of acls. + * @throws IOException if there is error. + */ + @Override + public boolean setAcl(OzoneObj obj, List acls) throws IOException { + validateOzoneObj(obj); + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + String keyName = obj.getKeyName(); + + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + validateBucket(volume, bucket); + String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); + OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); + Table keyTable; + if (keyInfo == null) { + keyInfo = metadataManager.getOpenKeyTable().get(objectKey); + if (keyInfo == null) { + throw new OMException("Key not found. Key:" + + objectKey, KEY_NOT_FOUND); + } + keyTable = metadataManager.getOpenKeyTable(); + } else { + keyTable = metadataManager.getKeyTable(); + } + + List newAcls = new ArrayList<>(); + for (OzoneAcl a : acls) { + newAcls.add(OzoneAcl.toProtobuf(a)); + } + OmKeyInfo newObj = new OmKeyInfo.Builder() + .setBucketName(keyInfo.getBucketName()) + .setKeyName(keyInfo.getKeyName()) + .setReplicationFactor(keyInfo.getFactor()) + .setReplicationType(keyInfo.getType()) + .setVolumeName(keyInfo.getVolumeName()) + .setOmKeyLocationInfos(keyInfo.getKeyLocationVersions()) + .setCreationTime(keyInfo.getCreationTime()) + .setModificationTime(keyInfo.getModificationTime()) + .setAcls(newAcls) + .setDataSize(keyInfo.getDataSize()) + .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo()) + .build(); + + keyTable.put(objectKey, newObj); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Set acl operation failed for key:{}/{}/{}", volume, + bucket, keyName, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + return true; + } + + /** + * Returns list of ACLs for given Ozone object. + * + * @param obj Ozone object. + * @throws IOException if there is error. 
+ */ + @Override + public List getAcl(OzoneObj obj) throws IOException { + validateOzoneObj(obj); + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + String keyName = obj.getKeyName(); + + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + validateBucket(volume, bucket); + String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); + OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); + if (keyInfo == null) { + keyInfo = metadataManager.getOpenKeyTable().get(objectKey); + if (keyInfo == null) { + throw new OMException("Key not found. Key:" + + objectKey, KEY_NOT_FOUND); + } + } + + List acls = new ArrayList<>(); + for (OzoneAclInfo a : keyInfo.getAcls()) { + acls.add(OzoneAcl.fromProtobuf(a)); + } + return acls; + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Get acl operation failed for key:{}/{}/{}", volume, + bucket, keyName, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + } + + /** + * Helper method to validate ozone object. + * @param obj + * */ + private void validateOzoneObj(OzoneObj obj) throws OMException { + Objects.requireNonNull(obj); + + if (!obj.getResourceType().equals(KEY)) { + throw new IllegalArgumentException("Unexpected argument passed to " + + "KeyManager. OzoneObj type:" + obj.getResourceType()); + } + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + String keyName = obj.getKeyName(); + + if (Strings.isNullOrEmpty(volume)) { + throw new OMException("Volume name is required.", VOLUME_NOT_FOUND); + } + if (Strings.isNullOrEmpty(bucket)) { + throw new OMException("Bucket name is required.", BUCKET_NOT_FOUND); + } + if (Strings.isNullOrEmpty(keyName)) { + throw new OMException("Key name is required.", KEY_NOT_FOUND); + } + } + /** * OzoneFS api to get file status for an entry. 
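getAcl above converts the stored OzoneAclInfo protobuf entries back into OzoneAcl objects after validating that the OzoneObj really names a key. A small hedged helper showing how a caller might consume that read path (the helper name is hypothetical; it assumes the caller already holds a KEY-typed OzoneObj):

    import java.io.IOException;
    import org.apache.hadoop.ozone.OzoneAcl;
    import org.apache.hadoop.ozone.om.KeyManager;
    import org.apache.hadoop.ozone.security.acl.OzoneObj;

    final class KeyAclDumpSketch {
      // Prints each ACL entry (type, name, rights) attached to the key.
      static void dumpKeyAcls(KeyManager keyManager, OzoneObj keyObj)
          throws IOException {
        for (OzoneAcl acl : keyManager.getAcl(keyObj)) {
          System.out.println(
              acl.getType() + " " + acl.getName() + " " + acl.getAclList());
        }
      }
    }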
* @@ -1420,7 +1739,7 @@ public void createDirectory(OmKeyArgs args) throws IOException { return; } OmKeyInfo dirDbKeyInfo = - createDirectoryKey(volumeName, bucketName, keyName); + createDirectoryKey(volumeName, bucketName, keyName, args.getAcls()); String dirDbKey = metadataManager .getOzoneKey(volumeName, bucketName, dirDbKeyInfo.getKeyName()); metadataManager.getKeyTable().put(dirDbKey, dirDbKeyInfo); @@ -1430,7 +1749,7 @@ public void createDirectory(OmKeyArgs args) throws IOException { } private OmKeyInfo createDirectoryKey(String volumeName, String bucketName, - String keyName) throws IOException { + String keyName, List acls) throws IOException { // verify bucket exists OmBucketInfo bucketInfo = getBucketInfo(volumeName, bucketName); @@ -1448,6 +1767,8 @@ private OmKeyInfo createDirectoryKey(String volumeName, String bucketName, .setReplicationType(ReplicationType.RATIS) .setReplicationFactor(ReplicationFactor.ONE) .setFileEncryptionInfo(encInfo) + .setAcls(acls.stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 02412786578ba..8ea8e2ca4b8d6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -2971,9 +2971,10 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { switch (obj.getResourceType()) { case VOLUME: return volumeManager.addAcl(obj, acl); - case BUCKET: return bucketManager.addAcl(obj, acl); + case KEY: + return keyManager.addAcl(obj, acl); default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); @@ -3001,6 +3002,8 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { case BUCKET: return bucketManager.removeAcl(obj, acl); + case KEY: + return keyManager.removeAcl(obj, acl); default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); @@ -3025,9 +3028,10 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { switch (obj.getResourceType()) { case VOLUME: return volumeManager.setAcl(obj, acls); - case BUCKET: return bucketManager.setAcl(obj, acls); + case KEY: + return keyManager.setAcl(obj, acls); default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); @@ -3050,9 +3054,10 @@ public List getAcl(OzoneObj obj) throws IOException { switch (obj.getResourceType()) { case VOLUME: return volumeManager.getAcl(obj); - case BUCKET: return bucketManager.getAcl(obj); + case KEY: + return keyManager.getAcl(obj); default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java index 9519f770c662c..6ff289a81bbe9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java @@ -542,7 +542,7 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { try { volumeArgs.addAcl(acl); } catch (OMException ex) { - LOG.info("Add acl failed.", ex); + LOG.debug("Add acl 
failed.", ex); return false; } metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); @@ -592,7 +592,7 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { try { volumeArgs.removeAcl(acl); } catch (OMException ex) { - LOG.info("Remove acl failed.", ex); + LOG.debug("Remove acl failed.", ex); return false; } metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index d78cc66e37aa2..58ab181402dc9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -170,6 +170,8 @@ private OMResponse processReply(OMRequest omRequest, RaftClientReply reply) omResponse.setMessage(stateMachineException.getCause().getMessage()); omResponse.setStatus(parseErrorStatus( stateMachineException.getCause().getMessage())); + LOG.debug("Error while executing ratis request. " + + "stateMachineException: ", stateMachineException); return omResponse.build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index b82265024a3c7..568262f55548b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -576,6 +576,8 @@ private CreateKeyResponse createKey(CreateKeyRequest request) .setIsMultipartKey(keyArgs.getIsMultipartKey()) .setMultipartUploadID(keyArgs.getMultipartUploadID()) .setMultipartUploadPartNumber(keyArgs.getMultipartNumber()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .build(); if (keyArgs.hasDataSize()) { omKeyArgs.setDataSize(keyArgs.getDataSize()); @@ -825,6 +827,8 @@ private MultipartInfoInitiateResponse initiateMultiPartUpload( .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .setType(keyArgs.getType()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .setFactor(keyArgs.getFactor()) .build(); OmMultipartInfo multipartInfo = impl.initiateMultipartUpload(omKeyArgs); @@ -847,6 +851,8 @@ private MultipartInfoInitiateResponse applyInitiateMultiPartUpload( .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .setType(keyArgs.getType()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .setFactor(keyArgs.getFactor()) .build(); OmMultipartInfo multipartInfo = @@ -905,6 +911,8 @@ private MultipartUploadCompleteResponse completeMultipartUpload( .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .setMultipartUploadID(keyArgs.getMultipartUploadID()) .build(); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = impl @@ -1050,6 +1058,8 @@ private void createDirectory(CreateDirectoryRequest request) 
.setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .build(); impl.createDirectory(omKeyArgs); } @@ -1064,6 +1074,8 @@ private CreateFileResponse createFile( .setDataSize(keyArgs.getDataSize()) .setType(keyArgs.getType()) .setFactor(keyArgs.getFactor()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .build(); OpenKeySession keySession = impl.createFile(omKeyArgs, request.getIsOverwrite(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java index ab846b85f8a59..357feeb7b1add 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java @@ -22,6 +22,7 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -192,6 +193,7 @@ private void createAndDeleteKeys(KeyManager keyManager, int keyCount, .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) + .setAcls(Collections.emptyList()) .setLocationInfoList(new ArrayList<>()) .build(); //Open, Commit and Delete the Keys in the Key Manager. diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index ad2b2b196e3f9..fb323fe0c63e3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -49,6 +49,8 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.web.utils.OzoneUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; @@ -60,6 +62,7 @@ import org.mockito.Mockito; import static org.apache.hadoop.ozone.OzoneConfigKeys.*; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; /** * Test class for @{@link KeyManagerImpl}. 
@@ -173,11 +176,14 @@ public void allocateBlockFailureInSafeMode() throws Exception { @Test public void openKeyFailureInSafeMode() throws Exception { + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); KeyManager keyManager1 = new KeyManagerImpl(mockScmBlockLocationProtocol, metadataManager, conf, "om1", null); OmKeyArgs keyArgs = createBuilder() .setKeyName(KEY_NAME) .setDataSize(1000) + .setAcls(OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), + ALL, ALL)) .build(); LambdaTestUtils.intercept(OMException.class, "SafeModePrecheck failed for allocateBlock", () -> { @@ -355,7 +361,7 @@ public void testLookupFile() throws IOException { } } - private OmKeyArgs createKeyArgs(String toKeyName) { + private OmKeyArgs createKeyArgs(String toKeyName) throws IOException { return createBuilder().setKeyName(toKeyName).build(); } @@ -542,12 +548,15 @@ private List createFiles(String parent, return keyNames; } - private OmKeyArgs.Builder createBuilder() { + private OmKeyArgs.Builder createBuilder() throws IOException { + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); return new OmKeyArgs.Builder() .setBucketName(BUCKET_NAME) .setFactor(ReplicationFactor.ONE) .setDataSize(0) .setType(ReplicationType.STAND_ALONE) + .setAcls(OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), + ALL, ALL)) .setVolumeName(VOLUME_NAME); } } \ No newline at end of file From 294695dd57cb75f2756a31a54264bdd37b32bb01 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Wed, 5 Jun 2019 18:52:39 -0400 Subject: [PATCH 0124/1308] HADOOP-16314. Make sure all web end points are covered by the same authentication filter. Contributed by Prabhu Joseph --- .../org/apache/hadoop/http/HttpServer2.java | 48 ++-- .../org/apache/hadoop/http/WebServlet.java | 59 +++++ .../src/site/markdown/HttpAuthentication.md | 4 +- .../apache/hadoop/http/TestGlobalFilter.java | 4 +- .../hadoop/http/TestHttpServerWithSpnego.java | 238 ++++++++++++++++++ .../apache/hadoop/http/TestPathFilter.java | 2 - .../apache/hadoop/http/TestServletFilter.java | 1 - .../org/apache/hadoop/log/TestLogLevel.java | 9 + .../server/namenode/NameNodeHttpServer.java | 12 - ...tDFSInotifyEventInputStreamKerberized.java | 9 + .../hdfs/qjournal/TestSecureNNWithQJM.java | 8 + .../hadoop/hdfs/web/TestWebHdfsTokens.java | 8 + .../TestWebHdfsWithAuthenticationFilter.java | 18 +- .../apache/hadoop/yarn/webapp/Dispatcher.java | 9 + .../util/timeline/TimelineServerUtils.java | 10 +- .../resourcemanager/webapp/RMWebAppUtil.java | 4 + .../reader/TimelineReaderServer.java | 13 +- .../webproxy/amfilter/TestSecureAmFilter.java | 10 +- 18 files changed, 412 insertions(+), 54 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/WebServlet.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index fb2dff5d02d05..7825e08dac545 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -27,6 +27,7 @@ import java.net.MalformedURLException; import java.net.URI; import java.net.URL; +import java.util.Arrays; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; @@ -66,6 
+67,8 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer; +import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; import org.apache.hadoop.security.authentication.util.SignerSecretProvider; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.ssl.SSLFactory; @@ -90,7 +93,6 @@ import org.eclipse.jetty.server.handler.RequestLogHandler; import org.eclipse.jetty.server.session.AbstractSessionManager; import org.eclipse.jetty.server.session.SessionHandler; -import org.eclipse.jetty.servlet.DefaultServlet; import org.eclipse.jetty.servlet.FilterHolder; import org.eclipse.jetty.servlet.FilterMapping; import org.eclipse.jetty.servlet.ServletContextHandler; @@ -155,7 +157,7 @@ public final class HttpServer2 implements FilterContainer { // gets stored. public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf"; public static final String ADMINS_ACL = "admins.acl"; - public static final String SPNEGO_FILTER = "SpnegoFilter"; + public static final String SPNEGO_FILTER = "authentication"; public static final String NO_CACHE_FILTER = "NoCacheFilter"; public static final String BIND_ADDRESS = "bind.address"; @@ -433,7 +435,9 @@ public HttpServer2 build() throws IOException { HttpServer2 server = new HttpServer2(this); - if (this.securityEnabled) { + if (this.securityEnabled && + !this.conf.get(authFilterConfigurationPrefix + "type"). + equals(PseudoAuthenticationHandler.TYPE)) { server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey); } @@ -608,13 +612,6 @@ private void initializeWebServer(String name, String hostName, } addDefaultServlets(); - - if (pathSpecs != null) { - for (String path : pathSpecs) { - LOG.info("adding path spec: " + path); - addFilterPathMapping(path, webAppContext); - } - } } private void addListener(ServerConnector connector) { @@ -625,7 +622,7 @@ private static WebAppContext createWebAppContext(Builder b, AccessControlList adminsAcl, final String appDir) { WebAppContext ctx = new WebAppContext(); ctx.setDefaultsDescriptor(null); - ServletHolder holder = new ServletHolder(new DefaultServlet()); + ServletHolder holder = new ServletHolder(new WebServlet()); Map params = ImmutableMap. 
builder() .put("acceptRanges", "true") .put("dirAllowed", "false") @@ -684,10 +681,16 @@ private static FilterInitializer[] getFilterInitializers(Configuration conf) { return null; } - FilterInitializer[] initializers = new FilterInitializer[classes.length]; - for(int i = 0; i < classes.length; i++) { + List> classList = new ArrayList<>(Arrays.asList(classes)); + if (classList.contains(AuthenticationFilterInitializer.class) && + classList.contains(ProxyUserAuthenticationFilterInitializer.class)) { + classList.remove(AuthenticationFilterInitializer.class); + } + + FilterInitializer[] initializers = new FilterInitializer[classList.size()]; + for(int i = 0; i < classList.size(); i++) { initializers[i] = (FilterInitializer)ReflectionUtils.newInstance( - classes[i], conf); + classList.get(i), conf); } return initializers; } @@ -735,7 +738,7 @@ protected void addDefaultApps(ContextHandlerCollection parent, ServletContextHandler staticContext = new ServletContextHandler(parent, "/static"); staticContext.setResourceBase(appDir + "/static"); - staticContext.addServlet(DefaultServlet.class, "/*"); + staticContext.addServlet(WebServlet.class, "/*"); staticContext.setDisplayName("static"); @SuppressWarnings("unchecked") Map params = staticContext.getInitParams(); @@ -812,7 +815,6 @@ public void addJerseyResourcePackage(final String packageName, public void addServlet(String name, String pathSpec, Class clazz) { addInternalServlet(name, pathSpec, clazz, false); - addFilterPathMapping(pathSpec, webAppContext); } /** @@ -869,16 +871,6 @@ public void addInternalServlet(String name, String pathSpec, } } webAppContext.addServlet(holder, pathSpec); - - if(requireAuth && UserGroupInformation.isSecurityEnabled()) { - LOG.info("Adding Kerberos (SPNEGO) filter to " + name); - ServletHandler handler = webAppContext.getServletHandler(); - FilterMapping fmap = new FilterMapping(); - fmap.setPathSpec(pathSpec); - fmap.setFilterName(SPNEGO_FILTER); - fmap.setDispatches(FilterMapping.ALL); - handler.addFilterMapping(fmap); - } } /** @@ -945,8 +937,8 @@ public void addFilter(String name, String classname, Map parameters) { FilterHolder filterHolder = getFilterHolder(name, classname, parameters); - final String[] USER_FACING_URLS = { "*.html", "*.jsp" }; - FilterMapping fmap = getFilterMapping(name, USER_FACING_URLS); + final String[] userFacingUrls = {"/", "/*" }; + FilterMapping fmap = getFilterMapping(name, userFacingUrls); defineFilter(webAppContext, filterHolder, fmap); LOG.info( "Added filter " + name + " (class=" + classname + ") to context " diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/WebServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/WebServlet.java new file mode 100644 index 0000000000000..2eb6c2beb16a6 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/WebServlet.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.http; + +import java.io.IOException; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import org.eclipse.jetty.servlet.DefaultServlet; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Hadoop DefaultServlet for serving static web content. + */ +public class WebServlet extends DefaultServlet { + private static final long serialVersionUID = 3910031415927L; + public static final Logger LOG = LoggerFactory.getLogger(WebServlet.class); + + /** + * Get method is modified to support impersonation and Kerberos + * SPNEGO token by forcing client side redirect when accessing + * "/" (root) of the web application context. + */ + @Override + protected void doGet(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + if (request.getRequestURI().equals("/")) { + StringBuilder location = new StringBuilder(); + location.append("index.html"); + if (request.getQueryString()!=null) { + // echo query string but prevent HTTP response splitting + location.append("?"); + location.append(request.getQueryString() + .replaceAll("\n", "").replaceAll("\r", "")); + } + response.sendRedirect(location.toString()); + } else { + super.doGet(request, response); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md index 97d12fb30168b..ca5ce4898aa71 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md @@ -71,4 +71,6 @@ Trusted Proxy Trusted Proxy adds support to perform operations using end user instead of proxy user. It fetches the end user from doAs query parameter. To enable Trusted Proxy, please set the following configuration parameter: -Add org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer to hadoop.http.filter.initializers at the end in core-site.xml. +Add org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer to hadoop.http.filter.initializers in core-site.xml +instead of org.apache.hadoop.security.AuthenticationFilterInitializer. 
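For completeness, the same Trusted Proxy setting expressed programmatically (a sketch equivalent to the core-site.xml entry; HttpServer2.FILTER_INITIALIZER_PROPERTY is the constant behind hadoop.http.filter.initializers used elsewhere in this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer2;
    import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer;

    final class TrustedProxyConfSketch {
      // Registers the proxy-user-aware authentication filter initializer
      // in place of AuthenticationFilterInitializer, not alongside it.
      static Configuration withTrustedProxy(Configuration conf) {
        conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
            ProxyUserAuthenticationFilterInitializer.class.getName());
        return conf;
      }
    }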
+ diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java index 70361752633c3..ade383883f10e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java @@ -142,6 +142,8 @@ public void testServletFilter() throws Exception { for(int i = 0; i < urls.length; i++) { assertTrue(RECORDS.remove(urls[i])); } - assertTrue(RECORDS.isEmpty()); + assertTrue(RECORDS.size()==1); + // Accesing "/" will redirect to /index.html + assertTrue(RECORDS.contains("/index.html")); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java new file mode 100644 index 0000000000000..ea7c8cd4e6864 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java @@ -0,0 +1,238 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.http; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.KerberosTestUtils; +import org.apache.hadoop.security.authentication.client.AuthenticatedURL; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.server.AuthenticationToken; +import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer; +import org.apache.hadoop.security.authentication.util.Signer; +import org.apache.hadoop.security.authentication.util.SignerSecretProvider; +import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.Assert; + +import java.io.File; +import java.io.FileWriter; +import java.io.Writer; +import java.net.HttpURLConnection; +import java.net.URI; +import java.net.URL; +import java.util.Properties; +import static org.junit.Assert.assertTrue; + +/** + * This class is tested for http server with SPNEGO authentication. + */ +public class TestHttpServerWithSpnego { + + static final Log LOG = LogFactory.getLog(TestHttpServerWithSpnego.class); + + private static final String SECRET_STR = "secret"; + private static final String HTTP_USER = "HTTP"; + private static final String PREFIX = "hadoop.http.authentication."; + private static final long TIMEOUT = 20000; + + private static File httpSpnegoKeytabFile = new File( + KerberosTestUtils.getKeytabFile()); + private static String httpSpnegoPrincipal = + KerberosTestUtils.getServerPrincipal(); + private static String realm = KerberosTestUtils.getRealm(); + + private static File testRootDir = new File("target", + TestHttpServerWithSpnego.class.getName() + "-root"); + private static MiniKdc testMiniKDC; + private static File secretFile = new File(testRootDir, SECRET_STR); + + @BeforeClass + public static void setUp() throws Exception { + try { + testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir); + testMiniKDC.start(); + testMiniKDC.createPrincipal( + httpSpnegoKeytabFile, HTTP_USER + "/localhost"); + } catch (Exception e) { + assertTrue("Couldn't setup MiniKDC", false); + } + Writer w = new FileWriter(secretFile); + w.write("secret"); + w.close(); + } + + @AfterClass + public static void tearDown() { + if (testMiniKDC != null) { + testMiniKDC.stop(); + } + } + + /** + * groupA + * - userA + * groupB + * - userA, userB + * groupC + * - userC + * SPNEGO filter has been enabled. + * userA has the privilege to impersonate users in groupB. + * userA has admin access to all default servlets, but userB + * and userC don't have. So "/logs" can only be accessed by userA. 
+ * @throws Exception + */ + @Test + public void testAuthenticationWithProxyUser() throws Exception { + Configuration spengoConf = getSpengoConf(new Configuration()); + + //setup logs dir + System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath()); + + // Setup user group + UserGroupInformation.createUserForTesting("userA", + new String[]{"groupA", "groupB"}); + UserGroupInformation.createUserForTesting("userB", + new String[]{"groupB"}); + UserGroupInformation.createUserForTesting("userC", + new String[]{"groupC"}); + + // Make userA impersonate users in groupB + spengoConf.set("hadoop.proxyuser.userA.hosts", "*"); + spengoConf.set("hadoop.proxyuser.userA.groups", "groupB"); + ProxyUsers.refreshSuperUserGroupsConfiguration(spengoConf); + + HttpServer2 httpServer = null; + try { + // Create http server to test. + httpServer = getCommonBuilder() + .setConf(spengoConf) + .setACL(new AccessControlList("userA groupA")) + .build(); + httpServer.start(); + + // Get signer to encrypt token + Signer signer = getSignerToEncrypt(); + + // setup auth token for userA + AuthenticatedURL.Token token = getEncryptedAuthToken(signer, "userA"); + + String serverURL = "http://" + + NetUtils.getHostPortString(httpServer.getConnectorAddress(0)) + "/"; + + // The default authenticator is kerberos. + AuthenticatedURL authUrl = new AuthenticatedURL(); + + // userA impersonates userB, it's allowed. + for (String servlet : + new String[]{"stacks", "jmx", "conf"}) { + HttpURLConnection conn = authUrl + .openConnection(new URL(serverURL + servlet + "?doAs=userB"), + token); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + } + + // userA cannot impersonate userC, it fails. + for (String servlet : + new String[]{"stacks", "jmx", "conf"}){ + HttpURLConnection conn = authUrl + .openConnection(new URL(serverURL + servlet + "?doAs=userC"), + token); + Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN, + conn.getResponseCode()); + } + + + // "/logs" and "/logLevel" require admin authorization, + // only userA has the access. + for (String servlet : + new String[]{"logLevel", "logs"}) { + HttpURLConnection conn = authUrl + .openConnection(new URL(serverURL + servlet), token); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + } + + // Setup token for userB + token = getEncryptedAuthToken(signer, "userB"); + + // userB cannot access these servlets. 
+ for (String servlet : + new String[]{"logLevel", "logs"}) { + HttpURLConnection conn = authUrl + .openConnection(new URL(serverURL + servlet), token); + Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN, + conn.getResponseCode()); + } + + } finally { + if (httpServer != null) { + httpServer.stop(); + } + } + } + + private AuthenticatedURL.Token getEncryptedAuthToken(Signer signer, + String user) throws Exception { + AuthenticationToken token = + new AuthenticationToken(user, user, "kerberos"); + token.setExpires(System.currentTimeMillis() + TIMEOUT); + return new AuthenticatedURL.Token(signer.sign(token.toString())); + } + + private Signer getSignerToEncrypt() throws Exception { + SignerSecretProvider secretProvider = + StringSignerSecretProviderCreator.newStringSignerSecretProvider(); + Properties secretProviderProps = new Properties(); + secretProviderProps.setProperty( + AuthenticationFilter.SIGNATURE_SECRET, SECRET_STR); + secretProvider.init(secretProviderProps, null, TIMEOUT); + return new Signer(secretProvider); + } + + private Configuration getSpengoConf(Configuration conf) { + conf = new Configuration(); + conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, + ProxyUserAuthenticationFilterInitializer.class.getName()); + conf.set(PREFIX + "type", "kerberos"); + conf.setBoolean(PREFIX + "simple.anonymous.allowed", false); + conf.set(PREFIX + "signature.secret.file", + secretFile.getAbsolutePath()); + conf.set(PREFIX + "kerberos.keytab", + httpSpnegoKeytabFile.getAbsolutePath()); + conf.set(PREFIX + "kerberos.principal", httpSpnegoPrincipal); + conf.set(PREFIX + "cookie.domain", realm); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, + true); + return conf; + } + + private HttpServer2.Builder getCommonBuilder() throws Exception { + return new HttpServer2.Builder().setName("test") + .addEndpoint(new URI("http://localhost:0")) + .setFindPort(true); + } +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java index 4c35b391c39d6..d54503a0086e9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java @@ -35,7 +35,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.NetUtils; -import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -102,7 +101,6 @@ static void access(String urlstring) throws IOException { } } - @Test public void testPathSpecFilters() throws Exception { Configuration conf = new Configuration(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java index eafd0ae9ccae3..a8ecbd4fe28ef 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java @@ -100,7 +100,6 @@ static void access(String urlstring) throws IOException { } } - @Test public void testServletFilter() throws Exception { Configuration conf = new Configuration(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java index fd30b50141f28..3af70e95548ba 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java @@ -34,6 +34,7 @@ import org.apache.hadoop.log.LogLevel.CLI; import org.apache.hadoop.minikdc.KerberosSecurityTestcase; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.KerberosTestUtils; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; @@ -73,6 +74,7 @@ public class TestLogLevel extends KerberosSecurityTestcase { private final Logger log = ((Log4JLogger)testlog).getLogger(); private final static String PRINCIPAL = "loglevel.principal"; private final static String KEYTAB = "loglevel.keytab"; + private static final String PREFIX = "hadoop.http.authentication."; @BeforeClass public static void setUp() throws Exception { @@ -262,6 +264,13 @@ private void testDynamicLogLevel(final String bindProtocol, conf.set(KEYTAB, KerberosTestUtils.getKeytabFile()); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + conf.set(PREFIX + "type", "kerberos"); + conf.set(PREFIX + "kerberos.keytab", KerberosTestUtils.getKeytabFile()); + conf.set(PREFIX + "kerberos.principal", + KerberosTestUtils.getServerPrincipal()); + conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, + AuthenticationFilterInitializer.class.getName()); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); UserGroupInformation.setConfiguration(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 69dadf574d4b1..e8874d31a442f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -91,19 +91,7 @@ public static void initWebHdfs(Configuration conf, String hostname, HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY, HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT)); - // add authentication filter for webhdfs - final String className = conf.get( - DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY, - DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT); - final String name = className; - final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; - Map params = getAuthFilterParams(conf, hostname, - httpKeytab); - HttpServer2.defineFilter(httpServer2.getWebAppContext(), name, className, - params, new String[] { pathSpec }); - HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className - + ")"); // add REST CSRF prevention filter if (conf.getBoolean(DFS_WEBHDFS_REST_CSRF_ENABLED_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java index ace7c3bcdef02..c5537b5edc93f 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java @@ -26,8 +26,10 @@ import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; import org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM; import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.ipc.Client; import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; @@ -74,6 +76,7 @@ public class TestDFSInotifyEventInputStreamKerberized { private static final Logger LOG = LoggerFactory.getLogger(TestDFSInotifyEventInputStreamKerberized.class); + private static final String PREFIX = "hadoop.http.authentication."; private File baseDir; private String keystoresDir; @@ -183,6 +186,12 @@ public void initKerberizedCluster() throws Exception { userName + "/" + krbInstance + "@" + kdc.getRealm(); final String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm(); + baseConf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, + AuthenticationFilterInitializer.class.getName()); + baseConf.set(PREFIX + "type", "kerberos"); + baseConf.set(PREFIX + "kerberos.keytab", nnKeytabFile.getAbsolutePath()); + baseConf.set(PREFIX + "kerberos.principal", "HTTP/" + krbInstance); + baseConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal); baseConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab); baseConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java index 8e8bb22229a6f..35bae49bbc4bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java @@ -49,8 +49,10 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; @@ -68,6 +70,7 @@ public class TestSecureNNWithQJM { private static final Path TEST_PATH = new Path("/test-dir"); private static final Path TEST_PATH_2 = new Path("/test-dir-2"); + private static final String PREFIX = "hadoop.http.authentication."; private static HdfsConfiguration baseConf; private static File baseDir; @@ -112,6 +115,11 @@ public static void init() throws Exception { String hdfsPrincipal = userName + "/" + krbInstance + "@" + kdc.getRealm(); String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm(); + baseConf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, + AuthenticationFilterInitializer.class.getName()); + baseConf.set(PREFIX + "type", "kerberos"); + baseConf.set(PREFIX + "kerberos.keytab", keytab); + baseConf.set(PREFIX + "kerberos.principal", 
spnegoPrincipal); baseConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal); baseConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab); baseConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java index b5d44109f8bdc..f1df39c493fbf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java @@ -63,12 +63,14 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.web.resources.*; import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.io.Text; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; import org.apache.hadoop.security.authentication.util.KerberosName; +import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.test.GenericTestUtils; @@ -80,6 +82,7 @@ import org.junit.Test; public class TestWebHdfsTokens { + private static final String PREFIX = "hadoop.http.authentication."; private static Configuration conf; URI uri = null; @@ -142,6 +145,11 @@ private static void initSecureConf(Configuration secureConf) kdc.createPrincipal(keytabFile, username, username + "/" + krbInstance, "HTTP/" + krbInstance); + secureConf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, + AuthenticationFilterInitializer.class.getName()); + secureConf.set(PREFIX + "type", "kerberos"); + secureConf.set(PREFIX + "kerberos.keytab", keytab); + secureConf.set(PREFIX + "kerberos.principal", spnegoPrincipal); secureConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, principal); secureConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab); secureConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, principal); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java index f21fde46224ac..106f368dd6447 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java @@ -34,6 +34,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.http.FilterContainer; +import org.apache.hadoop.http.FilterInitializer; +import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.net.NetUtils; import org.junit.AfterClass; import org.junit.Assert; @@ -63,6 +66,17 @@ public void doFilter(ServletRequest request, ServletResponse response, public void destroy() { } + /** Initializer for Custom Filter. 
*/ + static public class Initializer extends FilterInitializer { + public Initializer() {} + + @Override + public void initFilter(FilterContainer container, Configuration config) { + container.addFilter("customFilter", + TestWebHdfsWithAuthenticationFilter.CustomizedFilter.class. + getName(), null); + } + } } private static Configuration conf; @@ -72,8 +86,8 @@ public void destroy() { @BeforeClass public static void setUp() throws IOException { conf = new Configuration(); - conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY, - CustomizedFilter.class.getName()); + conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, + CustomizedFilter.Initializer.class.getName()); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:0"); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); InetSocketAddress addr = cluster.getNameNode().getHttpAddress(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java index 4d54b6a823ba6..f13a4e990e486 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java @@ -95,6 +95,15 @@ public void service(HttpServletRequest req, HttpServletResponse res) if (uri.equals("/")) { String redirectPath = webApp.getRedirectPath(); if (redirectPath != null && !redirectPath.isEmpty()) { + if (req.getQueryString()!=null) { + StringBuilder query = new StringBuilder(); + query.append(redirectPath); + query.append("?"); + // Prevent HTTP response splitting vulnerability + query.append(req.getQueryString().replaceAll("\r", "") + .replaceAll("\n", "")); + redirectPath = query.toString(); + } res.sendRedirect(redirectPath); return; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java index 15c6d3dadb2e7..086864e8a012d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java @@ -21,6 +21,7 @@ import java.util.LinkedHashSet; import java.util.Set; +import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -50,12 +51,17 @@ private TimelineServerUtils() { */ public static void setTimelineFilters(Configuration conf, String configuredInitializers, Set defaultInitializers) { + + Set ignoreInitializers = new LinkedHashSet<>(); + ignoreInitializers.add(AuthenticationFilterInitializer.class.getName()); + ignoreInitializers.add( + ProxyUserAuthenticationFilterInitializer.class.getName()); + String[] parts = configuredInitializers.split(","); Set target = new LinkedHashSet(); for (String filterInitializer : parts) { filterInitializer = filterInitializer.trim(); - if (filterInitializer.equals( - 
AuthenticationFilterInitializer.class.getName()) || + if (ignoreInitializers.contains(filterInitializer) || filterInitializer.isEmpty()) { continue; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java index 5e73a5b8a4072..1fd19fdb294d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java @@ -28,6 +28,7 @@ import javax.servlet.http.HttpServletRequest; import org.apache.commons.codec.binary.Base64; +import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -145,6 +146,9 @@ public static void setupSecurityAndFilters(Configuration conf, } target.add(filterInitializer.getName()); } + + target.remove(ProxyUserAuthenticationFilterInitializer.class.getName()); + actualInitializers = StringUtils.join(",", target); LOG.info("Using RM authentication filter(kerberos/delegation-token)" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java index 8f1e7d74e4179..49c1d4b1e6b05 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java @@ -31,6 +31,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.HttpCrossOriginFilterInitializer; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer; import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.ReflectionUtils; @@ -159,9 +160,15 @@ protected void addFilters(Configuration conf) { String initializers = conf.get("hadoop.http.filter.initializers", ""); Set defaultInitializers = new LinkedHashSet(); if (!initializers.contains( - TimelineReaderAuthenticationFilterInitializer.class.getName())) { - defaultInitializers.add( - TimelineReaderAuthenticationFilterInitializer.class.getName()); + ProxyUserAuthenticationFilterInitializer.class.getName())) { + if (!initializers.contains( + TimelineReaderAuthenticationFilterInitializer.class.getName())) { + defaultInitializers.add( + TimelineReaderAuthenticationFilterInitializer.class.getName()); + } else { + defaultInitializers.add( + ProxyUserAuthenticationFilterInitializer.class.getName()); + } } defaultInitializers.add( diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestSecureAmFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestSecureAmFilter.java index 0a88243823aaf..5bbfc8fafa0ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestSecureAmFilter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestSecureAmFilter.java @@ -36,9 +36,9 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.KerberosTestUtils; +import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -54,6 +54,7 @@ public class TestSecureAmFilter { private String proxyHost = "localhost"; private static final File TEST_ROOT_DIR = new File("target", TestSecureAmFilter.class.getName() + "-root"); + private static final String PREFIX = "hadoop.http.authentication."; private static File httpSpnegoKeytabFile = new File( KerberosTestUtils.getKeytabFile()); private static Configuration rmconf = new Configuration(); @@ -70,7 +71,12 @@ public static void setUp() { rmconf.setBoolean(YarnConfiguration.RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER, true); rmconf.set("hadoop.http.filter.initializers", - RMAuthenticationFilterInitializer.class.getName()); + AuthenticationFilterInitializer.class.getName()); + rmconf.set(PREFIX + "type", "kerberos"); + rmconf.set(PREFIX + "kerberos.keytab", + httpSpnegoKeytabFile.getAbsolutePath()); + rmconf.set(PREFIX + "kerberos.principal", httpSpnegoPrincipal); + rmconf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY, httpSpnegoPrincipal); rmconf.set(YarnConfiguration.RM_KEYTAB, From 73954c1dd98dd9f0aa535aeefcd1484d09fd75dc Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Thu, 6 Jun 2019 11:13:39 +0800 Subject: [PATCH 0125/1308] HDDS-1612. Add 'scmcli printTopology' shell command to print datanode topology. 
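A minimal usage sketch for the new subcommand, assuming SCMCLI is reached through the usual `ozone scmcli` entry point; the hostnames, IP addresses and network locations in the commented output are placeholders that only illustrate the `hostname/ipAddress networkLocation` format printed per node state:

```
# Print the datanode network topology known to SCM, grouped by node state.
ozone scmcli printTopology

# Illustrative output (placeholder values):
# State = HEALTHY
# datanode-1/172.18.0.4 /rack-1
# datanode-2/172.18.0.5 /default-rack
```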
Contributed by Sammi Chen.(#910) --- .../hadoop/hdds/protocol/DatanodeDetails.java | 2 + hadoop-hdds/common/src/main/proto/hdds.proto | 1 + .../apache/hadoop/hdds/scm/cli/SCMCLI.java | 3 +- .../hdds/scm/cli/TopologySubcommand.java | 80 +++++++++++++++++++ 4 files changed, 85 insertions(+), 1 deletion(-) create mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index be6f44cd414d3..34de02899dcc4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -212,6 +212,8 @@ public HddsProtos.DatanodeDetailsProto getProtoBufMessage() { if (certSerialId != null) { builder.setCertSerialId(certSerialId); } + builder.setNetworkLocation(getNetworkLocation()); + for (Port port : ports) { builder.addPorts(HddsProtos.Port.newBuilder() .setName(port.getName().toString()) diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto index ddde7ea93acb5..2d5cb03ba0031 100644 --- a/hadoop-hdds/common/src/main/proto/hdds.proto +++ b/hadoop-hdds/common/src/main/proto/hdds.proto @@ -34,6 +34,7 @@ message DatanodeDetailsProto { required string hostName = 3; // hostname repeated Port ports = 4; optional string certSerialId = 5; // Certificate serial id. + optional string networkLocation = 6; // Network topology location } /** diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java index 5013a74725652..1a19a3c3ea6b4 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java @@ -84,7 +84,8 @@ CreateSubcommand.class, CloseSubcommand.class, ListPipelinesSubcommand.class, - ClosePipelineSubcommand.class + ClosePipelineSubcommand.class, + TopologySubcommand.class }, mixinStandardHelpOptions = true) public class SCMCLI extends GenericCli { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java new file mode 100644 index 0000000000000..6deccd1dadcd3 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.cli; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import picocli.CommandLine; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONING; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; + +/** + * Handler of printTopology command. + */ +@CommandLine.Command( + name = "printTopology", + description = "Print a tree of the network topology as reported by SCM", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) +public class TopologySubcommand implements Callable { + + @CommandLine.ParentCommand + private SCMCLI parent; + + private static List stateArray = new ArrayList<>(); + + static { + stateArray.add(HEALTHY); + stateArray.add(STALE); + stateArray.add(DEAD); + stateArray.add(DECOMMISSIONING); + stateArray.add(DECOMMISSIONED); + } + + @Override + public Void call() throws Exception { + try (ScmClient scmClient = parent.createScmClient()) { + for (HddsProtos.NodeState state : stateArray) { + List nodes = scmClient.queryNode(state, + HddsProtos.QueryScope.CLUSTER, ""); + if (nodes != null && nodes.size() > 0) { + // show node state + System.out.println("State = " + state.toString()); + // format "hostname/ipAddress networkLocation" + nodes.forEach(node -> { + System.out.print(node.getNodeID().getHostName() + "/" + + node.getNodeID().getIpAddress()); + System.out.println(" " + + (node.getNodeID().getNetworkLocation() != null ? + node.getNodeID().getNetworkLocation() : "NA")); + }); + } + } + return null; + } + } +} \ No newline at end of file From ec26c431f977cae8fd289e7b83c5f5c5e1946634 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Thu, 6 Jun 2019 10:06:10 +0100 Subject: [PATCH 0126/1308] HADOOP-16117. Update AWS SDK to 1.11.563. Contributed by Steve Loughran. Change-Id: I7c46ed2a6378e1370f567acf4cdcfeb93e43fa13 --- NOTICE.txt | 15 +++++---------- hadoop-project/pom.xml | 2 +- .../src/site/markdown/tools/hadoop-aws/testing.md | 4 ++++ 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index b4329b8095a59..2cda1556051b4 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -8,17 +8,12 @@ following notices: * Copyright 2011 FuseSource Corp. 
http://fusesource.com The binary distribution of this product bundles binaries of -AWS SDK for Java - Bundle 1.11.375, -AWS Java SDK for AWS KMS 1.11.375, -AWS Java SDK for Amazon S3 1.11.375, -AWS Java SDK for AWS STS 1.11.375, -JMES Path Query library 1.0, +AWS SDK for Java - Bundle 1.11.563 (https://github.com/aws/aws-sdk-java), which has the following notices: - * This software includes third party software subject to the following - copyrights: - XML parsing and utility functions from JetS3t - Copyright - 2006-2009 James Murty. - JSON parsing and utility functions from JSON.org - - Copyright 2002 JSON.org. - PKCS#1 PEM encoded private key parsing and utility - functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. + +This software includes third party software subject to the following copyrights: +- XML parsing and utility functions from JetS3t - Copyright 2006-2009 James Murty. +- PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. The binary distribution of this product bundles binaries of Gson 2.2.4, diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index f4ec48ac0bf6c..dc8cc8d6baf43 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -146,7 +146,7 @@ 1.3.1 1.0-beta-1 900 - 1.11.375 + 1.11.563 2.3.4 1.6 2.1 diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md index f61b46a8da6ea..7a591d99b60eb 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md @@ -1334,6 +1334,7 @@ bin/hadoop fs -mv $BUCKET/file $BUCKET/file2 # expect "No such file or directory" bin/hadoop fs -stat $BUCKET/file bin/hadoop fs -stat $BUCKET/file2 +bin/hadoop fs -mkdir $BUCKET/dir-no-trailing bin/hadoop fs -mv $BUCKET/file2 $BUCKET/dir-no-trailing bin/hadoop fs -stat $BUCKET/dir-no-trailing/file2 # treated the same as the file stat @@ -1348,6 +1349,9 @@ bin/hadoop fs -test -d $BUCKET/dir-no-trailing/file2 ; echo $? bin/hadoop fs -checksum $BUCKET/dir-no-trailing/file2 # expect "etag" + a long string bin/hadoop fs -D fs.s3a.etag.checksum.enabled=true -checksum $BUCKET/dir-no-trailing/file2 +bin/hadoop fs -expunge -immediate -fs $BUCKET +bin/hdfs fetchdt --webservice $BUCKET secrets.bin +bin/hdfs fetchdt -D fs.s3a.delegation.token.binding=org.apache.hadoop.fs.s3a.auth.delegation.SessionTokenBinding --webservice $BUCKET secrets.bin ``` ### Other tests From 649666e118a7cf92b676eaa56a8be318176c443e Mon Sep 17 00:00:00 2001 From: Sunil G Date: Thu, 6 Jun 2019 14:51:55 +0530 Subject: [PATCH 0127/1308] YARN-9573. DistributedShell cannot specify LogAggregationContext. Contributed by Adam Antal. 
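A minimal invocation sketch for the new `--rolling_log_pattern` option, using the option names added in this patch; the launcher form, jar path and shell command are placeholders rather than values from the patch, and the pattern mirrors the regex used in the new test:

```
# Files matching the pattern are included in rolled (in-progress) log aggregation
# via LogAggregationContext.rolledLogsIncludePattern.
yarn org.apache.hadoop.yarn.applications.distributedshell.Client \
  --jar share/hadoop/yarn/hadoop-yarn-applications-distributedshell-*.jar \
  --shell_command "sleep 60" \
  --rolling_log_pattern ".*(foo|bar)\d"
```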
--- .../applications/distributedshell/Client.java | 46 +++++++++++++------ .../TestDistributedShell.java | 27 +++++++++++ 2 files changed, 60 insertions(+), 13 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 4bd57dd27f6cc..ff0769384c78a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -68,6 +68,7 @@ import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; +import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Priority; @@ -203,7 +204,9 @@ public class Client { private String nodeAttributeSpec = ""; // log4j.properties file // if available, add to local resources and set into classpath - private String log4jPropFile = ""; + private String log4jPropFile = ""; + // rolling + private String rollingFilesPattern = ""; // Start time for client private final long clientStartTime = System.currentTimeMillis(); @@ -280,7 +283,7 @@ public static void main(String[] args) { } if (result) { LOG.info("Application completed successfully"); - System.exit(0); + System.exit(0); } LOG.error("Application failed to complete successfully"); System.exit(2); @@ -344,6 +347,8 @@ public Client(Configuration conf) throws Exception { opts.addOption("enforce_execution_type", false, "Flag to indicate whether to enforce execution type of containers"); opts.addOption("log_properties", true, "log4j.properties file"); + opts.addOption("rolling_log_pattern", true, + "pattern for files that should be aggregated in a rolling fashion"); opts.addOption("keep_containers_across_application_attempts", false, "Flag to indicate whether to keep containers across application " + "attempts." 
@@ -443,6 +448,10 @@ public boolean init(String[] args) throws ParseException { } } + if (cliParser.hasOption("rolling_log_pattern")) { + rollingFilesPattern = cliParser.getOptionValue("rolling_log_pattern"); + } + if (cliParser.hasOption("help")) { printUsage(); return false; @@ -488,7 +497,7 @@ public boolean init(String[] args) throws ParseException { if (!cliParser.hasOption("jar")) { throw new IllegalArgumentException("No jar file specified for application master"); - } + } appMasterJar = cliParser.getOptionValue("jar"); @@ -689,16 +698,16 @@ public boolean run() throws IOException, YarnException { + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity() + ", queueApplicationCount=" + queueInfo.getApplications().size() - + ", queueChildQueueCount=" + queueInfo.getChildQueues().size()); + + ", queueChildQueueCount=" + queueInfo.getChildQueues().size()); List listAclInfo = yarnClient.getQueueAclsInfo(); for (QueueUserACLInfo aclInfo : listAclInfo) { for (QueueACL userAcl : aclInfo.getUserAcls()) { LOG.info("User ACL Info for Queue" - + ", queueName=" + aclInfo.getQueueName() + + ", queueName=" + aclInfo.getQueueName() + ", userAcl=" + userAcl.name()); } - } + } if (domainId != null && domainId.length() > 0 && toCreateDomain) { prepareTimelineDomain(); @@ -795,7 +804,7 @@ public boolean run() throws IOException, YarnException { // set local resources for the application master // local files or archives as needed - // In this scenario, the jar file for the application master is part of the local resources + // In this scenario, the jar file for the application master is part of the local resources Map localResources = new HashMap(); LOG.info("Copy App Master jar from local filesystem and add to local environment"); @@ -851,7 +860,7 @@ public boolean run() throws IOException, YarnException { // To do this, we need to first copy into the filesystem that is visible // to the yarn framework. // We do not need to set this as a local resource for the application - // master as the application master does not need it. + // master as the application master does not need it. String hdfsShellScriptLocation = ""; long hdfsShellScriptLen = 0; long hdfsShellScriptTimestamp = 0; @@ -897,7 +906,7 @@ public boolean run() throws IOException, YarnException { env.put(DSConstants.DISTRIBUTEDSHELLTIMELINEDOMAIN, domainId); } - // Add AppMaster.jar location to classpath + // Add AppMaster.jar location to classpath // At some point we should not be required to add // the hadoop specific classpaths to the env. // It should be provided out of the box. 
@@ -1001,7 +1010,7 @@ public boolean run() throws IOException, YarnException { LOG.info("Completed setting up app master command " + command.toString()); List commands = new ArrayList(); - commands.add(command.toString()); + commands.add(command.toString()); // Set up the container launch context for the application master ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance( @@ -1062,6 +1071,8 @@ public boolean run() throws IOException, YarnException { // Set the queue to which this application is to be submitted in the RM appContext.setQueue(amQueue); + specifyLogAggregationContext(appContext); + // Submit the application to the applications manager // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest); // Ignore the response as either a valid response object is returned on success @@ -1079,6 +1090,15 @@ public boolean run() throws IOException, YarnException { } + @VisibleForTesting + void specifyLogAggregationContext(ApplicationSubmissionContext appContext) { + if (!rollingFilesPattern.isEmpty()) { + LogAggregationContext logAggregationContext = LogAggregationContext + .newInstance(null, null, rollingFilesPattern, ""); + appContext.setLogAggregationContext(logAggregationContext); + } + } + /** * Monitor the submitted application for completion. * Kill application if time expires. @@ -1127,9 +1147,9 @@ private boolean monitorApplication(ApplicationId appId) + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop"); return false; - } + } } - else if (YarnApplicationState.KILLED == state + else if (YarnApplicationState.KILLED == state || YarnApplicationState.FAILED == state) { LOG.info("Application did not finish." + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString() @@ -1163,7 +1183,7 @@ private void forceKillApplication(ApplicationId appId) // Response can be ignored as it is non-null on success or // throws an exception in case of failures - yarnClient.killApplication(appId); + yarnClient.killApplication(appId); } private void addToLocalResources(FileSystem fs, String fileSrcPath, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java index 025e543881e2e..ba7bf7a560db1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.applications.distributedshell; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -62,12 +63,14 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import 
org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain; @@ -97,6 +100,7 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin; import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree; +import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; import org.junit.After; import org.junit.Assert; @@ -958,6 +962,29 @@ public void testDSShellWithCustomLogPropertyFile() throws Exception { Assert.assertTrue(LOG_AM.isDebugEnabled()); } + @Test + public void testSpecifyingLogAggregationContext() throws Exception { + String regex = ".*(foo|bar)\\d"; + String[] args = { + "--jar", + APPMASTER_JAR, + "--shell_command", + "echo", + "--rolling_log_pattern", + regex + }; + final Client client = + new Client(new Configuration(yarnCluster.getConfig())); + Assert.assertTrue(client.init(args)); + + ApplicationSubmissionContext context = + Records.newRecord(ApplicationSubmissionContext.class); + client.specifyLogAggregationContext(context); + LogAggregationContext logContext = context.getLogAggregationContext(); + assertEquals(logContext.getRolledLogsIncludePattern(), regex); + assertTrue(logContext.getRolledLogsExcludePattern().isEmpty()); + } + public void testDSShellWithCommands() throws Exception { String[] args = { From f7c77b395f6f01b4e687b9feb2e77b907d8e943f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Thu, 6 Jun 2019 11:53:18 +0200 Subject: [PATCH 0128/1308] HDDS-1458. Create a maven profile to run fault injection tests. Contributed by Eric Yang. 
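As a quick reference alongside the README changes below, the new profile and the relocated blockade tests can be exercised roughly as follows (paths are the ones created by this patch; blockade, pytest==2.8.7 and a working docker setup are assumed to be installed):

```
# Run the network fault-injection tests through the new Maven profile.
cd hadoop-ozone/fault-injection-test/network-tests
mvn clean verify -Pit

# Or, against a released binary, from the Ozone distribution directory:
python -m pytest -s tests/blockade/
```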
--- hadoop-ozone/.gitignore | 20 ++++ .../dev-support/bin/dist-layout-stitching | 5 +- .../network-tests/pom.xml | 103 ++++++++++++++++++ .../src/test}/blockade/README.md | 20 ++-- .../test}/blockade/blockadeUtils/__init__.py | 0 .../test}/blockade/blockadeUtils/blockade.py | 0 .../test}/blockade/clusterUtils/__init__.py | 0 .../blockade/clusterUtils/cluster_utils.py | 2 +- .../src/test}/blockade/conftest.py | 0 .../src/test}/blockade/ozone/__init__.py | 0 .../src/test}/blockade/ozone/cluster.py | 20 +++- .../blockade/test_blockade_client_failure.py | 18 ++- .../test_blockade_datanode_isolation.py | 3 +- .../src/test}/blockade/test_blockade_flaky.py | 18 ++- .../blockade/test_blockade_mixed_failure.py | 18 ++- ...ckade_mixed_failure_three_nodes_isolate.py | 18 ++- .../test_blockade_mixed_failure_two_nodes.py | 18 ++- .../blockade/test_blockade_scm_isolation.py | 18 ++- .../network-tests/src/test}/blockade/util.py | 0 .../src/test/compose/docker-compose.yaml | 50 +++++++++ .../src/test/compose/docker-config | 77 +++++++++++++ hadoop-ozone/fault-injection-test/pom.xml | 35 ++++++ hadoop-ozone/pom.xml | 2 +- pom.ozone.xml | 2 +- 24 files changed, 404 insertions(+), 43 deletions(-) create mode 100644 hadoop-ozone/.gitignore create mode 100644 hadoop-ozone/fault-injection-test/network-tests/pom.xml rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/README.md (71%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/blockadeUtils/__init__.py (100%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/blockadeUtils/blockade.py (100%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/clusterUtils/__init__.py (100%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/clusterUtils/cluster_utils.py (99%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/conftest.py (100%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/ozone/__init__.py (100%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/ozone/cluster.py (92%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/test_blockade_client_failure.py (88%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/test_blockade_datanode_isolation.py (99%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/test_blockade_flaky.py (79%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/test_blockade_mixed_failure.py (91%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/test_blockade_mixed_failure_three_nodes_isolate.py (94%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/test_blockade_mixed_failure_two_nodes.py (92%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/test_blockade_scm_isolation.py (91%) rename hadoop-ozone/{dist/src/main => fault-injection-test/network-tests/src/test}/blockade/util.py (100%) create mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-compose.yaml create mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-config create mode 100644 
hadoop-ozone/fault-injection-test/pom.xml diff --git a/hadoop-ozone/.gitignore b/hadoop-ozone/.gitignore new file mode 100644 index 0000000000000..93c683135f1a2 --- /dev/null +++ b/hadoop-ozone/.gitignore @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*~ +*.pyc +.blockade +.cache +__pycache__ diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching index 6178dfe72aa36..c9f81bd99769f 100755 --- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching @@ -83,6 +83,7 @@ run mkdir -p ./bin run mkdir -p ./sbin run mkdir -p ./etc run mkdir -p ./libexec +run mkdir -p ./tests run cp -r "${ROOT}/hadoop-common-project/hadoop-common/src/main/conf" "etc/hadoop" run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties" "etc/hadoop" @@ -108,6 +109,9 @@ run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/workers.sh" "sb run cp "${ROOT}/hadoop-ozone/common/src/main/bin/start-ozone.sh" "sbin/" run cp "${ROOT}/hadoop-ozone/common/src/main/bin/stop-ozone.sh" "sbin/" +# fault injection tests +run cp -r "${ROOT}/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade" tests + #shaded datanode service run mkdir -p "./share/hadoop/ozoneplugin" run cp "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}-plugin.jar" "./share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin-${HDDS_VERSION}.jar" @@ -119,7 +123,6 @@ cp -r "${ROOT}/hadoop-hdds/docs/target/classes/docs" ./ #compose files are preprocessed: properties (eg. project.version) are replaced first by maven. run cp -p -R "${ROOT}/hadoop-ozone/dist/target/compose" . run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/smoketest" . -run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/blockade" . run cp -p -r "${ROOT}/hadoop-ozone/dist/target/k8s" kubernetes run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/Dockerfile" . 
diff --git a/hadoop-ozone/fault-injection-test/network-tests/pom.xml b/hadoop-ozone/fault-injection-test/network-tests/pom.xml new file mode 100644 index 0000000000000..3b29480c00675 --- /dev/null +++ b/hadoop-ozone/fault-injection-test/network-tests/pom.xml @@ -0,0 +1,103 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-ozone-fault-injection-test + 0.5.0-SNAPSHOT + + hadoop-ozone-network-tests + Apache Hadoop Ozone Network Tests + Apache Hadoop Ozone Network Tests + jar + + + + + maven-resources-plugin + 3.1.0 + + + copy-resources + process-resources + + copy-resources + + + ${project.build.directory} + + + src/test/compose + true + + docker-compose.yaml + docker-config + + + + + + + + + + + + + it + + ${basedir}../../dist/target/ozone-${project.version} + + + + + org.codehaus.mojo + exec-maven-plugin + + + integration-test + + exec + + + python + + -m + pytest + -s + ${basedir}/src/test/blockade/ + + + + ${ozone.home} + + + ${project.build.directory} + + + + + + + + + + + + diff --git a/hadoop-ozone/dist/src/main/blockade/README.md b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/README.md similarity index 71% rename from hadoop-ozone/dist/src/main/blockade/README.md rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/README.md index fb582054998a3..b9f3c7345230b 100644 --- a/hadoop-ozone/dist/src/main/blockade/README.md +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/README.md @@ -18,19 +18,25 @@ Following python packages need to be installed before running the tests : 1. blockade 2. pytest==2.8.7 +Running test as part of the maven build: + +mvn clean verify -Pit + +Running test as part of the released binary: + You can execute all blockade tests with following command-lines: ``` cd $DIRECTORY_OF_OZONE -python -m pytest -s blockade/ +python -m pytest -s tests/blockade/ ``` You can also execute fewer blockade tests with following command-lines: ``` cd $DIRECTORY_OF_OZONE -python -m pytest -s blockade/ -e.g: python -m pytest -s blockade/test_blockade_datanode_isolation.py +python -m pytest -s tests/blockade/ +e.g: python -m pytest -s tests/blockade/test_blockade_datanode_isolation.py ``` You can change the default 'sleep' interval in the tests with following @@ -38,9 +44,9 @@ command-lines: ``` cd $DIRECTORY_OF_OZONE -python -m pytest -s blockade/ --containerStatusSleep= +python -m pytest -s tests/blockade/ --containerStatusSleep= -e.g: python -m pytest -s blockade/ --containerStatusSleep=720 +e.g: python -m pytest -s tests/blockade/ --containerStatusSleep=720 ``` By default, second phase of the tests will not be run. 
@@ -49,6 +55,6 @@ command-lines: ``` cd $DIRECTORY_OF_OZONE -python -m pytest -s blockade/ --runSecondPhase=true +python -m pytest -s tests/blockade/ --runSecondPhase=true -``` \ No newline at end of file +``` diff --git a/hadoop-ozone/dist/src/main/blockade/blockadeUtils/__init__.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/blockadeUtils/__init__.py similarity index 100% rename from hadoop-ozone/dist/src/main/blockade/blockadeUtils/__init__.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/blockadeUtils/__init__.py diff --git a/hadoop-ozone/dist/src/main/blockade/blockadeUtils/blockade.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/blockadeUtils/blockade.py similarity index 100% rename from hadoop-ozone/dist/src/main/blockade/blockadeUtils/blockade.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/blockadeUtils/blockade.py diff --git a/hadoop-ozone/dist/src/main/blockade/clusterUtils/__init__.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/clusterUtils/__init__.py similarity index 100% rename from hadoop-ozone/dist/src/main/blockade/clusterUtils/__init__.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/clusterUtils/__init__.py diff --git a/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/clusterUtils/cluster_utils.py similarity index 99% rename from hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/clusterUtils/cluster_utils.py index cf67380255c37..53e3fa037f9c8 100644 --- a/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/clusterUtils/cluster_utils.py @@ -332,4 +332,4 @@ def find_om_scm_client_datanodes(cls, container_list): datanodes = sorted( list(filter(lambda x: 'datanode' in x, container_list))) client = filter(lambda x: 'ozone_client' in x, container_list) - return om, scm, client, datanodes \ No newline at end of file + return om, scm, client, datanodes diff --git a/hadoop-ozone/dist/src/main/blockade/conftest.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/conftest.py similarity index 100% rename from hadoop-ozone/dist/src/main/blockade/conftest.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/conftest.py diff --git a/hadoop-ozone/dist/src/main/blockade/ozone/__init__.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/__init__.py similarity index 100% rename from hadoop-ozone/dist/src/main/blockade/ozone/__init__.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/__init__.py diff --git a/hadoop-ozone/dist/src/main/blockade/ozone/cluster.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py similarity index 92% rename from hadoop-ozone/dist/src/main/blockade/ozone/cluster.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py index 4347f86c0d371..f75b3d2c8c298 100644 --- a/hadoop-ozone/dist/src/main/blockade/ozone/cluster.py +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py @@ -21,6 +21,7 @@ import subprocess import yaml import util +from os import environ from subprocess import call from blockadeUtils.blockade import Blockade @@ -44,11 
+45,18 @@ class Configuration: """ def __init__(self): - __parent_dir__ = os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__)))) - self.docker_compose_file = os.path.join(__parent_dir__, - "compose", "ozoneblockade", - "docker-compose.yaml") + if "MAVEN_TEST" in os.environ: + compose_dir = environ.get("MAVEN_TEST") + self.docker_compose_file = os.path.join(compose_dir, "docker-compose.yaml") + elif "OZONE_HOME" in os.environ: + compose_dir = os.path.join(environ.get("OZONE_HOME"), "compose", "ozoneblockade") + self.docker_compose_file = os.path.join(compose_dir, "docker-compose.yaml") + else: + __parent_dir__ = os.path.dirname(os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__))))) + self.docker_compose_file = os.path.join(__parent_dir__, + "compose", "ozoneblockade", + "docker-compose.yaml") self._datanode_count = 3 os.environ["DOCKER_COMPOSE_FILE"] = self.docker_compose_file @@ -292,4 +300,4 @@ def container_state_predicate(self, datanode, state): container_states_dn = self.get_container_states(datanode) if container_states_dn and container_states_dn.popitem()[1] == state: return True - return False \ No newline at end of file + return False diff --git a/hadoop-ozone/dist/src/main/blockade/test_blockade_client_failure.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py similarity index 88% rename from hadoop-ozone/dist/src/main/blockade/test_blockade_client_failure.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py index 8c0b518493955..9e1b04f68261e 100644 --- a/hadoop-ozone/dist/src/main/blockade/test_blockade_client_failure.py +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py @@ -19,14 +19,24 @@ import re import time import logging +from os import environ from blockadeUtils.blockade import Blockade from clusterUtils.cluster_utils import ClusterUtils logger = logging.getLogger(__name__) -parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -FILE = os.path.join(parent_dir, "compose", "ozoneblockade", - "docker-compose.yaml") +if "MAVEN_TEST" in os.environ: + compose_dir = environ.get("MAVEN_TEST") + FILE = os.path.join(compose_dir, "docker-compose.yaml") +elif "OZONE_HOME" in os.environ: + compose_dir = environ.get("OZONE_HOME") + FILE = os.path.join(compose_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") +else: + parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + FILE = os.path.join(parent_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") + os.environ["DOCKER_COMPOSE_FILE"] = FILE SCALE = 3 CONTAINER_LIST = [] @@ -118,4 +128,4 @@ def test_client_failure_isolate_one_datanode(): test_key_name, "/tmp/") key_checksum = ClusterUtils.find_checksum(FILE, "/tmp/%s" % test_key_name) - assert key_checksum == ORIG_CHECKSUM \ No newline at end of file + assert key_checksum == ORIG_CHECKSUM diff --git a/hadoop-ozone/dist/src/main/blockade/test_blockade_datanode_isolation.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py similarity index 99% rename from hadoop-ozone/dist/src/main/blockade/test_blockade_datanode_isolation.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py index dfa1b703ba814..85d99e213da92 100644 --- 
a/hadoop-ozone/dist/src/main/blockade/test_blockade_datanode_isolation.py +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py @@ -22,7 +22,6 @@ logger = logging.getLogger(__name__) - def setup_function(function): global cluster cluster = Cluster.create() @@ -135,4 +134,4 @@ def test_datanode_isolation_all(): util.wait_until( lambda: cluster.container_state_predicate_all_closed(cluster.datanodes), int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) - assert cluster.container_state_predicate_all_closed(cluster.datanodes) \ No newline at end of file + assert cluster.container_state_predicate_all_closed(cluster.datanodes) diff --git a/hadoop-ozone/dist/src/main/blockade/test_blockade_flaky.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_flaky.py similarity index 79% rename from hadoop-ozone/dist/src/main/blockade/test_blockade_flaky.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_flaky.py index a79bd4fcc2afc..6f1df18bec025 100644 --- a/hadoop-ozone/dist/src/main/blockade/test_blockade_flaky.py +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_flaky.py @@ -19,14 +19,24 @@ import logging import random import pytest +from os import environ from blockadeUtils.blockade import Blockade from ozone.cluster import Cluster logger = logging.getLogger(__name__) -parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -FILE = os.path.join(parent_dir, "compose", "ozoneblockade", - "docker-compose.yaml") +if "MAVEN_TEST" in os.environ: + compose_dir = environ.get("MAVEN_TEST") + FILE = os.path.join(compose_dir, "docker-compose.yaml") +elif "OZONE_HOME" in os.environ: + compose_dir = environ.get("OZONE_HOME") + FILE = os.path.join(compose_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") +else: + parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + FILE = os.path.join(parent_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") + os.environ["DOCKER_COMPOSE_FILE"] = FILE SCALE = 6 CONTAINER_LIST = [] @@ -64,4 +74,4 @@ def test_flaky(flaky_node): Blockade.make_flaky(flaky_container_name) Blockade.blockade_status() exit_code, output = cluster.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output \ No newline at end of file + assert exit_code == 0, "freon run failed with output=[%s]" % output diff --git a/hadoop-ozone/dist/src/main/blockade/test_blockade_mixed_failure.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure.py similarity index 91% rename from hadoop-ozone/dist/src/main/blockade/test_blockade_mixed_failure.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure.py index 8493ce02729f0..86d5311d19226 100644 --- a/hadoop-ozone/dist/src/main/blockade/test_blockade_mixed_failure.py +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure.py @@ -19,13 +19,23 @@ import time import logging import re +from os import environ from blockadeUtils.blockade import Blockade from clusterUtils.cluster_utils import ClusterUtils logger = logging.getLogger(__name__) -parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -FILE = os.path.join(parent_dir, "compose", "ozoneblockade", - "docker-compose.yaml") +if "MAVEN_TEST" in os.environ: + compose_dir = environ.get("MAVEN_TEST") + FILE = 
os.path.join(compose_dir, "docker-compose.yaml") +elif "OZONE_HOME" in os.environ: + compose_dir = environ.get("OZONE_HOME") + FILE = os.path.join(compose_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") +else: + parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + FILE = os.path.join(parent_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") + os.environ["DOCKER_COMPOSE_FILE"] = FILE SCALE = 3 INCREASED_SCALE = 5 @@ -146,4 +156,4 @@ def test_one_dn_isolate_other_dn(run_second_phase): "The container should have at least three closed replicas." _, output = \ ClusterUtils.run_freon(FILE, 1, 1, 1, 10240, "RATIS", "THREE") - assert re.search("Status: Success", output) is not None \ No newline at end of file + assert re.search("Status: Success", output) is not None diff --git a/hadoop-ozone/dist/src/main/blockade/test_blockade_mixed_failure_three_nodes_isolate.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_three_nodes_isolate.py similarity index 94% rename from hadoop-ozone/dist/src/main/blockade/test_blockade_mixed_failure_three_nodes_isolate.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_three_nodes_isolate.py index 0e500258477c1..ab4c2d4869971 100644 --- a/hadoop-ozone/dist/src/main/blockade/test_blockade_mixed_failure_three_nodes_isolate.py +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_three_nodes_isolate.py @@ -19,13 +19,23 @@ import time import logging import re +from os import environ from blockadeUtils.blockade import Blockade from clusterUtils.cluster_utils import ClusterUtils logger = logging.getLogger(__name__) -parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -FILE = os.path.join(parent_dir, "compose", "ozoneblockade", - "docker-compose.yaml") +if "MAVEN_TEST" in os.environ: + compose_dir = environ.get("MAVEN_TEST") + FILE = os.path.join(compose_dir, "docker-compose.yaml") +elif "OZONE_HOME" in os.environ: + compose_dir = environ.get("OZONE_HOME") + FILE = os.path.join(compose_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") +else: + parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + FILE = os.path.join(parent_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") + os.environ["DOCKER_COMPOSE_FILE"] = FILE SCALE = 3 INCREASED_SCALE = 5 @@ -222,4 +232,4 @@ def test_three_dns_isolate_threescmfailure(run_second_phase): count_closed_container_datanodes = filter( lambda x: x == 'CLOSED', all_datanodes_container_status) assert len(count_closed_container_datanodes) == 3, \ - "The container should have three closed replicas." \ No newline at end of file + "The container should have three closed replicas." 
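Each of the relocated blockade test modules above resolves the docker-compose file the same way: prefer the directory given by MAVEN_TEST, otherwise fall back to OZONE_HOME/compose/ozoneblockade, and finally derive the path from the test module's own location, exporting the result as DOCKER_COMPOSE_FILE. The sketch below condenses that resolution order into a single hypothetical helper for reference only; the function name find_docker_compose_file and the levels_up parameter are illustrative and are not part of this patch, which inlines the logic in each module.

```python
# Hypothetical consolidation of the compose-file lookup repeated in the
# relocated blockade tests; illustrative only, not part of the patch.
import os


def find_docker_compose_file(levels_up=3):
    """Return the docker-compose.yaml path using the same precedence as the tests."""
    if "MAVEN_TEST" in os.environ:
        # Maven-driven runs point MAVEN_TEST at a directory containing the compose file.
        compose_dir = os.environ["MAVEN_TEST"]
        compose_file = os.path.join(compose_dir, "docker-compose.yaml")
    elif "OZONE_HOME" in os.environ:
        # A distribution keeps the blockade compose files under compose/ozoneblockade.
        compose_dir = os.path.join(os.environ["OZONE_HOME"],
                                   "compose", "ozoneblockade")
        compose_file = os.path.join(compose_dir, "docker-compose.yaml")
    else:
        # Fall back to walking up from this file towards the dist tree.
        path = os.path.realpath(__file__)
        for _ in range(levels_up):
            path = os.path.dirname(path)
        compose_file = os.path.join(path, "compose", "ozoneblockade",
                                    "docker-compose.yaml")
    os.environ["DOCKER_COMPOSE_FILE"] = compose_file
    return compose_file
```

A shared helper like this would remove the duplication across the test modules, but the change as written repeats the lookup at import time in each file so that every module stays self-contained.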
diff --git a/hadoop-ozone/dist/src/main/blockade/test_blockade_mixed_failure_two_nodes.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_two_nodes.py similarity index 92% rename from hadoop-ozone/dist/src/main/blockade/test_blockade_mixed_failure_two_nodes.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_two_nodes.py index b8df2fa54bbe1..03da7d03ed530 100644 --- a/hadoop-ozone/dist/src/main/blockade/test_blockade_mixed_failure_two_nodes.py +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_two_nodes.py @@ -19,13 +19,23 @@ import time import logging import re +from os import environ from blockadeUtils.blockade import Blockade from clusterUtils.cluster_utils import ClusterUtils logger = logging.getLogger(__name__) -parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -FILE = os.path.join(parent_dir, "compose", "ozoneblockade", - "docker-compose.yaml") +if "MAVEN_TEST" in os.environ: + compose_dir = environ.get("MAVEN_TEST") + FILE = os.path.join(compose_dir, "docker-compose.yaml") +elif "OZONE_HOME" in os.environ: + compose_dir = environ.get("OZONE_HOME") + FILE = os.path.join(compose_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") +else: + parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + FILE = os.path.join(parent_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") + os.environ["DOCKER_COMPOSE_FILE"] = FILE SCALE = 3 INCREASED_SCALE = 5 @@ -170,4 +180,4 @@ def test_two_dns_isolate_scm_different_partition(run_second_phase): assert len(count_closed_container_datanodes) >= 3 _, output = \ ClusterUtils.run_freon(FILE, 1, 1, 1, 10240, "RATIS", "THREE") - assert re.search("Status: Success", output) is not None \ No newline at end of file + assert re.search("Status: Success", output) is not None diff --git a/hadoop-ozone/dist/src/main/blockade/test_blockade_scm_isolation.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_scm_isolation.py similarity index 91% rename from hadoop-ozone/dist/src/main/blockade/test_blockade_scm_isolation.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_scm_isolation.py index 06f42637469cb..47bbb76b76c7d 100644 --- a/hadoop-ozone/dist/src/main/blockade/test_blockade_scm_isolation.py +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_scm_isolation.py @@ -19,13 +19,23 @@ import time import re import logging +from os import environ from blockadeUtils.blockade import Blockade from clusterUtils.cluster_utils import ClusterUtils logger = logging.getLogger(__name__) -parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -FILE = os.path.join(parent_dir, "compose", "ozoneblockade", - "docker-compose.yaml") +if "MAVEN_TEST" in os.environ: + compose_dir = environ.get("MAVEN_TEST") + FILE = os.path.join(compose_dir, "docker-compose.yaml") +elif "OZONE_HOME" in os.environ: + compose_dir = environ.get("OZONE_HOME") + FILE = os.path.join(compose_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") +else: + parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + FILE = os.path.join(parent_dir, "compose", "ozoneblockade", \ + "docker-compose.yaml") + os.environ["DOCKER_COMPOSE_FILE"] = FILE SCALE = 3 INCREASED_SCALE = 5 @@ -154,4 +164,4 @@ def 
test_scm_isolation_two_node(run_second_phase): assert len(closed_container_datanodes) >= 3 _, output = \ ClusterUtils.run_freon(FILE, 1, 1, 1, 10240, "RATIS", "THREE") - assert re.search("Status: Success", output) is not None \ No newline at end of file + assert re.search("Status: Success", output) is not None diff --git a/hadoop-ozone/dist/src/main/blockade/util.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/util.py similarity index 100% rename from hadoop-ozone/dist/src/main/blockade/util.py rename to hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/util.py diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-compose.yaml b/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-compose.yaml new file mode 100644 index 0000000000000..6c8e0fb78bed3 --- /dev/null +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-compose.yaml @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version: "3" +services: + datanode: + image: ${user.name}/ozone:${project.version} + ports: + - 9864 + command: ["/opt/hadoop/bin/ozone","datanode"] + env_file: + - ./docker-config + om: + image: ${user.name}/ozone:${project.version} + ports: + - 9874:9874 + environment: + ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + env_file: + - ./docker-config + command: ["/opt/hadoop/bin/ozone","om"] + scm: + image: ${user.name}/ozone:${project.version} + ports: + - 9876:9876 + env_file: + - ./docker-config + environment: + ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + command: ["/opt/hadoop/bin/ozone","scm"] + ozone_client: + image: ${user.name}/ozone:${project.version} + ports: + - 9869 + command: ["tail", "-f","/etc/passwd"] + env_file: + - ./docker-config diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-config b/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-config new file mode 100644 index 0000000000000..1db1a798d3598 --- /dev/null +++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-config @@ -0,0 +1,77 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +OZONE-SITE.XML_ozone.om.address=om +OZONE-SITE.XML_ozone.om.http-address=om:9874 +OZONE-SITE.XML_ozone.scm.names=scm +OZONE-SITE.XML_ozone.enabled=True +OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id +OZONE-SITE.XML_ozone.scm.block.client.address=scm +OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata +OZONE-SITE.XML_ozone.handler.type=distributed +OZONE-SITE.XML_ozone.scm.client.address=scm +OZONE-SITE.XML_ozone.scm.dead.node.interval=5m +OZONE-SITE.XML_ozone.replication=1 +OZONE-SITE.XML_hdds.datanode.dir=/data/hdds +HDFS-SITE.XML_rpc.metrics.quantile.enable=true +HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 +LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout +LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender +LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR +LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN +LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR + +#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. +#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm + +#LOG4J2.PROPERTIES_* are for Ozone Audit Logging +LOG4J2.PROPERTIES_monitorInterval=30 +LOG4J2.PROPERTIES_filter=read,write +LOG4J2.PROPERTIES_filter.read.type=MarkerFilter +LOG4J2.PROPERTIES_filter.read.marker=READ +LOG4J2.PROPERTIES_filter.read.onMatch=DENY +LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL +LOG4J2.PROPERTIES_filter.write.type=MarkerFilter +LOG4J2.PROPERTIES_filter.write.marker=WRITE +LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL +LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL +LOG4J2.PROPERTIES_appenders=console, rolling +LOG4J2.PROPERTIES_appender.console.type=Console +LOG4J2.PROPERTIES_appender.console.name=STDOUT +LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout +LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n +LOG4J2.PROPERTIES_appender.rolling.type=RollingFile +LOG4J2.PROPERTIES_appender.rolling.name=RollingFile +LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log +LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz +LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout +LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n +LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies +LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy +LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 +LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy +LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB +LOG4J2.PROPERTIES_loggers=audit 
+LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger +LOG4J2.PROPERTIES_logger.audit.name=OMAudit +LOG4J2.PROPERTIES_logger.audit.level=INFO +LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling +LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile +LOG4J2.PROPERTIES_rootLogger.level=INFO +LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout +LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml new file mode 100644 index 0000000000000..395c5340bf15c --- /dev/null +++ b/hadoop-ozone/fault-injection-test/pom.xml @@ -0,0 +1,35 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-ozone + 0.5.0-SNAPSHOT + + hadoop-ozone-fault-injection-test + 0.5.0-SNAPSHOT + Apache Hadoop Ozone Fault Injection Tests + Apache Hadoop Ozone Fault Injection Tests + pom + + + network-tests + + + diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 2d80c3a284c79..4a6df28534959 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -53,6 +53,7 @@ ozone-recon-codegen upgrade csi + fault-injection-test @@ -137,7 +138,6 @@ ${ozone.version} test-jar - org.apache.hadoop hadoop-hdds-common diff --git a/pom.ozone.xml b/pom.ozone.xml index 413ca3bb4e3a2..ff841bd6ddcb4 100644 --- a/pom.ozone.xml +++ b/pom.ozone.xml @@ -18,7 +18,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 4.0.0 org.apache.hadoop hadoop-main-ozone - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT Apache Hadoop Ozone Main Apache Hadoop Ozone Main pom From 829848ba2e3e04e3b7bf5a02e0379470eec0809e Mon Sep 17 00:00:00 2001 From: Huan-Ping Su Date: Thu, 6 Jun 2019 12:49:20 +0100 Subject: [PATCH 0129/1308] HADOOP-16344. Make DurationInfo public unstable. Contributed by Huan-Ping Su Change-Id: I64a94cf382f9db78b4ef49a3912f25f0d906af7c --- .../src/main/java/org/apache/hadoop/util/DurationInfo.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java index 9dd75db27c733..2a1b78d0cf00b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java @@ -29,8 +29,8 @@ * This allows it to be used in a try-with-resources clause, and have the * duration automatically logged. */ -@InterfaceAudience.Private -@InterfaceStability.Unstable +@Public +@Unstable public class DurationInfo extends OperationDuration implements AutoCloseable { private final String text; From 9fded678ffcda12b980ab8f01914168de38a8c73 Mon Sep 17 00:00:00 2001 From: supratimdeka <46919641+supratimdeka@users.noreply.github.com> Date: Thu, 6 Jun 2019 18:53:37 +0530 Subject: [PATCH 0130/1308] HDDS-1621. writeData in ChunkUtils should not use AsynchronousFileChannel. 
Contributed by Supratim Deka (#917) --- .../keyvalue/helpers/ChunkUtils.java | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java index 2781bfacca10e..2993bbb81af09 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java @@ -43,6 +43,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.AsynchronousFileChannel; +import java.nio.channels.FileChannel; import java.nio.channels.FileLock; import java.nio.file.StandardOpenOption; import java.security.NoSuchAlgorithmException; @@ -84,23 +85,20 @@ public static void writeData(File chunkFile, ChunkInfo chunkInfo, throw new StorageContainerException(err, INVALID_WRITE_SIZE); } - AsynchronousFileChannel file = null; + FileChannel file = null; FileLock lock = null; try { long writeTimeStart = Time.monotonicNow(); - file = sync ? - AsynchronousFileChannel.open(chunkFile.toPath(), - StandardOpenOption.CREATE, - StandardOpenOption.WRITE, - StandardOpenOption.SPARSE, - StandardOpenOption.SYNC) : - AsynchronousFileChannel.open(chunkFile.toPath(), + + // skip SYNC and DSYNC to reduce contention on file.lock + file = FileChannel.open(chunkFile.toPath(), StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.SPARSE); - lock = file.lock().get(); - int size = file.write(data, chunkInfo.getOffset()).get(); + + lock = file.lock(); + int size = file.write(data, chunkInfo.getOffset()); // Increment volumeIO stats here. volumeIOStats.incWriteTime(Time.monotonicNow() - writeTimeStart); volumeIOStats.incWriteOpCount(); @@ -128,6 +126,10 @@ public static void writeData(File chunkFile, ChunkInfo chunkInfo, } if (file != null) { try { + if (sync) { + // ensure data and metadata is persisted. Outside the lock + file.force(true); + } file.close(); } catch (IOException e) { throw new StorageContainerException("Error closing chunk file", From 09763925025a3709e6098186348e1afd80cb9f71 Mon Sep 17 00:00:00 2001 From: Weiwei Yang Date: Thu, 6 Jun 2019 19:55:03 +0800 Subject: [PATCH 0131/1308] YARN-9590. Correct incompatible, incomplete and redundant activities. Contributed by Tao Yang. 
--- .../scheduler/activities/ActivitiesLogger.java | 6 +++--- .../scheduler/activities/ActivitiesManager.java | 4 +++- .../scheduler/capacity/CapacityScheduler.java | 6 +++++- .../resourcemanager/scheduler/capacity/LeafQueue.java | 3 +++ .../allocator/AbstractContainerAllocator.java | 11 ++++------- .../capacity/allocator/RegularContainerAllocator.java | 8 +++++++- .../resourcemanager/webapp/dao/AppAllocationInfo.java | 6 ++++++ .../webapp/TestRMWebServicesSchedulerActivities.java | 4 ++-- ...vicesSchedulerActivitiesWithMultiNodesEnabled.java | 6 +++--- 9 files changed, 36 insertions(+), 18 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java index 58b6c613c9692..e698d1a4d0039 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java @@ -102,20 +102,20 @@ public static void recordAppActivityWithoutAllocation( // Add application-container activity into specific node allocation. activitiesManager.addSchedulingActivityForNode(nodeId, requestName, null, - priorityStr, ActivityState.SKIPPED, diagnostic, type, + priorityStr, appState, diagnostic, type, null); type = "request"; // Add application-container activity into specific node allocation. activitiesManager.addSchedulingActivityForNode(nodeId, application.getApplicationId().toString(), requestName, - priorityStr, ActivityState.SKIPPED, + priorityStr, appState, ActivityDiagnosticConstant.EMPTY, type, allocationRequestId); } // Add queue-application activity into specific node allocation. activitiesManager.addSchedulingActivityForNode(nodeId, application.getQueueName(), application.getApplicationId().toString(), - application.getPriority().toString(), ActivityState.SKIPPED, + application.getPriority().toString(), appState, schedulerKey != null ? 
ActivityDiagnosticConstant.EMPTY : diagnostic, "app", null); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java index b8ef263747477..2c314727c9d55 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java @@ -339,8 +339,10 @@ void finishAppAllocationRecording(ApplicationId applicationId, appAllocations = curAppAllocations; } } - if (appAllocations.size() == appActivitiesMaxQueueLength) { + int curQueueLength = appAllocations.size(); + while (curQueueLength >= appActivitiesMaxQueueLength) { appAllocations.poll(); + --curQueueLength; } appAllocations.add(appAllocation); Long stopTime = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 57ee69026f64d..e59abee6b51f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -1597,7 +1597,7 @@ private void allocateFromReservedContainer(FiCaSchedulerNode node, } else{ ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node, queue.getParent().getQueueName(), queue.getQueueName(), - ActivityState.ACCEPTED, ActivityDiagnosticConstant.EMPTY); + ActivityState.SKIPPED, ActivityDiagnosticConstant.EMPTY); ActivitiesLogger.NODE.finishAllocatedNodeAllocation(activitiesManager, node, reservedContainer.getContainerId(), AllocationState.SKIPPED); } @@ -1687,6 +1687,10 @@ private CSAssignment allocateContainersOnMultiNodes( } LOG.debug("This node or this node partition doesn't have available or " + "killable resource"); + ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, null, + "", getRootQueue().getQueueName(), ActivityState.REJECTED, + ActivityDiagnosticConstant.NOT_ABLE_TO_ACCESS_PARTITION + " " + + candidates.getPartition()); return null; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index ca61dc6bad9c5..a178f9e9a0b4e 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -1188,6 +1188,9 @@ public CSAssignment assignContainers(Resource clusterResource, application.updateNodeInfoForAMDiagnostics(node); } else if (assignment.getSkippedType() == CSAssignment.SkippedType.QUEUE_LIMIT) { + ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node, + getParent().getQueueName(), getQueueName(), ActivityState.SKIPPED, + ActivityDiagnosticConstant.QUEUE_SKIPPED_HEADROOM); return assignment; } else{ // If we don't allocate anything, and it is not skipped by application, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java index 829625220e0e4..90b088efdfd43 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java @@ -109,16 +109,10 @@ protected CSAssignment getCSAssignmentFromAllocateResult( allocatedResource); if (rmContainer != null) { - ActivitiesLogger.APP.recordAppActivityWithAllocation( - activitiesManager, node, application, updatedContainer, - ActivityState.RE_RESERVED); ActivitiesLogger.APP.finishSkippedAppAllocationRecording( activitiesManager, application.getApplicationId(), ActivityState.SKIPPED, ActivityDiagnosticConstant.EMPTY); } else { - ActivitiesLogger.APP.recordAppActivityWithAllocation( - activitiesManager, node, application, updatedContainer, - ActivityState.RESERVED); ActivitiesLogger.APP.finishAllocatedAppAllocationRecording( activitiesManager, application.getApplicationId(), updatedContainer.getContainerId(), ActivityState.RESERVED, @@ -149,7 +143,7 @@ protected CSAssignment getCSAssignmentFromAllocateResult( node, application, updatedContainer, ActivityState.ALLOCATED); ActivitiesLogger.APP.finishAllocatedAppAllocationRecording( activitiesManager, application.getApplicationId(), - updatedContainer.getContainerId(), ActivityState.ACCEPTED, + updatedContainer.getContainerId(), ActivityState.ALLOCATED, ActivityDiagnosticConstant.EMPTY); // Update unformed resource @@ -162,6 +156,9 @@ protected CSAssignment getCSAssignmentFromAllocateResult( assignment.setSkippedType( CSAssignment.SkippedType.QUEUE_LIMIT); } + ActivitiesLogger.APP.finishSkippedAppAllocationRecording( + activitiesManager, application.getApplicationId(), + ActivityState.SKIPPED, ActivityDiagnosticConstant.EMPTY); } return assignment; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java index 1f9f6eb7ee78a..2643fd0b7a110 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java @@ -628,6 +628,12 @@ private ContainerAllocation assignContainer(Resource clusterResource, } } + ActivitiesLogger.APP.recordAppActivityWithoutAllocation( + activitiesManager, node, application, schedulerKey, + ActivityDiagnosticConstant.NOT_SUFFICIENT_RESOURCE + + getResourceDiagnostics(capability, availableForDC), + rmContainer == null ? + ActivityState.RESERVED : ActivityState.RE_RESERVED); ContainerAllocation result = new ContainerAllocation(null, pendingAsk.getPerAllocationResource(), AllocationState.RESERVED); result.containerNodeType = type; @@ -824,7 +830,7 @@ private ContainerAllocation allocate(Resource clusterResource, ActivityDiagnosticConstant. APPLICATION_PRIORITY_DO_NOT_NEED_RESOURCE); return new ContainerAllocation(reservedContainer, null, - AllocationState.QUEUE_SKIPPED); + AllocationState.PRIORITY_SKIPPED); } result = ContainerAllocation.PRIORITY_SKIPPED; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java index da2be57184ebf..6b0d86ba92b31 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java @@ -43,6 +43,7 @@ public class AppAllocationInfo { private long timestamp; private String dateTime; private String allocationState; + private String diagnostic; private List requestAllocation; AppAllocationInfo() { @@ -57,6 +58,7 @@ public class AppAllocationInfo { this.timestamp = allocation.getTime(); this.dateTime = new Date(allocation.getTime()).toString(); this.allocationState = allocation.getAppState().name(); + this.diagnostic = allocation.getDiagnostic(); Map> requestToActivityNodes = allocation.getAllocationAttempts().stream().collect(Collectors .groupingBy((e) -> e.getRequestPriority() + "_" + e @@ -96,4 +98,8 @@ public String getAllocationState() { public List getRequestAllocation() { return requestAllocation; } + + public String getDiagnostic() { + return diagnostic; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java index 
7650f7acf1f0c..1e08f05e13414 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java @@ -460,7 +460,7 @@ public void testAppActivityJSON() throws Exception { //Check app activities verifyNumberOfAllocations(json, 1); JSONObject allocations = json.getJSONObject("allocations"); - verifyStateOfAllocations(allocations, "allocationState", "ACCEPTED"); + verifyStateOfAllocations(allocations, "allocationState", "ALLOCATED"); //Check request allocation JSONObject requestAllocationObj = allocations.getJSONObject("requestAllocation"); @@ -527,7 +527,7 @@ public void testAppAssignMultipleContainersPerNodeHeartbeat() JSONArray allocations = json.getJSONArray("allocations"); for (int i = 0; i < allocations.length(); i++) { verifyStateOfAllocations(allocations.getJSONObject(i), - "allocationState", "ACCEPTED"); + "allocationState", "ALLOCATED"); } } finally { rm.stop(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java index 525925bb4d221..8383a0d28c617 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java @@ -277,7 +277,7 @@ public void testAppAssignContainer() throws Exception { verifyNumberOfAllocations(json, 1); JSONObject allocationObj = json.getJSONObject("allocations"); - verifyStateOfAllocations(allocationObj, "allocationState", "ACCEPTED"); + verifyStateOfAllocations(allocationObj, "allocationState", "ALLOCATED"); JSONObject requestAllocationObj = allocationObj.getJSONObject("requestAllocation"); verifyNumberOfAllocationAttempts(requestAllocationObj, 2); @@ -437,7 +437,7 @@ public void testAppInsufficientResourceDiagnostic() throws Exception { } // check second activity is for first allocation with ALLOCATED state allocationObj = allocationArray.getJSONObject(1); - verifyStateOfAllocations(allocationObj, "allocationState", "ACCEPTED"); + verifyStateOfAllocations(allocationObj, "allocationState", "ALLOCATED"); requestAllocationObj = allocationObj.getJSONObject("requestAllocation"); verifyNumberOfAllocationAttempts(requestAllocationObj, 1); verifyStateOfAllocations(requestAllocationObj, "allocationState", @@ -610,7 +610,7 @@ public void testAppGroupByDiagnostics() throws Exception { } // check second activity is for first allocation with ALLOCATED state allocationObj = allocationArray.getJSONObject(1); - verifyStateOfAllocations(allocationObj, "allocationState", "ACCEPTED"); + verifyStateOfAllocations(allocationObj, "allocationState", "ALLOCATED"); requestAllocationObj = 
allocationObj.getJSONObject("requestAllocation"); verifyNumberOfAllocationAttempts(requestAllocationObj, 1); verifyStateOfAllocations(requestAllocationObj, "allocationState", From 36757ad6a91b04221b2bf0f77e9dff701c186b2f Mon Sep 17 00:00:00 2001 From: S O'Donnell Date: Thu, 6 Jun 2019 15:57:40 +0200 Subject: [PATCH 0132/1308] HDDS-1645. Change the version of Pico CLI to the latest 3.x release - 3.9.6 Closes #909 --- hadoop-hdds/pom.xml | 2 +- .../java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 581dce04c2375..0e87c2cc8db9f 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -239,7 +239,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> info.picocli picocli - 3.5.2 + 3.9.6 com.google.protobuf diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java index 9c68ab1cec72a..93a001977acec 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java @@ -631,7 +631,8 @@ public void testListVolume() throws Exception { err.reset(); args = new String[] {"volume", "list", url + "/", "--user", user2, "--length", "invalid-length"}; - executeWithError(shell, args, "For input string: \"invalid-length\""); + executeWithError(shell, args, "Invalid value for option " + + "'--length': 'invalid-length' is not an int"); } @Test From 1f244b422793f01738355cf9511f363e03c94249 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Thu, 6 Jun 2019 17:49:12 +0100 Subject: [PATCH 0133/1308] Revert "HADOOP-16344. Make DurationInfo public unstable." This reverts commit 829848ba2e3e04e3b7bf5a02e0379470eec0809e. Change-Id: Ied91250e191b2ba701a8fc697c78b3756ce76be8 --- .../src/main/java/org/apache/hadoop/util/DurationInfo.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java index 2a1b78d0cf00b..9dd75db27c733 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java @@ -29,8 +29,8 @@ * This allows it to be used in a try-with-resources clause, and have the * duration automatically logged. */ -@Public -@Unstable +@InterfaceAudience.Private +@InterfaceStability.Unstable public class DurationInfo extends OperationDuration implements AutoCloseable { private final String text; From 0b8a2608e011eda144db85473a2733742bf0a5e9 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Thu, 6 Jun 2019 10:13:36 -0700 Subject: [PATCH 0134/1308] HDDS-1605. Implement AuditLogging for OM HA Bucket write requests. 
(#867) --- .../hadoop/ozone/audit/AuditLogger.java | 10 ++++ .../apache/hadoop/ozone/om/OzoneManager.java | 4 ++ .../ozone/om/request/OMClientRequest.java | 39 ++++++++++++++- .../ozone/om/request/RequestAuditor.java | 50 +++++++++++++++++++ .../request/bucket/OMBucketCreateRequest.java | 38 ++++++++++---- .../request/bucket/OMBucketDeleteRequest.java | 40 ++++++++++++--- .../bucket/OMBucketSetPropertyRequest.java | 44 ++++++++++++---- .../bucket/TestOMBucketCreateRequest.java | 7 +++ .../bucket/TestOMBucketDeleteRequest.java | 7 +++ .../TestOMBucketSetPropertyRequest.java | 8 ++- 10 files changed, 217 insertions(+), 30 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java index 9357774313bcd..ee6f45dadb4c4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java @@ -73,4 +73,14 @@ public void logReadFailure(AuditMessage msg) { msg.getThrowable()); } + public void logWrite(AuditMessage auditMessage) { + if (auditMessage.getThrowable() == null) { + this.logger.logIfEnabled(FQCN, Level.INFO, WRITE_MARKER, auditMessage, + auditMessage.getThrowable()); + } else { + this.logger.logIfEnabled(FQCN, Level.ERROR, WRITE_MARKER, auditMessage, + auditMessage.getThrowable()); + } + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 8ea8e2ca4b8d6..b3357e1668ac9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -2458,6 +2458,10 @@ private Map buildAuditMap(String volume){ return auditMap; } + public AuditLogger getAuditLogger() { + return AUDIT; + } + @Override public AuditMessage buildAuditMessageForSuccess(AuditAction op, Map auditMap) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 7650dbd9079d0..e0c17c4a4dbd8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -20,11 +20,18 @@ import java.io.IOException; import java.net.InetAddress; +import java.util.LinkedHashMap; +import java.util.Map; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.audit.AuditAction; +import org.apache.hadoop.ozone.audit.AuditEventStatus; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -41,7 +48,7 @@ * OMClientRequest provides methods which every write OM request should * implement. 
*/ -public abstract class OMClientRequest { +public abstract class OMClientRequest implements RequestAuditor { private final OMRequest omRequest; @@ -173,4 +180,34 @@ protected OMResponse createErrorOMResponse(OMResponse.Builder omResponse, return omResponse.build(); } + /** + * Log the auditMessage. + * @param auditLogger + * @param auditMessage + */ + protected void auditLog(AuditLogger auditLogger, AuditMessage auditMessage) { + auditLogger.logWrite(auditMessage); + } + + @Override + public AuditMessage buildAuditMessage(AuditAction op, + Map< String, String > auditMap, Throwable throwable, + OzoneManagerProtocolProtos.UserInfo userInfo) { + return new AuditMessage.Builder() + .setUser(userInfo != null ? userInfo.getUserName() : null) + .atIp(userInfo != null ? userInfo.getRemoteAddress() : null) + .forOperation(op.getAction()) + .withParams(auditMap) + .withResult(throwable != null ? AuditEventStatus.FAILURE.toString() : + AuditEventStatus.SUCCESS.toString()) + .withException(throwable) + .build(); + } + + @Override + public Map buildVolumeAuditMap(String volume) { + Map auditMap = new LinkedHashMap<>(); + auditMap.put(OzoneConsts.VOLUME, volume); + return auditMap; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java new file mode 100644 index 0000000000000..731b8011f3d1a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request; + +import java.util.Map; + +import org.apache.hadoop.ozone.audit.AuditAction; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +/** + * Interface for OM Requests to convert to audit objects. + */ +public interface RequestAuditor { + + /** + * Build AuditMessage. + * @param op + * @param auditMap + * @param throwable + * @param userInfo + * @return + */ + AuditMessage buildAuditMessage(AuditAction op, + Map auditMap, Throwable throwable, + OzoneManagerProtocolProtos.UserInfo userInfo); + + /** + * Build auditMap with specified volume. + * @param volume + * @return auditMap. + */ + Map buildVolumeAuditMap(String volume); +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 885b4c290e0c7..5970e5de5a77d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -28,6 +28,8 @@ import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; @@ -117,6 +119,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OzoneManagerProtocolProtos.Status.OK); OmBucketInfo omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo); + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + try { // check Acl if (ozoneManager.getAclsEnabled()) { @@ -128,6 +133,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, LOG.error("Bucket creation failed for bucket:{} in volume:{}", bucketName, volumeName, ex); omMetrics.incNumBucketCreateFails(); + auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_BUCKET, + omBucketInfo.toAuditMap(), ex, userInfo)); return new OMBucketCreateResponse(omBucketInfo, createErrorOMResponse(omResponse, ex)); } @@ -135,6 +142,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, String volumeKey = metadataManager.getVolumeKey(volumeName); String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); + IOException exception = null; metadataManager.getLock().acquireVolumeLock(volumeName); metadataManager.getLock().acquireBucketLock(volumeName, bucketName); try { @@ -152,27 +160,35 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMException.ResultCodes.BUCKET_ALREADY_EXISTS); } - LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName); - omMetrics.incNumBuckets(); - // Update table cache. 
metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex)); - // return response. + + } catch (IOException ex) { + exception = ex; + } finally { + metadataManager.getLock().releaseBucketLock(volumeName, bucketName); + metadataManager.getLock().releaseVolumeLock(volumeName); + } + + // Performing audit logging outside of the lock. + auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_BUCKET, + omBucketInfo.toAuditMap(), exception, userInfo)); + + // return response. + if (exception == null) { + LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName); + omMetrics.incNumBuckets(); omResponse.setCreateBucketResponse( CreateBucketResponse.newBuilder().build()); return new OMBucketCreateResponse(omBucketInfo, omResponse.build()); - - } catch (IOException ex) { + } else { omMetrics.incNumBucketCreateFails(); LOG.error("Bucket creation failed for bucket:{} in volume:{}", - bucketName, volumeName, ex); + bucketName, volumeName, exception); return new OMBucketCreateResponse(omBucketInfo, - createErrorOMResponse(omResponse, ex)); - } finally { - metadataManager.getLock().releaseBucketLock(volumeName, bucketName); - metadataManager.getLock().releaseVolumeLock(volumeName); + createErrorOMResponse(omResponse, exception)); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java index 7d17b5e98df11..853bb7293b6c6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java @@ -19,14 +19,16 @@ package org.apache.hadoop.ozone.om.request.bucket; import java.io.IOException; +import java.util.Map; import com.google.common.base.Optional; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -35,6 +37,8 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse; import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .DeleteBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -71,6 +75,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setStatus(OzoneManagerProtocolProtos.Status.OK) .setCmdType(omRequest.getCmdType()); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + Map auditMap = buildVolumeAuditMap(volumeName); + auditMap.put(OzoneConsts.BUCKET, bucketName); + + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + + try { // check Acl if (ozoneManager.getAclsEnabled()) { @@ -82,10 +94,13 @@ public OMClientResponse 
validateAndUpdateCache(OzoneManager ozoneManager, omMetrics.incNumBucketDeleteFails(); LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName, volumeName, ex); + auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_BUCKET, + auditMap, ex, userInfo)); return new OMBucketDeleteResponse(volumeName, bucketName, createErrorOMResponse(omResponse, ex)); } + IOException exception = null; // acquire lock omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName); try { @@ -113,20 +128,29 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, new CacheKey<>(bucketKey), new CacheValue<>(Optional.absent(), transactionLogIndex)); - // return response. + } catch (IOException ex) { + exception = ex; + } finally { + omMetadataManager.getLock().releaseBucketLock(volumeName, bucketName); + } + + // Performing audit logging outside of the lock. + auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_BUCKET, + auditMap, exception, userInfo)); + + // return response. + if (exception == null) { + LOG.debug("Deleted bucket:{} in volume:{}", bucketName, volumeName); omResponse.setDeleteBucketResponse( DeleteBucketResponse.newBuilder().build()); return new OMBucketDeleteResponse(volumeName, bucketName, omResponse.build()); - - } catch (IOException ex) { + } else { omMetrics.incNumBucketDeleteFails(); LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName, - volumeName, ex); + volumeName, exception); return new OMBucketDeleteResponse(volumeName, bucketName, - createErrorOMResponse(omResponse, ex)); - } finally { - omMetadataManager.getLock().releaseBucketLock(volumeName, bucketName); + createErrorOMResponse(omResponse, exception)); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 8866d9c731c9d..4c7057cfe2014 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -22,6 +22,9 @@ import java.util.List; import com.google.common.base.Optional; +import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,6 +51,8 @@ .OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .SetBucketPropertyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .SetBucketPropertyResponse; import org.apache.hadoop.utils.db.cache.CacheKey; @@ -68,6 +73,11 @@ public OMBucketSetPropertyRequest(OMRequest omRequest) { public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long transactionLogIndex) { + SetBucketPropertyRequest setBucketPropertyRequest = + getOmRequest().getSetBucketPropertyRequest(); + + Preconditions.checkNotNull(setBucketPropertyRequest); + OMMetrics omMetrics = ozoneManager.getOmMetrics(); // This will never be null, on a real Ozone cluster. 
For tests this might @@ -80,8 +90,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - BucketArgs bucketArgs = - getOmRequest().getSetBucketPropertyRequest().getBucketArgs(); + BucketArgs bucketArgs = setBucketPropertyRequest.getBucketArgs(); OmBucketArgs omBucketArgs = OmBucketArgs.getFromProtobuf(bucketArgs); String volumeName = bucketArgs.getVolumeName(); @@ -93,6 +102,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OzoneManagerProtocolProtos.Status.OK); OmBucketInfo omBucketInfo = null; + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + try { // check Acl if (ozoneManager.getAclsEnabled()) { @@ -104,12 +116,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, if (omMetrics != null) { omMetrics.incNumBucketUpdateFails(); } + auditLog(auditLogger, buildAuditMessage(OMAction.UPDATE_BUCKET, + omBucketArgs.toAuditMap(), ex, userInfo)); LOG.error("Setting bucket property failed for bucket:{} in volume:{}", bucketName, volumeName, ex); return new OMBucketSetPropertyResponse(omBucketInfo, createErrorOMResponse(omResponse, ex)); } + IOException exception = null; + // acquire lock omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName); @@ -170,21 +186,31 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, new CacheKey<>(bucketKey), new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex)); - // return response. + } catch (IOException ex) { + exception = ex; + } finally { + omMetadataManager.getLock().releaseBucketLock(volumeName, bucketName); + } + + // Performing audit logging outside of the lock. + auditLog(auditLogger, buildAuditMessage(OMAction.UPDATE_BUCKET, + omBucketArgs.toAuditMap(), exception, userInfo)); + + // return response. 
+ if (exception == null) { + LOG.debug("Setting bucket property for bucket:{} in volume:{}", + bucketName, volumeName); omResponse.setSetBucketPropertyResponse( SetBucketPropertyResponse.newBuilder().build()); return new OMBucketSetPropertyResponse(omBucketInfo, omResponse.build()); - - } catch (IOException ex) { + } else { if (omMetrics != null) { omMetrics.incNumBucketUpdateFails(); } LOG.error("Setting bucket property failed for bucket:{} in volume:{}", - bucketName, volumeName, ex); + bucketName, volumeName, exception); return new OMBucketSetPropertyResponse(omBucketInfo, - createErrorOMResponse(omResponse, ex)); - } finally { - omMetadataManager.getLock().releaseBucketLock(volumeName, bucketName); + createErrorOMResponse(omResponse, exception)); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java index 181643536b509..4485c834d1288 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java @@ -30,6 +30,8 @@ import org.mockito.Mockito; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -48,6 +50,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.Time; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.when; /** @@ -61,6 +64,7 @@ public class TestOMBucketCreateRequest { private OzoneManager ozoneManager; private OMMetrics omMetrics; private OMMetadataManager omMetadataManager; + private AuditLogger auditLogger; @Before @@ -73,6 +77,9 @@ public void setup() throws Exception { omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + auditLogger = Mockito.mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); } @After diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java index d594e0a8b9a73..8e0b1a8fdabf6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java @@ -30,6 +30,8 @@ import org.mockito.Mockito; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -43,6 +45,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; +import static org.mockito.ArgumentMatchers.any; import 
static org.mockito.Mockito.when; /** @@ -56,6 +59,7 @@ public class TestOMBucketDeleteRequest { private OzoneManager ozoneManager; private OMMetrics omMetrics; private OMMetadataManager omMetadataManager; + private AuditLogger auditLogger; @Before @@ -68,6 +72,9 @@ public void setup() throws Exception { omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + auditLogger = Mockito.mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); } @After diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java index fc3f04990832e..2f9612a79fea9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java @@ -30,6 +30,8 @@ import org.mockito.Mockito; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -45,6 +47,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .SetBucketPropertyRequest; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.when; /** @@ -59,7 +62,7 @@ public class TestOMBucketSetPropertyRequest { private OzoneManager ozoneManager; private OMMetrics omMetrics; private OMMetadataManager omMetadataManager; - + private AuditLogger auditLogger; @Before public void setup() throws Exception { @@ -71,6 +74,9 @@ public void setup() throws Exception { omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + auditLogger = Mockito.mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); } @After From 944adc61b1830388d520d4052fc7eb6c7ba2790d Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Thu, 6 Jun 2019 10:20:28 -0700 Subject: [PATCH 0135/1308] HDFS-14527. Stop all DataNodes may result in NN terminate. Contributed by He Xiaoqiao. 
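When every DataNode has been stopped and removed from the cluster topology, NetworkTopology#getNumOfRacks() returns 0, and the per-rack calculation that follows the early return in getMaxNodesPerRack() divides by that rack count. Because the RedundancyMonitor calls chooseTarget outside the global lock, the resulting ArithmeticException escapes and terminates the NameNode. The minimal sketch below illustrates the failure mode and the numOfRacks <= 1 guard this patch adds; it is not the production code, and the maxNodesPerRack formula shown is only an assumed, simplified illustration of where the division happens.

// Minimal sketch (not the HDFS source): why numOfRacks == 0 must short-circuit.
public final class MaxNodesPerRackSketch {

  // Assumed, simplified form of the per-rack limit calculation.
  static int[] getMaxNodesPerRack(int numOfReplicas, int totalNumOfReplicas,
      int numOfRacks) {
    // HDFS-14527: numOfRacks can be 0 once all DataNodes are removed; return
    // the default instead of falling through to the division below.
    if (numOfRacks <= 1 || totalNumOfReplicas <= 1) {
      return new int[] {numOfReplicas, totalNumOfReplicas};
    }
    // Divides by numOfRacks; with the old "numOfRacks == 1" check this line
    // threw ArithmeticException when numOfRacks was 0.
    int maxNodesPerRack = (totalNumOfReplicas - 1) / numOfRacks + 2;
    return new int[] {numOfReplicas, maxNodesPerRack};
  }

  public static void main(String[] args) {
    // All DataNodes stopped: zero racks, two requested replicas.
    int[] result = getMaxNodesPerRack(2, 2, 0);
    System.out.println(result[0] + ", " + result[1]); // prints "2, 2"
  }
}

With the guard in place, chooseTarget falls back to the default limits and simply fails to place replicas for an empty cluster instead of killing the NameNode, which is what the new TestRedundancyMonitor test verifies.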
--- .../BlockPlacementPolicyDefault.java | 4 +- ...BlockPlacementPolicyRackFaultTolerant.java | 4 +- .../TestRedundancyMonitor.java | 108 ++++++++++++++++++ 3 files changed, 114 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRedundancyMonitor.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index 6fed8a18f0af1..1320c80cde3eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -348,7 +348,9 @@ protected int[] getMaxNodesPerRack(int numOfChosen, int numOfReplicas) { } // No calculation needed when there is only one rack or picking one node. int numOfRacks = clusterMap.getNumOfRacks(); - if (numOfRacks == 1 || totalNumOfReplicas <= 1) { + // HDFS-14527 return default when numOfRacks = 0 to avoid + // ArithmeticException when calc maxNodesPerRack at following logic. + if (numOfRacks <= 1 || totalNumOfReplicas <= 1) { return new int[] {numOfReplicas, totalNumOfReplicas}; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java index 95c5c880a9a0e..b204450491a7b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java @@ -43,7 +43,9 @@ protected int[] getMaxNodesPerRack(int numOfChosen, int numOfReplicas) { } // No calculation needed when there is only one rack or picking one node. int numOfRacks = clusterMap.getNumOfRacks(); - if (numOfRacks == 1 || totalNumOfReplicas <= 1) { + // HDFS-14527 return default when numOfRacks = 0 to avoid + // ArithmeticException when calc maxNodesPerRack at following logic. + if (numOfRacks <= 1 || totalNumOfReplicas <= 1) { return new int[] {numOfReplicas, totalNumOfReplicas}; } // If more racks than replicas, put one replica per rack. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRedundancyMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRedundancyMonitor.java new file mode 100644 index 0000000000000..0667e2611b4f2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRedundancyMonitor.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.TestBlockStoragePolicy; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import static org.apache.hadoop.fs.contract.hdfs.HDFSContract.BLOCK_SIZE; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; + +/** + * This class tests RedundancyMonitor in BlockManager. + */ +public class TestRedundancyMonitor { + private static final String FILENAME = "/dummyfile.txt"; + + /** + * RedundancyMonitor invoke choose target out of global lock when + * #computeDatanodeWork. However it may result in NN terminate when choose + * target meet runtime exception(ArithmeticException) since we stop all + * DataNodes during that time. + * Verify that NN should not terminate even stop all datanodes. + */ + @Test + public void testChooseTargetWhenAllDataNodesStop() throws Throwable { + + HdfsConfiguration conf = new HdfsConfiguration(); + String[] hosts = new String[]{"host1", "host2"}; + String[] racks = new String[]{"/d1/r1", "/d1/r1"}; + try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(conf) + .racks(racks).hosts(hosts).numDataNodes(hosts.length).build()) { + miniCluster.waitActive(); + + FSNamesystem fsn = miniCluster.getNamesystem(); + BlockManager blockManager = fsn.getBlockManager(); + + BlockPlacementPolicyDefault replicator + = (BlockPlacementPolicyDefault) blockManager + .getBlockPlacementPolicy(); + Set dns = blockManager.getDatanodeManager() + .getDatanodes(); + + DelayAnswer delayer = new DelayAnswer(BlockPlacementPolicyDefault.LOG); + NetworkTopology clusterMap = replicator.clusterMap; + NetworkTopology spyClusterMap = spy(clusterMap); + replicator.clusterMap = spyClusterMap; + doAnswer(delayer).when(spyClusterMap).getNumOfRacks(); + + ExecutorService pool = Executors.newFixedThreadPool(2); + + // Trigger chooseTarget + Future chooseTargetFuture = pool.submit(() -> { + replicator.chooseTarget(FILENAME, 2, dns.iterator().next(), + new ArrayList(), false, null, BLOCK_SIZE, + TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null); + return null; + }); + + // Wait until chooseTarget calls NetworkTopology#getNumOfRacks + delayer.waitForCall(); + // Remove all DataNodes + Future stopDatanodesFuture = pool.submit(() -> { + for (DatanodeDescriptor dn : dns) { + spyClusterMap.remove(dn); + } + return null; + }); + // Wait stopDatanodesFuture run finish + stopDatanodesFuture.get(); + + // Allow chooseTarget to proceed + delayer.proceed(); + try { + chooseTargetFuture.get(); + } catch (ExecutionException ee) { + throw ee.getCause(); + } + } + } +} From c8276f3e7669c6fbafe3c6ce17cfa8d6c01431b0 Mon Sep 17 
00:00:00 2001 From: Shweta Yakkali Date: Thu, 6 Jun 2019 11:06:48 -0700 Subject: [PATCH 0136/1308] HDDS-1201. Reporting Corruptions in Containers to SCM (#912) --- .../container/common/impl/HddsDispatcher.java | 2 +- .../container/common/interfaces/Handler.java | 2 +- .../container/keyvalue/KeyValueHandler.java | 12 +++++------ .../ozoneimpl/ContainerController.java | 4 ++-- .../ozoneimpl/ContainerScrubber.java | 20 +++++++++---------- 5 files changed, 19 insertions(+), 21 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 6f56b3c13d27c..30de8936fedb2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -302,7 +302,7 @@ private ContainerCommandResponseProto dispatchRequest( containerState == State.OPEN || containerState == State.CLOSING); // mark and persist the container state to be unhealthy try { - handler.markContainerUhealthy(container); + handler.markContainerUnhealthy(container); } catch (IOException ioe) { // just log the error here in case marking the container fails, // Return the actual failure response to the client diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java index 52d14dbbb4747..97413f4e274c2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java @@ -135,7 +135,7 @@ public abstract void markContainerForClose(Container container) * @param container container to update * @throws IOException in case of exception */ - public abstract void markContainerUhealthy(Container container) + public abstract void markContainerUnhealthy(Container container) throws IOException; /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 72f48fa3b5ec6..7249271b95c5d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -884,20 +884,20 @@ public Container importContainer(long containerID, long maxSize, @Override public void markContainerForClose(Container container) throws IOException { - State currentState = container.getContainerState(); // Move the container to CLOSING state only if it's OPEN - if (currentState == State.OPEN) { + if (container.getContainerState() == State.OPEN) { container.markContainerForClose(); sendICR(container); } } @Override - public void markContainerUhealthy(Container container) + public void markContainerUnhealthy(Container container) throws IOException { - // this will mark the container unhealthy and a close container action will - // be sent from the dispatcher ton SCM to close down this container. 
- container.markContainerUnhealthy(); + if (container.getContainerState() != State.UNHEALTHY) { + container.markContainerUnhealthy(); + sendICR(container); + } } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java index 11cb8eeadd111..10cb330415fd7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java @@ -133,11 +133,11 @@ public void deleteContainer(final long containerId, boolean force) * @param container Container * @return handler of the container */ - Handler getHandler(final Container container) { + private Handler getHandler(final Container container) { return handlers.get(container.getContainerType()); } - Iterator getContainerSetIterator() { + public Iterator getContainers() { return containerSet.getContainerIterator(); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubber.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubber.java index d6f8b273d1b9d..380dc9e4ebad1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubber.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubber.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,7 +56,11 @@ public ContainerScrubber(OzoneConfiguration conf, LOG.info("Background ContainerScrubber starting up"); while (true) { - scrub(); + try { + scrub(); + } catch (StorageContainerException e) { + LOG.error("Scrubber encountered StorageContainerException."); + } if (this.halt) { break; // stop and exit if requested @@ -126,25 +129,20 @@ private void throttleScrubber(TimeStamp startTime) { } } - private void scrub() { - - Iterator containerIt = controller.getContainerSetIterator(); + private void scrub() throws StorageContainerException { + Iterator containerIt = controller.getContainers(); long count = 0; - while (containerIt.hasNext()) { + while (containerIt.hasNext() && !halt) { TimeStamp startTime = new TimeStamp(System.currentTimeMillis()); Container container = containerIt.next(); - Handler containerHandler = controller.getHandler(container); - - if (this.halt) { - break; // stop if requested - } try { container.check(); } catch (StorageContainerException e) { LOG.error("Error unexpected exception {} for Container {}", e, container.getContainerData().getContainerID()); + container.markContainerUnhealthy(); // XXX Action required here } count++; From fe069570d8962e6d679f38d0ca44a1838f2f287c Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Thu, 6 Jun 2019 11:13:29 -0700 Subject: [PATCH 0137/1308] HDDS-1647 : Recon config tag does not show up on Ozone UI. 
(#914) --- hadoop-hdds/common/src/main/resources/ozone-default.xml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index d6379795c79fa..33f058486c7fe 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1196,13 +1196,14 @@ hadoop.tags.custom - OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY + OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM, + CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,RECON ozone.tags.system OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM, - CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS + CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS,RECON From 76c0183ae3f7feeed108925a929a2bcc0fd31658 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Thu, 6 Jun 2019 11:17:59 -0700 Subject: [PATCH 0138/1308] HDDS-1652. HddsDispatcher should not shutdown volumeSet. Contributed by Xiaoyu Yao. (#916) --- .../hadoop/ozone/container/common/impl/HddsDispatcher.java | 2 -- .../hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java | 5 +++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 30de8936fedb2..39e163e54e372 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -114,8 +114,6 @@ public void init() { @Override public void shutdown() { - // Shutdown the volumes - volumeSet.shutdown(); } /** diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java index 91d096815ca0f..c05ecb966bcd4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java @@ -87,6 +87,7 @@ public class BenchMarkDatanodeDispatcher { private List containers; private List keys; private List chunks; + private VolumeSet volumeSet; @Setup(Level.Trial) public void initialize() throws IOException { @@ -103,7 +104,7 @@ public void initialize() throws IOException { conf.set("dfs.datanode.data.dir", baseDir + File.separator + "data"); ContainerSet containerSet = new ContainerSet(); - VolumeSet volumeSet = new VolumeSet(datanodeUuid, conf); + volumeSet = new VolumeSet(datanodeUuid, conf); StateContext context = new StateContext( conf, DatanodeStates.RUNNING, null); ContainerMetrics metrics = ContainerMetrics.create(conf); @@ -161,7 +162,7 @@ public void initialize() throws IOException { @TearDown(Level.Trial) public void cleanup() throws IOException { - dispatcher.shutdown(); + volumeSet.shutdown(); FileUtils.deleteDirectory(new File(baseDir)); } From c7e6f076df5b38702579db352475113e5f3ae5fb Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Thu, 6 Jun 2019 
11:20:04 -0700 Subject: [PATCH 0139/1308] HDDS-1650. Fix Ozone tests leaking volume checker thread. Contributed by Xiaoyu Yao. (#915) --- .../common/impl/TestHddsDispatcher.java | 9 ++++++--- .../TestRoundRobinVolumeChoosingPolicy.java | 13 ++++++++++++- .../container/common/volume/TestVolumeSet.java | 1 + .../common/volume/TestVolumeSetDiskChecks.java | 3 +++ .../container/keyvalue/TestKeyValueHandler.java | 7 ++++--- .../container/ozoneimpl/TestOzoneContainer.java | 16 +++++++++++----- 6 files changed, 37 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index d425820193698..54dbe94c1c212 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -73,13 +73,15 @@ public class TestHddsDispatcher { public void testContainerCloseActionWhenFull() throws IOException { String testDir = GenericTestUtils.getTempPath( TestHddsDispatcher.class.getSimpleName()); + OzoneConfiguration conf = new OzoneConfiguration(); + DatanodeDetails dd = randomDatanodeDetails(); + VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf); + try { UUID scmId = UUID.randomUUID(); - OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HDDS_DATANODE_DIR_KEY, testDir); - DatanodeDetails dd = randomDatanodeDetails(); ContainerSet containerSet = new ContainerSet(); - VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf); + DatanodeStateMachine stateMachine = Mockito.mock( DatanodeStateMachine.class); StateContext context = Mockito.mock(StateContext.class); @@ -118,6 +120,7 @@ public void testContainerCloseActionWhenFull() throws IOException { .addContainerActionIfAbsent(Mockito.any(ContainerAction.class)); } finally { + volumeSet.shutdown(); FileUtils.deleteDirectory(new File(testDir)); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java index 80594d3524536..d0fbf10269c4f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import org.apache.hadoop.util.ReflectionUtils; +import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -40,10 +41,12 @@ public class TestRoundRobinVolumeChoosingPolicy { private RoundRobinVolumeChoosingPolicy policy; private List volumes; + private VolumeSet volumeSet; private final String baseDir = MiniDFSCluster.getBaseDirectory(); private final String volume1 = baseDir + "disk1"; private final String volume2 = baseDir + "disk2"; + private static final String DUMMY_IP_ADDR = "0.0.0.0"; @Before @@ -53,10 +56,18 @@ public void setup() throws Exception { conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey); policy = ReflectionUtils.newInstance( 
RoundRobinVolumeChoosingPolicy.class, null); - VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); + volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); volumes = volumeSet.getVolumesList(); } + @After + public void cleanUp() { + if (volumeSet != null) { + volumeSet.shutdown(); + volumeSet = null; + } + } + @Test public void testRRVolumeChoosingPolicy() throws Exception { HddsVolume hddsVolume1 = volumes.get(0); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index f97ad4e873f7b..79eeb61495ae5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -237,6 +237,7 @@ public void testFailVolumes() throws Exception{ //Set back to writable try { readOnlyVolumePath.setWritable(true); + volSet.shutdown(); } finally { FileUtil.fullyDelete(volumePath); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index 472bb9891cb54..c5deff0fc7802 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -100,6 +100,7 @@ public void testOzoneDirsAreCreated() throws IOException { for (String d : dirs) { assertTrue(new File(d).isDirectory()); } + volumeSet.shutdown(); } /** @@ -124,6 +125,7 @@ HddsVolumeChecker getVolumeChecker(Configuration configuration) assertThat(volumeSet.getFailedVolumesList().size(), is(numBadVolumes)); assertThat(volumeSet.getVolumesList().size(), is(numVolumes - numBadVolumes)); + volumeSet.shutdown(); } /** @@ -146,6 +148,7 @@ HddsVolumeChecker getVolumeChecker(Configuration configuration) assertEquals(volumeSet.getFailedVolumesList().size(), numVolumes); assertEquals(volumeSet.getVolumesList().size(), 0); + volumeSet.shutdown(); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 8ef9e19d53712..2c71fef11a646 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -229,14 +229,14 @@ public void testHandlerCommandHandling() throws Exception { @Test public void testVolumeSetInKeyValueHandler() throws Exception{ File path = GenericTestUtils.getRandomizedTestDir(); + Configuration conf = new OzoneConfiguration(); + conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath()); + VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); try { - Configuration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath()); ContainerSet cset = new ContainerSet(); int[] interval = new int[1]; interval[0] = 2; ContainerMetrics metrics = new ContainerMetrics(interval); - 
VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class); DatanodeStateMachine stateMachine = Mockito.mock( DatanodeStateMachine.class); @@ -263,6 +263,7 @@ public void testVolumeSetInKeyValueHandler() throws Exception{ ex); } } finally { + volumeSet.shutdown(); FileUtil.fullyDelete(path); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index f5ebb49d06317..b0d3a0f3b7b57 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -41,6 +41,7 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -82,13 +83,20 @@ public void setUp() throws Exception { conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, folder.newFolder().getAbsolutePath()); commitSpaceMap = new HashMap(); + volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf); + volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); + } + + @After + public void cleanUp() throws Exception { + if (volumeSet != null) { + volumeSet.shutdown(); + volumeSet = null; + } } @Test public void testBuildContainerMap() throws Exception { - volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf); - volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); - // Format the volumes for (HddsVolume volume : volumeSet.getVolumesList()) { volume.format(UUID.randomUUID().toString()); @@ -139,8 +147,6 @@ public void testBuildContainerMap() throws Exception { @Test public void testContainerCreateDiskFull() throws Exception { - volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf); - volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); long containerSize = (long) StorageUnit.MB.toBytes(100); boolean diskSpaceException = false; From e1dfc060f8f0247f97127c75c9284a068fc93907 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Thu, 6 Jun 2019 11:59:53 -0700 Subject: [PATCH 0140/1308] HDFS-14486. The exception classes in some throw statements do not accurately describe why they are thrown. Contributed by Ayush Saxena. 
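An invalid dfs.datanode.failed.volumes.tolerated or disk-check setting is a configuration mistake, not a disk failure, so these validation paths now throw the unchecked HadoopIllegalArgumentException instead of DiskChecker.DiskErrorException. The sketch below shows the validation pattern under that change; the class name, helper name, and message text are illustrative only and are not taken from the DataNode source.

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;

// Sketch of the check style used in DataNode/DatasetVolumeChecker; the class
// and method names here are hypothetical.
public final class VolumeToleratedCheckSketch {

  static int validateTolerated(Configuration conf, int volsConfigured) {
    int tolerated = conf.getInt("dfs.datanode.failed.volumes.tolerated", 0);
    // Mirrors the patched checks: values below -1, or values >= the number of
    // configured volumes, are rejected as configuration errors.
    if (tolerated < -1 || tolerated >= volsConfigured) {
      throw new HadoopIllegalArgumentException(
          "Invalid value configured for dfs.datanode.failed.volumes.tolerated"
              + " - " + tolerated);
    }
    return tolerated;
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("dfs.datanode.failed.volumes.tolerated", -2);
    try {
      validateTolerated(conf, 3);
    } catch (HadoopIllegalArgumentException e) {
      // The unchecked exception reports a misconfiguration up front rather
      // than masquerading as an I/O or disk problem.
      System.out.println("Rejected: " + e.getMessage());
    }
  }
}

The new tests assert this behavior with LambdaTestUtils.intercept, expecting HadoopIllegalArgumentException and the "Invalid value configured" message for each bad setting.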
--- .../hdfs/server/datanode/BlockReceiver.java | 3 +- .../hadoop/hdfs/server/datanode/DataNode.java | 3 +- .../checker/DatasetVolumeChecker.java | 10 +++-- .../checker/StorageLocationChecker.java | 8 ++-- .../fsdataset/impl/FsDatasetImpl.java | 4 +- .../TestDataNodeVolumeFailureToleration.java | 4 +- .../server/datanode/TestDatanodeRegister.java | 12 ++++++ .../checker/TestDatasetVolumeChecker.java | 37 +++++++++++++++++++ .../checker/TestStorageLocationChecker.java | 33 ++++++++++++++++- 9 files changed, 101 insertions(+), 13 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 950918404303c..dad964c3d6c05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -25,6 +25,7 @@ import java.io.DataOutputStream; import java.io.EOFException; import java.io.IOException; +import java.io.InterruptedIOException; import java.io.OutputStreamWriter; import java.io.Writer; import java.nio.ByteBuffer; @@ -1074,7 +1075,7 @@ void receiveBlock( responder.interrupt(); // do not throw if shutting down for restart. if (!datanode.isRestarting()) { - throw new IOException("Interrupted receiveBlock"); + throw new InterruptedIOException("Interrupted receiveBlock"); } } responder = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index f9ef83d934262..09c99fdfec18d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -96,6 +96,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.ReconfigurableBase; @@ -1420,7 +1421,7 @@ void startDataNode(List dataDirectories, int volsConfigured = dnConf.getVolsConfigured(); if (volFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT || volFailuresTolerated >= volsConfigured) { - throw new DiskErrorException("Invalid value configured for " + throw new HadoopIllegalArgumentException("Invalid value configured for " + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated + ". 
Value configured is either less than -1 or >= " + "to the number of configured volumes (" + volsConfigured + ")."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java index b4922875bdd90..91582fe0558a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java @@ -26,6 +26,8 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.MoreExecutors; import com.google.common.util.concurrent.ThreadFactoryBuilder; + +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -120,7 +122,7 @@ public DatasetVolumeChecker(Configuration conf, Timer timer) TimeUnit.MILLISECONDS); if (maxAllowedTimeForCheckMs <= 0) { - throw new DiskErrorException("Invalid value configured for " + throw new HadoopIllegalArgumentException("Invalid value configured for " + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - " + maxAllowedTimeForCheckMs + " (should be > 0)"); } @@ -137,7 +139,7 @@ public DatasetVolumeChecker(Configuration conf, Timer timer) TimeUnit.MILLISECONDS); if (minDiskCheckGapMs < 0) { - throw new DiskErrorException("Invalid value configured for " + throw new HadoopIllegalArgumentException("Invalid value configured for " + DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY + " - " + minDiskCheckGapMs + " (should be >= 0)"); } @@ -148,7 +150,7 @@ public DatasetVolumeChecker(Configuration conf, Timer timer) TimeUnit.MILLISECONDS); if (diskCheckTimeout < 0) { - throw new DiskErrorException("Invalid value configured for " + throw new HadoopIllegalArgumentException("Invalid value configured for " + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - " + diskCheckTimeout + " (should be >= 0)"); } @@ -156,7 +158,7 @@ public DatasetVolumeChecker(Configuration conf, Timer timer) lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs; if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) { - throw new DiskErrorException("Invalid value configured for " + throw new HadoopIllegalArgumentException("Invalid value configured for " + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - " + maxVolumeFailuresTolerated + " " + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java index c5de065f4f7e0..0332bc8633950 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java @@ -23,6 +23,8 @@ import com.google.common.collect.Maps; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ThreadFactoryBuilder; + +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import 
org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -93,7 +95,7 @@ public StorageLocationChecker(Configuration conf, Timer timer) TimeUnit.MILLISECONDS); if (maxAllowedTimeForCheckMs <= 0) { - throw new DiskErrorException("Invalid value configured for " + throw new HadoopIllegalArgumentException("Invalid value configured for " + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - " + maxAllowedTimeForCheckMs + " (should be > 0)"); } @@ -107,7 +109,7 @@ public StorageLocationChecker(Configuration conf, Timer timer) DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT); if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) { - throw new DiskErrorException("Invalid value configured for " + throw new HadoopIllegalArgumentException("Invalid value configured for " + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - " + maxVolumeFailuresTolerated + " " + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG); @@ -170,7 +172,7 @@ public List check( } if (maxVolumeFailuresTolerated >= dataDirs.size()) { - throw new DiskErrorException("Invalid value configured for " + throw new HadoopIllegalArgumentException("Invalid value configured for " + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - " + maxVolumeFailuresTolerated + ". Value configured is >= " + "to the number of configured volumes (" + dataDirs.size() + ")."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 76110d68b88a2..f8507633fbf7e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -48,6 +48,8 @@ import javax.management.StandardMBean; import com.google.common.annotations.VisibleForTesting; + +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -292,7 +294,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b) if (volFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT || volFailuresTolerated >= volsConfigured) { - throw new DiskErrorException("Invalid value configured for " + throw new HadoopIllegalArgumentException("Invalid value configured for " + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated + ". 
Value configured is either less than maxVolumeFailureLimit or greater than " + "to the number of configured volumes (" + volsConfigured + ")."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java index 825887c1af5f7..a9e4096df4b47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; @@ -36,7 +37,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -242,7 +242,7 @@ private void testVolumeConfig(int volumesTolerated, int volumesFailed, prepareDirToFail(dirs[i]); } restartDatanodes(volumesTolerated, manageDfsDirs); - } catch (DiskErrorException e) { + } catch (HadoopIllegalArgumentException e) { GenericTestUtils.assertExceptionContains("Invalid value configured for " + "dfs.datanode.failed.volumes.tolerated", e); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java index 95a361a65dd26..bffdaae369771 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import static org.mockito.Mockito.doReturn; @@ -36,6 +37,7 @@ import org.junit.Assert; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -162,4 +164,14 @@ public void testDNShutdwonBeforeRegister() throws Exception { e.getMessage()); } } + + @Test + public void testInvalidConfigurationValue() throws Exception { + Configuration conf = new HdfsConfiguration(); + conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2); + intercept(HadoopIllegalArgumentException.class, + "Invalid value configured for dfs.datanode.failed.volumes.tolerated" + + " - -2 should be greater than or equal to -1", + () -> new DataNode(conf, new ArrayList<>(), null, null)); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java index 9a45e622748f7..9b0636fada19e 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java @@ -20,6 +20,8 @@ import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; + +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.hdfs.server.datanode.fsdataset.*; @@ -44,6 +46,10 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY; import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.*; @@ -226,4 +232,35 @@ static List makeVolumes( } return volumes; } + + @Test + public void testInvalidConfigurationValues() throws Exception { + HdfsConfiguration conf = new HdfsConfiguration(); + conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 0); + intercept(HadoopIllegalArgumentException.class, + "Invalid value configured for dfs.datanode.disk.check.timeout" + + " - 0 (should be > 0)", + () -> new DatasetVolumeChecker(conf, new FakeTimer())); + conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY); + + conf.setInt(DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY, -1); + intercept(HadoopIllegalArgumentException.class, + "Invalid value configured for dfs.datanode.disk.check.min.gap" + + " - -1 (should be >= 0)", + () -> new DatasetVolumeChecker(conf, new FakeTimer())); + conf.unset(DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY); + + conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, -1); + intercept(HadoopIllegalArgumentException.class, + "Invalid value configured for dfs.datanode.disk.check.timeout" + + " - -1 (should be > 0)", + () -> new DatasetVolumeChecker(conf, new FakeTimer())); + conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY); + + conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2); + intercept(HadoopIllegalArgumentException.class, + "Invalid value configured for dfs.datanode.failed.volumes.tolerated" + + " - -2 should be greater than or equal to -1", + () -> new DatasetVolumeChecker(conf, new FakeTimer())); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java index 169a1b9333ffa..80f0396c6fc30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.checker; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; @@ -36,6 +37,7 @@ import 
java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY; import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*; @@ -129,7 +131,7 @@ public void testBadConfiguration() throws Exception { final Configuration conf = new HdfsConfiguration(); conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 3); - thrown.expect(IOException.class); + thrown.expect(HadoopIllegalArgumentException.class); thrown.expectMessage("Invalid value configured"); StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer()); @@ -214,4 +216,33 @@ public VolumeCheckResult answer(InvocationOnMock invocation) } return locations; } + + @Test + public void testInvalidConfigurationValues() throws Exception { + final List locations = + makeMockLocations(HEALTHY, HEALTHY, HEALTHY); + Configuration conf = new HdfsConfiguration(); + + conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 4); + intercept(HadoopIllegalArgumentException.class, + "Invalid value configured for dfs.datanode.failed.volumes.tolerated" + + " - 4. Value configured is >= to the " + + "number of configured volumes (3).", + () -> new StorageLocationChecker(conf, new FakeTimer()).check(conf, + locations)); + conf.unset(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY); + + conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 0); + intercept(HadoopIllegalArgumentException.class, + "Invalid value configured for dfs.datanode.disk.check.timeout" + + " - 0 (should be > 0)", + () -> new StorageLocationChecker(conf, new FakeTimer())); + conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY); + + conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2); + intercept(HadoopIllegalArgumentException.class, + "Invalid value configured for dfs.datanode.failed.volumes.tolerated" + + " - -2 should be greater than or equal to -1", + () -> new StorageLocationChecker(conf, new FakeTimer())); + } } From 8ca58efeece77dc7b0a2a792c1c29e3567ae02b6 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Fri, 7 Jun 2019 03:14:47 +0800 Subject: [PATCH 0141/1308] =?UTF-8?q?HDDS-1490.=20Support=20configurable?= =?UTF-8?q?=20container=20placement=20policy=20through=20'o=E2=80=A6=20(#9?= =?UTF-8?q?03)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 4 + .../algorithms/ContainerPlacementPolicy.java | 0 .../placement/algorithms/package-info.java | 21 +++ .../hdds/scm/net/NetworkTopologyImpl.java | 2 +- .../hadoop/hdds/scm/net/NodeSchemaLoader.java | 72 +++++---- .../hdds/scm/net/NodeSchemaManager.java | 11 +- .../src/main/resources/ozone-default.xml | 8 +- .../hdds/scm/net/TestNodeSchemaLoader.java | 8 +- .../hdds/scm/net/TestNodeSchemaManager.java | 2 +- .../hdds/scm/net/TestYamlSchemaLoader.java | 10 +- hadoop-hdds/server-scm/pom.xml | 5 + .../ContainerPlacementPolicyFactory.java | 67 +++++++++ .../SCMContainerPlacementCapacity.java | 4 +- .../SCMContainerPlacementRandom.java | 4 +- .../hdds/scm/exceptions/SCMException.java | 3 +- .../scm/server/StorageContainerManager.java | 20 ++- .../scm/container/TestReplicationManager.java | 5 +- .../TestContainerPlacementFactory.java | 142 ++++++++++++++++++ .../TestSCMContainerPlacementCapacity.java | 2 +- .../TestSCMContainerPlacementRackAware.java | 39 ++++- 
.../TestSCMContainerPlacementRandom.java | 2 +- .../placement/TestContainerPlacement.java | 6 +- hadoop-ozone/integration-test/pom.xml | 8 + hadoop-ozone/ozone-manager/pom.xml | 8 + 24 files changed, 380 insertions(+), 73 deletions(-) rename hadoop-hdds/{server-scm => common}/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java (100%) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 4a423588f5f28..c91d1c1e83b91 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -312,6 +312,10 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY = "ozone.scm.container.placement.impl"; + public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_DEFAULT = + "org.apache.hadoop.hdds.scm.container.placement.algorithms." + + "SCMContainerPlacementRackAware"; + public static final String OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT = "ozone.scm.pipeline.owner.container.count"; public static final int OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT = 3; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java similarity index 100% rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java new file mode 100644 index 0000000000000..dac4752fe66fa --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdds.scm.container.placement.algorithms; +/** + Contains container placement policy interface definition. + **/ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index d0b295f717875..8613ed7116a35 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -732,7 +732,7 @@ public String toString() { try { // print the number of leaves int numOfLeaves = clusterTree.getNumOfLeaves(); - tree.append("Expected number of leaves:"); + tree.append("Number of leaves:"); tree.append(numOfLeaves); tree.append("\n"); // print all nodes diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java index 32d7f16a9969d..3e1a7109621fc 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.net; +import org.apache.commons.io.FilenameUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.w3c.dom.Document; @@ -31,7 +32,10 @@ import javax.xml.parsers.ParserConfigurationException; import java.io.File; import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -93,23 +97,50 @@ public List getSchemaList() { } /** - * Load user defined network layer schemas from a XML configuration file. + * Load user defined network layer schemas from a XML/YAML configuration file. 
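+ *
+ * Example (illustrative only; the schema file name below is an arbitrary
+ * placeholder and is resolved against the local disk first, then the
+ * thread context classloader):
+ *   NodeSchemaLoader.getInstance().loadSchemaFromFile("network-topology-default.xml");
+ * The YAML parser is used when the file extension is "yaml"; any other
+ * extension is parsed as XML.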
* @param schemaFilePath path of schema file * @return all valid node schemas defined in schema file */ - public NodeSchemaLoadResult loadSchemaFromXml(String schemaFilePath) - throws IllegalArgumentException { + public NodeSchemaLoadResult loadSchemaFromFile(String schemaFilePath) + throws IllegalArgumentException, FileNotFoundException { try { File schemaFile = new File(schemaFilePath); if (!schemaFile.exists()) { - String msg = "Network topology layer schema file " + schemaFilePath + - " is not found."; + // try to load with classloader + ClassLoader classloader = + Thread.currentThread().getContextClassLoader(); + if (classloader == null) { + classloader = NodeSchemaLoader.class.getClassLoader(); + } + if (classloader != null) { + URL url = classloader.getResource(schemaFilePath); + if (url != null) { + schemaFile = new File(url.toURI()); + } + } + } + + if (!schemaFile.exists()) { + String msg = "Network topology layer schema file " + + schemaFilePath + "[" + schemaFile.getAbsolutePath() + + "] is not found."; LOG.warn(msg); - throw new IllegalArgumentException(msg); + throw new FileNotFoundException(msg); } - return loadSchema(schemaFile); - } catch (ParserConfigurationException | IOException | SAXException e) { - throw new IllegalArgumentException("Fail to load network topology node" + + LOG.info("Load network topology schema file " + + schemaFile.getCanonicalPath()); + if (FilenameUtils.getExtension(schemaFilePath).toLowerCase() + .compareTo("yaml") == 0) { + return loadSchemaFromYaml(schemaFile); + } else { + return loadSchema(schemaFile); + } + } catch (FileNotFoundException e) { + throw e; + } catch (ParserConfigurationException | IOException | SAXException | + URISyntaxException e) { + throw new IllegalArgumentException("Failed to load network topology node" + " schema file: " + schemaFilePath + " , error:" + e.getMessage()); } } @@ -167,29 +198,6 @@ private NodeSchemaLoadResult loadSchema(File schemaFile) throws return schemaList; } - /** - * Load user defined network layer schemas from a YAML configuration file. - * @param schemaFilePath path of schema file - * @return all valid node schemas defined in schema file - */ - public NodeSchemaLoadResult loadSchemaFromYaml(String schemaFilePath) - throws IllegalArgumentException { - try { - File schemaFile = new File(schemaFilePath); - if (!schemaFile.exists()) { - String msg = "Network topology layer schema file " + schemaFilePath + - " is not found."; - LOG.warn(msg); - throw new IllegalArgumentException(msg); - } - return loadSchemaFromYaml(schemaFile); - } catch (Exception e) { - throw new IllegalArgumentException("Fail to load network topology node" - + " schema file: " + schemaFilePath + " , error:" - + e.getMessage()); - } - } - /** * Load network topology layer schemas from a YAML configuration file. 
* @param schemaFile schema file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java index 9a598c619242b..680c7be2d81b5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java @@ -19,7 +19,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.commons.io.FilenameUtils; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.net.NodeSchemaLoader.NodeSchemaLoadResult; import org.slf4j.Logger; @@ -63,20 +62,14 @@ public void init(Configuration conf) { String schemaFile = conf.get( ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT); - NodeSchemaLoadResult result; try { - if (FilenameUtils.getExtension(schemaFile).toLowerCase() - .compareTo("yaml") == 0) { - result = NodeSchemaLoader.getInstance().loadSchemaFromYaml(schemaFile); - } else { - result = NodeSchemaLoader.getInstance().loadSchemaFromXml(schemaFile); - } + result = NodeSchemaLoader.getInstance().loadSchemaFromFile(schemaFile); allSchema = result.getSchemaList(); enforcePrefix = result.isEnforePrefix(); maxLevel = allSchema.size(); } catch (Throwable e) { - String msg = "Fail to load schema file:" + schemaFile + String msg = "Failed to load schema file:" + schemaFile + ", error:" + e.getMessage(); LOG.error(msg); throw new RuntimeException(msg); diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 33f058486c7fe..178712944405b 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -815,11 +815,13 @@ ozone.scm.container.placement.impl - org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom + org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware OZONE, MANAGEMENT - Placement policy class for containers. - Defaults to SCMContainerPlacementRandom.class + + The full name of class which implements org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy. + The class decides which datanode will be used to host the container replica. If not set, + org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware will be used as default value. 
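A minimal override of this default, sketched here for illustration (assuming the
cluster's ozone-site.xml is where site-specific values are set), would restore the
previous random placement:

  <property>
    <name>ozone.scm.container.placement.impl</name>
    <value>org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom</value>
  </property>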
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java index 30799b1099b37..0c20353a2ce0c 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java @@ -44,7 +44,7 @@ public TestNodeSchemaLoader(String schemaFile, String errMsg) { try { String filePath = classLoader.getResource( "./networkTopologyTestFiles/" + schemaFile).getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromXml(filePath); + NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); fail("expect exceptions"); } catch (Throwable e) { assertTrue(e.getMessage().contains(errMsg)); @@ -83,7 +83,7 @@ public void testGood() { try { String filePath = classLoader.getResource( "./networkTopologyTestFiles/good.xml").getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromXml(filePath); + NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); } catch (Throwable e) { fail("should succeed"); } @@ -94,10 +94,10 @@ public void testNotExist() { String filePath = classLoader.getResource( "./networkTopologyTestFiles/good.xml").getPath() + ".backup"; try { - NodeSchemaLoader.getInstance().loadSchemaFromXml(filePath); + NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); fail("should fail"); } catch (Throwable e) { - assertTrue(e.getMessage().contains("file " + filePath + " is not found")); + assertTrue(e.getMessage().contains("not found")); } } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java index 7e304190d6d1a..6698043727649 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java @@ -79,7 +79,7 @@ public void testInitFailure() { manager.init(conf); fail("should fail"); } catch (Throwable e) { - assertTrue(e.getMessage().contains("Fail to load schema file:" + + assertTrue(e.getMessage().contains("Failed to load schema file:" + filePath)); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java index 580a7fb485e80..c38bf388363cf 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java @@ -44,7 +44,7 @@ public TestYamlSchemaLoader(String schemaFile, String errMsg) { try { String filePath = classLoader.getResource( "./networkTopologyTestFiles/" + schemaFile).getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromYaml(filePath); + NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); fail("expect exceptions"); } catch (Throwable e) { assertTrue(e.getMessage().contains(errMsg)); @@ -69,7 +69,7 @@ public void testGood() { try { String filePath = classLoader.getResource( "./networkTopologyTestFiles/good.yaml").getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromYaml(filePath); + NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); } catch (Throwable e) { fail("should succeed"); } @@ -78,12 +78,12 @@ public void testGood() { @Test public void testNotExist() { 
String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.xml").getPath() + ".backup"; + "./networkTopologyTestFiles/good.yaml").getPath() + ".backup"; try { - NodeSchemaLoader.getInstance().loadSchemaFromXml(filePath); + NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); fail("should fail"); } catch (Throwable e) { - assertTrue(e.getMessage().contains("file " + filePath + " is not found")); + assertTrue(e.getMessage().contains("not found")); } } diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 9b2a8e2b98caf..b55a224a85116 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -141,5 +141,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + ${basedir}/../../hadoop-hdds/common/src/main/resources + + diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java new file mode 100644 index 0000000000000..826c3d6fe1cfc --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.container.placement.algorithms; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.Constructor; + +/** + * A factory to create container placement instance based on configuration + * property ozone.scm.container.placement.classname. 
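+ * (see ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, i.e.
+ * "ozone.scm.container.placement.impl").
+ *
+ * Typical use, mirroring how StorageContainerManager wires the policy in this
+ * change (sketch only; sizeRequired is a placeholder for the requested
+ * container size in bytes):
+ *   ContainerPlacementPolicy policy = ContainerPlacementPolicyFactory
+ *       .getPolicy(conf, scmNodeManager, clusterMap, true);
+ *   List<DatanodeDetails> targets =
+ *       policy.chooseDatanodes(null, null, 3, sizeRequired);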
+ */ +public final class ContainerPlacementPolicyFactory { + private static final Logger LOG = + LoggerFactory.getLogger(ContainerPlacementPolicyFactory.class); + + private ContainerPlacementPolicyFactory() { + } + + public static ContainerPlacementPolicy getPolicy(Configuration conf, + final NodeManager nodeManager, NetworkTopology clusterMap, + final boolean fallback) throws SCMException{ + final Class placementClass = conf + .getClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, + SCMContainerPlacementRackAware.class, + ContainerPlacementPolicy.class); + Constructor constructor; + try { + constructor = placementClass.getDeclaredConstructor(NodeManager.class, + Configuration.class, NetworkTopology.class, boolean.class); + } catch (NoSuchMethodException e) { + String msg = "Failed to find constructor(NodeManager, Configuration, " + + "NetworkTopology, boolean) for class " + + placementClass.getCanonicalName(); + LOG.error(msg); + throw new SCMException(msg, + SCMException.ResultCodes.FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY); + } + + try { + return constructor.newInstance(nodeManager, conf, clusterMap, fallback); + } catch (Exception e) { + throw new RuntimeException("Failed to instantiate class " + + placementClass.getCanonicalName() + " for " + e.getMessage()); + } + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java index daf8222606641..f2892ffb4cff8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.node.NodeManager; import com.google.common.annotations.VisibleForTesting; @@ -77,7 +78,8 @@ public final class SCMContainerPlacementCapacity extends SCMCommonPolicy { * @param conf Configuration */ public SCMContainerPlacementCapacity(final NodeManager nodeManager, - final Configuration conf) { + final Configuration conf, final NetworkTopology networkTopology, + final boolean fallback) { super(nodeManager, conf); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java index 48b613944202f..bcd7986ce577c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import 
org.slf4j.Logger; @@ -49,7 +50,8 @@ public final class SCMContainerPlacementRandom extends SCMCommonPolicy * @param conf Config */ public SCMContainerPlacementRandom(final NodeManager nodeManager, - final Configuration conf) { + final Configuration conf, final NetworkTopology networkTopology, + final boolean fallback) { super(nodeManager, conf); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java index 01166ad5a765b..ec75eec1f6318 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java @@ -119,6 +119,7 @@ public enum ResultCodes { DUPLICATE_DATANODE, NO_SUCH_DATANODE, NO_REPLICA_FOUND, - FAILED_TO_FIND_ACTIVE_PIPELINE + FAILED_TO_FIND_ACTIVE_PIPELINE, + FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index b13f2cb7cfd32..7cc5cbaf7f761 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -41,6 +41,9 @@ import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl; import org.apache.hadoop.hdds.scm.block.PendingDeleteHandler; import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.safemode.SafeModeHandler; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler; @@ -53,7 +56,6 @@ import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler; import org.apache.hadoop.hdds.scm.container.SCMContainerManager; import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics; import org.apache.hadoop.hdds.scm.container.ReplicationManager; @@ -206,6 +208,11 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl private final SafeModeHandler safeModeHandler; private SCMContainerMetrics scmContainerMetrics; + /** + * Network topology Map. + */ + private NetworkTopology clusterMap; + /** * Creates a new StorageContainerManager. 
Configuration will be * updated with information on the actual listening addresses used @@ -277,14 +284,13 @@ public StorageContainerManager(OzoneConfiguration conf, securityProtocolServer = null; } - eventQueue = new EventQueue(); long watcherTimeout = conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT, HDDS_SCM_WATCHER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); commandWatcherLeaseManager = new LeaseManager<>("CommandWatcher", watcherTimeout); - initalizeSystemManagers(conf, configurator); + initializeSystemManagers(conf, configurator); CloseContainerEventHandler closeContainerHandler = new CloseContainerEventHandler(pipelineManager, containerManager); @@ -381,7 +387,7 @@ public StorageContainerManager(OzoneConfiguration conf, * used if needed. * @throws IOException - on Failure. */ - private void initalizeSystemManagers(OzoneConfiguration conf, + private void initializeSystemManagers(OzoneConfiguration conf, SCMConfigurator configurator) throws IOException { if(configurator.getScmNodeManager() != null) { @@ -391,9 +397,10 @@ private void initalizeSystemManagers(OzoneConfiguration conf, conf, scmStorageConfig.getClusterID(), this, eventQueue); } - //TODO: support configurable containerPlacement policy + clusterMap = new NetworkTopologyImpl(conf); ContainerPlacementPolicy containerPlacementPolicy = - new SCMContainerPlacementCapacity(scmNodeManager, conf); + ContainerPlacementPolicyFactory.getPolicy(conf, scmNodeManager, + clusterMap, true); if (configurator.getPipelineManager() != null) { pipelineManager = configurator.getPipelineManager(); @@ -1205,7 +1212,6 @@ public Map getContainerStateCount() { return nodeStateCount; } - /** * Returns the SCM metadata Store. * @return SCMMetadataStore diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java index 35fd1088124c2..6a0e16326d25a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java @@ -97,8 +97,9 @@ public void setup() throws IOException, InterruptedException { Mockito.when(containerPlacementPolicy.chooseDatanodes( Mockito.anyListOf(DatanodeDetails.class), - Mockito.anyListOf(DatanodeDetails.class), Mockito.anyInt(), - Mockito.anyLong())).thenAnswer(invocation -> { + Mockito.anyListOf(DatanodeDetails.class), + Mockito.anyInt(), Mockito.anyLong())) + .thenAnswer(invocation -> { int count = (int) invocation.getArguments()[2]; return IntStream.range(0, count) .mapToObj(i -> randomDatanodeDetails()) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java new file mode 100644 index 0000000000000..ae709b10db53c --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java @@ -0,0 +1,142 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.container.placement.algorithms; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; +import org.apache.hadoop.hdds.scm.net.NodeSchema; +import org.apache.hadoop.hdds.scm.net.NodeSchemaManager; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.when; + +/** + * Test for scm container placement factory. + */ +public class TestContainerPlacementFactory { + // network topology cluster + private NetworkTopology cluster; + // datanodes array list + private List datanodes = new ArrayList<>(); + // node storage capacity + private final long storageCapacity = 100L; + // configuration + private Configuration conf; + // node manager + private NodeManager nodeManager; + + @Before + public void setup() { + //initialize network topology instance + conf = new OzoneConfiguration(); + NodeSchema[] schemas = new NodeSchema[] + {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}; + NodeSchemaManager.getInstance().init(schemas, true); + cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance()); + + // build datanodes, and network topology + String rack = "/rack"; + String hostname = "node"; + for (int i = 0; i < 15; i++) { + // Totally 3 racks, each has 5 datanodes + DatanodeDetails node = TestUtils.createDatanodeDetails( + hostname + i, rack + (i / 5)); + datanodes.add(node); + cluster.add(node); + } + + // create mock node manager + nodeManager = Mockito.mock(NodeManager.class); + when(nodeManager.getNodes(NodeState.HEALTHY)) + .thenReturn(new ArrayList<>(datanodes)); + when(nodeManager.getNodeStat(anyObject())) + .thenReturn(new SCMNodeMetric(storageCapacity, 0L, 100L)); + when(nodeManager.getNodeStat(datanodes.get(2))) + .thenReturn(new SCMNodeMetric(storageCapacity, 90L, 10L)); + when(nodeManager.getNodeStat(datanodes.get(3))) + .thenReturn(new SCMNodeMetric(storageCapacity, 80L, 20L)); + when(nodeManager.getNodeStat(datanodes.get(4))) + .thenReturn(new SCMNodeMetric(storageCapacity, 70L, 30L)); + } + + + @Test + public void testDefaultPolicy() throws IOException { + ContainerPlacementPolicy policy = ContainerPlacementPolicyFactory + .getPolicy(conf, nodeManager, cluster, true); + + int nodeNum = 3; + List datanodeDetails = + 
policy.chooseDatanodes(null, null, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(1))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(2))); + Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1), + datanodeDetails.get(2))); + } + + /** + * A dummy container placement implementation for test. + */ + public class DummyImpl implements ContainerPlacementPolicy { + @Override + public List chooseDatanodes( + List excludedNodes, List favoredNodes, + int nodesRequired, long sizeRequired) throws IOException { + return null; + } + } + + @Test(expected = SCMException.class) + public void testConstuctorNotFound() throws SCMException { + // set a placement class which does't have the right constructor implemented + conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, + "org.apache.hadoop.hdds.scm.container.placement.algorithms." + + "TestContainerPlacementFactory$DummyImpl"); + ContainerPlacementPolicyFactory.getPolicy(conf, null, null, true); + } + + @Test(expected = RuntimeException.class) + public void testClassNotImplemented() throws SCMException { + // set a placement class not implemented + conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, + "org.apache.hadoop.hdds.scm.container.placement.algorithm.HelloWorld"); + ContainerPlacementPolicyFactory.getPolicy(conf, null, null, true); + } +} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index fb2a4c33dfcf7..2c58dc28fda8a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -64,7 +64,7 @@ public void chooseDatanodes() throws SCMException { .thenReturn(new SCMNodeMetric(100L, 70L, 30L)); SCMContainerPlacementCapacity scmContainerPlacementRandom = - new SCMContainerPlacementCapacity(mockNodeManager, conf); + new SCMContainerPlacementCapacity(mockNodeManager, conf, null, true); List existingNodes = new ArrayList<>(); existingNodes.add(datanodes.get(0)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index 732178e499c07..e63b09e528e37 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.hdds.scm.container.placement.algorithms; +import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -23,6 +24,7 @@ import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import 
org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.net.NodeSchema; @@ -47,6 +49,8 @@ */ public class TestSCMContainerPlacementRackAware { private NetworkTopology cluster; + private Configuration conf; + private NodeManager nodeManager; private List datanodes = new ArrayList<>(); // policy with fallback capability private SCMContainerPlacementRackAware policy; @@ -58,7 +62,7 @@ public class TestSCMContainerPlacementRackAware { @Before public void setup() { //initialize network topology instance - Configuration conf = new OzoneConfiguration(); + conf = new OzoneConfiguration(); NodeSchema[] schemas = new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}; NodeSchemaManager.getInstance().init(schemas, true); @@ -76,7 +80,7 @@ public void setup() { } // create mock node manager - NodeManager nodeManager = Mockito.mock(NodeManager.class); + nodeManager = Mockito.mock(NodeManager.class); when(nodeManager.getNodes(NodeState.HEALTHY)) .thenReturn(new ArrayList<>(datanodes)); when(nodeManager.getNodeStat(anyObject())) @@ -254,4 +258,35 @@ public void testNoInfiniteLoop() throws SCMException { // request storage space larger than node capability policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 15); } + + @Test + public void testDatanodeWithDefaultNetworkLocation() throws SCMException { + String hostname = "node"; + List dataList = new ArrayList<>(); + NetworkTopology clusterMap = + new NetworkTopologyImpl(NodeSchemaManager.getInstance()); + for (int i = 0; i < 15; i++) { + // Totally 3 racks, each has 5 datanodes + DatanodeDetails node = TestUtils.createDatanodeDetails( + hostname + i, null); + dataList.add(node); + clusterMap.add(node); + } + Assert.assertEquals(dataList.size(), StringUtils.countMatches( + clusterMap.toString(), NetConstants.DEFAULT_RACK)); + + // choose nodes to host 3 replica + int nodeNum = 3; + SCMContainerPlacementRackAware newPolicy = + new SCMContainerPlacementRackAware(nodeManager, conf, clusterMap, true); + List datanodeDetails = + newPolicy.chooseDatanodes(null, null, nodeNum, 15); + Assert.assertEquals(nodeNum, datanodeDetails.size()); + Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(1))); + Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), + datanodeDetails.get(2))); + Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(1), + datanodeDetails.get(2))); + } } \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java index a20c6c019f195..adb30bcbec14b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java @@ -59,7 +59,7 @@ public void chooseDatanodes() throws SCMException { .thenReturn(new SCMNodeMetric(100L, 90L, 10L)); SCMContainerPlacementRandom scmContainerPlacementRandom = - new SCMContainerPlacementRandom(mockNodeManager, conf); + new SCMContainerPlacementRandom(mockNodeManager, conf, null, true); List existingNodes = new 
ArrayList<>(); existingNodes.add(datanodes.get(0)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java index bd62111cf1e40..62e1108f4a4ad 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java @@ -80,9 +80,11 @@ public void testCapacityPlacementYieldsBetterDataDistribution() throws .getStandardDeviation(), 0.001); SCMContainerPlacementCapacity capacityPlacer = new - SCMContainerPlacementCapacity(nodeManagerCapacity, new Configuration()); + SCMContainerPlacementCapacity(nodeManagerCapacity, new Configuration(), + null, true); SCMContainerPlacementRandom randomPlacer = new - SCMContainerPlacementRandom(nodeManagerRandom, new Configuration()); + SCMContainerPlacementRandom(nodeManagerRandom, new Configuration(), + null, true); for (int x = 0; x < opsCount; x++) { long containerSize = random.nextInt(100) * OzoneConsts.GB; diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 3d4de8ef5b253..ba53e3fb43954 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -126,4 +126,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-jar + + + + + ${basedir}/../../hadoop-hdds/common/src/main/resources + + + diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 304f8851da57d..0f5ae75a8cdad 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -124,5 +124,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + ${basedir}/../../hadoop-hdds/common/src/main/resources + + + ${basedir}/src/test/resources + + From cb9bc6e64c590622ae04aea2c81962be59037f7a Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Thu, 6 Jun 2019 16:41:58 -0400 Subject: [PATCH 0142/1308] YARN-9581. Fixed yarn logs cli to access RM2. 
Contributed by Prabhu Joseph --- .../hadoop/yarn/client/cli/LogsCLI.java | 44 ++++++++++++------- .../hadoop/yarn/client/cli/SchedConfCLI.java | 26 ++++++----- .../hadoop/yarn/webapp/util/WebAppUtils.java | 39 ++++++++++++++-- .../yarn/webapp/util/YarnWebServiceUtils.java | 29 ++++++++---- .../yarn/conf/TestYarnConfiguration.java | 6 +++ 5 files changed, 107 insertions(+), 37 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java index 96007f41a3d10..2b5439bf53e38 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java @@ -25,6 +25,7 @@ import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.UniformInterfaceException; import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.api.client.WebResource.Builder; import com.sun.jersey.api.client.filter.ClientFilter; import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory; import com.sun.jersey.client.urlconnection.URLConnectionClientHandler; @@ -157,6 +158,9 @@ public HttpURLConnection getHttpURLConnection(URL url) if (yarnClient != null) { yarnClient.close(); } + if (webServiceClient != null) { + webServiceClient.destroy(); + } } } @@ -420,24 +424,34 @@ private void printHelpMessage(Options options) { } protected List getAMContainerInfoForRMWebService( - Configuration conf, String appId) throws ClientHandlerException, - UniformInterfaceException, JSONException { - String webAppAddress = WebAppUtils.getRMWebAppURLWithScheme(conf); - - WebResource webResource = webServiceClient.resource(webAppAddress); + Configuration conf, String appId) throws Exception { + return WebAppUtils.execOnActiveRM(conf, this::getAMContainerInfoFromRM, + appId); + } - ClientResponse response = - webResource.path("ws").path("v1").path("cluster").path("apps") - .path(appId).path("appattempts").accept(MediaType.APPLICATION_JSON) - .get(ClientResponse.class); - JSONObject json = - response.getEntity(JSONObject.class).getJSONObject("appAttempts"); - JSONArray requests = json.getJSONArray("appAttempt"); + private List getAMContainerInfoFromRM( + String webAppAddress, String appId) throws ClientHandlerException, + UniformInterfaceException, JSONException { List amContainersList = new ArrayList(); - for (int i = 0; i < requests.length(); i++) { - amContainersList.add(requests.getJSONObject(i)); + ClientResponse response = null; + try { + Builder builder = webServiceClient.resource(webAppAddress) + .path("ws").path("v1").path("cluster") + .path("apps").path(appId).path("appattempts") + .accept(MediaType.APPLICATION_JSON); + response = builder.get(ClientResponse.class); + JSONObject json = response.getEntity(JSONObject.class) + .getJSONObject("appAttempts"); + JSONArray requests = json.getJSONArray("appAttempt"); + for (int j = 0; j < requests.length(); j++) { + amContainersList.add(requests.getJSONObject(j)); + } + return amContainersList; + } finally { + if (response != null) { + response.close(); + } } - return amContainersList; } private List getAMContainerInfoForAHSWebService( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java index a5f3b80c50baa..be54553a0c8fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java @@ -21,13 +21,14 @@ import com.google.common.annotations.VisibleForTesting; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; -import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.api.client.WebResource.Builder; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.MissingArgumentException; import org.apache.commons.cli.Options; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.util.Tool; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -131,18 +132,22 @@ public int run(String[] args) throws Exception { return -1; } + Configuration conf = getConf(); + return WebAppUtils.execOnActiveRM(conf, + this::updateSchedulerConfOnRMNode, updateInfo); + } + + private int updateSchedulerConfOnRMNode(String webAppAddress, + SchedConfUpdateInfo updateInfo) throws Exception { Client webServiceClient = Client.create(); - WebResource webResource = webServiceClient - .resource(WebAppUtils.getRMWebAppURLWithScheme(getConf())); ClientResponse response = null; - try { - response = - webResource.path("ws").path("v1").path("cluster") - .path("scheduler-conf").accept(MediaType.APPLICATION_JSON) - .entity(YarnWebServiceUtils.toJson(updateInfo, - SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON) - .put(ClientResponse.class); + Builder builder = webServiceClient.resource(webAppAddress) + .path("ws").path("v1").path("cluster") + .path("scheduler-conf").accept(MediaType.APPLICATION_JSON); + builder.entity(YarnWebServiceUtils.toJson(updateInfo, + SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON); + response = builder.put(ClientResponse.class); if (response != null) { if (response.getStatus() == Status.OK.getStatusCode()) { System.out.println("Configuration changed successfully."); @@ -163,6 +168,7 @@ public int run(String[] args) throws Exception { } } + @VisibleForTesting void addQueues(String args, SchedConfUpdateInfo updateInfo) { if (args == null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java index e62bf104ae4ce..5b1c3bb3b7dcf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java @@ -90,8 +90,33 @@ public static void setNMWebAppHostNameAndPort(Configuration conf, } } + /** + * Runs a certain function against the active RM. The function's first + * argument is expected to be a string which contains the address of + * the RM being tried. 
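+ *
+ * Example, mirroring the LogsCLI usage in this change (sketch only):
+ *   List<JSONObject> amContainers = WebAppUtils.execOnActiveRM(conf,
+ *       this::getAMContainerInfoFromRM, appId);
+ * The first RM web address is tried first; if that call throws and RM HA is
+ * enabled, the same function is applied to the second RM address, otherwise
+ * the original exception is rethrown.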
+ */ + public static R execOnActiveRM(Configuration conf, + ThrowingBiFunction func, T arg) throws Exception { + String rm1Address = getRMWebAppURLWithScheme(conf, 0); + try { + return func.apply(rm1Address, arg); + } catch (Exception e) { + if (HAUtil.isHAEnabled(conf)) { + String rm2Address = getRMWebAppURLWithScheme(conf, 1); + return func.apply(rm2Address, arg); + } + throw e; + } + } + + /** A BiFunction which throws on Exception. */ + @FunctionalInterface + public interface ThrowingBiFunction { + R apply(T t, U u) throws Exception; + } + public static String getRMWebAppURLWithoutScheme(Configuration conf, - boolean isHAEnabled) { + boolean isHAEnabled, int haIdIndex) { YarnConfiguration yarnConfig = new YarnConfiguration(conf); // set RM_ID if we have not configure it. if (isHAEnabled) { @@ -99,7 +124,7 @@ public static String getRMWebAppURLWithoutScheme(Configuration conf, if (rmId == null || rmId.isEmpty()) { List rmIds = new ArrayList<>(HAUtil.getRMHAIds(conf)); if (rmIds != null && !rmIds.isEmpty()) { - yarnConfig.set(YarnConfiguration.RM_HA_ID, rmIds.get(0)); + yarnConfig.set(YarnConfiguration.RM_HA_ID, rmIds.get(haIdIndex)); } } } @@ -120,13 +145,19 @@ public static String getRMWebAppURLWithoutScheme(Configuration conf, } } + public static String getRMWebAppURLWithScheme(Configuration conf, + int haIdIndex) { + return getHttpSchemePrefix(conf) + getRMWebAppURLWithoutScheme( + conf, HAUtil.isHAEnabled(conf), haIdIndex); + } + public static String getRMWebAppURLWithScheme(Configuration conf) { return getHttpSchemePrefix(conf) + getRMWebAppURLWithoutScheme( - conf, HAUtil.isHAEnabled(conf)); + conf, HAUtil.isHAEnabled(conf), 0); } public static String getRMWebAppURLWithoutScheme(Configuration conf) { - return getRMWebAppURLWithoutScheme(conf, false); + return getRMWebAppURLWithoutScheme(conf, false, 0); } public static String getRouterWebAppURLWithScheme(Configuration conf) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java index e7bca2ca0f2ac..fccb3e1415f8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java @@ -21,7 +21,7 @@ import com.sun.jersey.api.client.ClientHandlerException; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.UniformInterfaceException; -import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.api.client.WebResource.Builder; import javax.ws.rs.core.MediaType; import com.sun.jersey.api.json.JSONJAXBContext; @@ -53,16 +53,29 @@ private YarnWebServiceUtils() {} public static JSONObject getNodeInfoFromRMWebService(Configuration conf, String nodeId) throws ClientHandlerException, UniformInterfaceException { - Client webServiceClient = Client.create(); - String webAppAddress = WebAppUtils.getRMWebAppURLWithScheme(conf); - - WebResource webResource = webServiceClient.resource(webAppAddress); + try { + return WebAppUtils.execOnActiveRM(conf, + YarnWebServiceUtils::getNodeInfoFromRM, nodeId); + } catch (Exception e) { + if (e instanceof ClientHandlerException) { + throw ((ClientHandlerException) e); + } else if (e instanceof UniformInterfaceException) { + throw ((UniformInterfaceException) e); + } 
else { + throw new RuntimeException(e); + } + } + } + private static JSONObject getNodeInfoFromRM(String webAppAddress, + String nodeId) throws ClientHandlerException, UniformInterfaceException { + Client webServiceClient = Client.create(); ClientResponse response = null; try { - response = webResource.path("ws").path("v1").path("cluster") - .path("nodes").path(nodeId).accept(MediaType.APPLICATION_JSON) - .get(ClientResponse.class); + Builder builder = webServiceClient.resource(webAppAddress) + .path("ws").path("v1").path("cluster") + .path("nodes").path(nodeId).accept(MediaType.APPLICATION_JSON); + response = builder.get(ClientResponse.class); return response.getEntity(JSONObject.class); } finally { if (response != null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java index a053fdb9376a5..212e09c02e90f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java @@ -56,6 +56,12 @@ public void testDefaultRMWebUrl() throws Exception { conf2.set("yarn.resourcemanager.hostname.rm2", "40.40.40.40"); String rmWebUrlinHA2 = WebAppUtils.getRMWebAppURLWithScheme(conf2); Assert.assertEquals("http://30.30.30.30:8088", rmWebUrlinHA2); + + rmWebUrlinHA2 = WebAppUtils.getRMWebAppURLWithScheme(conf2, 0); + Assert.assertEquals("http://30.30.30.30:8088", rmWebUrlinHA2); + + rmWebUrlinHA2 = WebAppUtils.getRMWebAppURLWithScheme(conf2, 1); + Assert.assertEquals("http://40.40.40.40:8088", rmWebUrlinHA2); } @Test From 3ea4f41d9f944274db3e44ab04118e781c1e00d2 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 6 Jun 2019 16:36:34 +0900 Subject: [PATCH 0143/1308] MAPREDUCE-6794. 
Remove unused properties from TTConfig.java --- .../src/site/markdown/DeprecatedProperties.md | 20 ----- .../server/tasktracker/TTConfig.java | 73 +------------------ .../hadoop/mapreduce/util/ConfigUtil.java | 41 ----------- .../resources/job_1329348432655_0001_conf.xml | 13 ---- .../src/main/data/2jobs2min-rumen-jh.json | 26 ------- 5 files changed, 2 insertions(+), 171 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md index 8ec7c14343865..abeacafa27826 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md @@ -65,7 +65,6 @@ The following table lists the configuration property names that are deprecated i | fs.s3a.server-side-encryption-key | fs.s3a.server-side-encryption.key | | hadoop.configured.node.mapping | net.topology.configured.node.mapping | | hadoop.native.lib | io.native.lib.available | -| hadoop.net.static.resolutions | mapreduce.tasktracker.net.static.resolutions | | hadoop.pipes.command-file.keep | mapreduce.pipes.commandfile.preserve | | hadoop.pipes.executable.interpretor | mapreduce.pipes.executable.interpretor | | hadoop.pipes.executable | mapreduce.pipes.executable | @@ -89,7 +88,6 @@ The following table lists the configuration property names that are deprecated i | keep.failed.task.files | mapreduce.task.files.preserve.failedtasks | | keep.task.files.pattern | mapreduce.task.files.preserve.filepattern | | key.value.separator.in.input.line | mapreduce.input.keyvaluelinerecordreader.key.value.separator | -| local.cache.size | mapreduce.tasktracker.cache.local.size | | map.input.file | mapreduce.map.input.file | | map.input.length | mapreduce.map.input.length | | map.input.start | mapreduce.map.input.start | @@ -113,10 +111,6 @@ The following table lists the configuration property names that are deprecated i | mapred.compress.map.output | mapreduce.map.output.compress | | mapred.data.field.separator | mapreduce.fieldsel.data.field.separator | | mapred.debug.out.lines | mapreduce.task.debugout.lines | -| mapred.healthChecker.interval | mapreduce.tasktracker.healthchecker.interval | -| mapred.healthChecker.script.args | mapreduce.tasktracker.healthchecker.script.args | -| mapred.healthChecker.script.path | mapreduce.tasktracker.healthchecker.script.path | -| mapred.healthChecker.script.timeout | mapreduce.tasktracker.healthchecker.script.timeout | | mapred.inmem.merge.threshold | mapreduce.reduce.merge.inmem.threshold | | mapred.input.dir.formats | mapreduce.input.multipleinputs.dir.formats | | mapred.input.dir.mappers | mapreduce.input.multipleinputs.dir.mappers | @@ -146,8 +140,6 @@ The following table lists the configuration property names that are deprecated i | mapred.line.input.format.linespermap | mapreduce.input.lineinputformat.linespermap | | mapred.linerecordreader.maxlength | mapreduce.input.linerecordreader.line.maxlength | | mapred.local.dir | mapreduce.cluster.local.dir | -| mapred.local.dir.minspacekill | mapreduce.tasktracker.local.dir.minspacekill | -| mapred.local.dir.minspacestart | mapreduce.tasktracker.local.dir.minspacestart | | mapred.map.child.env | mapreduce.map.env | | mapred.map.child.java.opts | mapreduce.map.java.opts | | mapred.map.child.log.level | mapreduce.map.log.level | @@ -212,19 +204,10 @@ The following table lists the configuration property names that are deprecated i | 
mapred.task.profile.params | mapreduce.task.profile.params | | mapred.task.profile.reduces | mapreduce.task.profile.reduces | | mapred.task.timeout | mapreduce.task.timeout | -| mapred.tasktracker.dns.interface | mapreduce.tasktracker.dns.interface | -| mapred.tasktracker.dns.nameserver | mapreduce.tasktracker.dns.nameserver | -| mapred.tasktracker.events.batchsize | mapreduce.tasktracker.events.batchsize | -| mapred.task.tracker.http.address | mapreduce.tasktracker.http.address | | mapred.tasktracker.indexcache.mb | mapreduce.tasktracker.indexcache.mb | -| mapred.tasktracker.instrumentation | mapreduce.tasktracker.instrumentation | | mapred.tasktracker.map.tasks.maximum | mapreduce.tasktracker.map.tasks.maximum | | mapred.tasktracker.memory\_calculator\_plugin | mapreduce.tasktracker.resourcecalculatorplugin | | mapred.tasktracker.memorycalculatorplugin | mapreduce.tasktracker.resourcecalculatorplugin | -| mapred.tasktracker.reduce.tasks.maximum | mapreduce.tasktracker.reduce.tasks.maximum | -| mapred.task.tracker.report.address | mapreduce.tasktracker.report.address | -| mapred.task.tracker.task-controller | mapreduce.tasktracker.taskcontroller | -| mapred.tasktracker.tasks.sleeptime-before-sigkill | mapreduce.tasktracker.tasks.sleeptimebeforesigkill | | mapred.temp.dir | mapreduce.cluster.temp.dir | | mapred.text.key.comparator.options | mapreduce.partition.keycomparator.options | | mapred.text.key.partitioner.options | mapreduce.partition.keypartitioner.options | @@ -251,9 +234,6 @@ The following table lists the configuration property names that are deprecated i | sequencefile.filter.regex | mapreduce.input.sequencefileinputfilter.regex | | session.id | dfs.metrics.session-id | | slave.host.name | dfs.datanode.hostname | -| slave.host.name | mapreduce.tasktracker.host.name | -| tasktracker.contention.tracking | mapreduce.tasktracker.contention.tracking | -| tasktracker.http.threads | mapreduce.tasktracker.http.threads | | topology.node.switch.mapping.impl | net.topology.node.switch.mapping.impl | | topology.script.file.name | net.topology.script.file.name | | topology.script.number.args | net.topology.script.number.args | diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java index 5556287dfb45b..f75ad05d29564 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java @@ -22,86 +22,17 @@ import org.apache.hadoop.mapreduce.MRConfig; /** - * Place holder for TaskTracker server-level configuration. - * - * The keys should have "mapreduce.tasktracker." as the prefix + * Place holder for MapReduce server-level configuration. 
+ * (formerly TaskTracker configuration) */ @InterfaceAudience.Private @InterfaceStability.Evolving public interface TTConfig extends MRConfig { - // Task-tracker configuration properties - public static final String TT_HEALTH_CHECKER_INTERVAL = - "mapreduce.tasktracker.healthchecker.interval"; - public static final String TT_HEALTH_CHECKER_SCRIPT_ARGS = - "mapreduce.tasktracker.healthchecker.script.args"; - public static final String TT_HEALTH_CHECKER_SCRIPT_PATH = - "mapreduce.tasktracker.healthchecker.script.path"; - public static final String TT_HEALTH_CHECKER_SCRIPT_TIMEOUT = - "mapreduce.tasktracker.healthchecker.script.timeout"; - public static final String TT_LOCAL_DIR_MINSPACE_KILL = - "mapreduce.tasktracker.local.dir.minspacekill"; - public static final String TT_LOCAL_DIR_MINSPACE_START = - "mapreduce.tasktracker.local.dir.minspacestart"; - public static final String TT_HTTP_ADDRESS = - "mapreduce.tasktracker.http.address"; - public static final String TT_REPORT_ADDRESS = - "mapreduce.tasktracker.report.address"; - public static final String TT_TASK_CONTROLLER = - "mapreduce.tasktracker.taskcontroller"; - public static final String TT_CONTENTION_TRACKING = - "mapreduce.tasktracker.contention.tracking"; - public static final String TT_STATIC_RESOLUTIONS = - "mapreduce.tasktracker.net.static.resolutions"; - public static final String TT_HTTP_THREADS = - "mapreduce.tasktracker.http.threads"; - public static final String TT_HOST_NAME = "mapreduce.tasktracker.host.name"; - public static final String TT_SLEEP_TIME_BEFORE_SIG_KILL = - "mapreduce.tasktracker.tasks.sleeptimebeforesigkill"; - public static final String TT_DNS_INTERFACE = - "mapreduce.tasktracker.dns.interface"; - public static final String TT_DNS_NAMESERVER = - "mapreduce.tasktracker.dns.nameserver"; - public static final String TT_MAX_TASK_COMPLETION_EVENTS_TO_POLL = - "mapreduce.tasktracker.events.batchsize"; public static final String TT_INDEX_CACHE = "mapreduce.tasktracker.indexcache.mb"; - public static final String TT_INSTRUMENTATION = - "mapreduce.tasktracker.instrumentation"; public static final String TT_MAP_SLOTS = "mapreduce.tasktracker.map.tasks.maximum"; - /** - * @deprecated Use {@link #TT_RESOURCE_CALCULATOR_PLUGIN} instead - */ - @Deprecated - public static final String TT_MEMORY_CALCULATOR_PLUGIN = - "mapreduce.tasktracker.memorycalculatorplugin"; public static final String TT_RESOURCE_CALCULATOR_PLUGIN = "mapreduce.tasktracker.resourcecalculatorplugin"; - public static final String TT_REDUCE_SLOTS = - "mapreduce.tasktracker.reduce.tasks.maximum"; - public static final String TT_LOCAL_CACHE_SIZE = - "mapreduce.tasktracker.cache.local.size"; - public static final String TT_LOCAL_CACHE_SUBDIRS_LIMIT = - "mapreduce.tasktracker.cache.local.numberdirectories"; - public static final String TT_OUTOFBAND_HEARBEAT = - "mapreduce.tasktracker.outofband.heartbeat"; - public static final String TT_RESERVED_PHYSCIALMEMORY_MB = - "mapreduce.tasktracker.reserved.physicalmemory.mb"; - public static final String TT_USER_NAME = "mapreduce.tasktracker.kerberos.principal"; - public static final String TT_KEYTAB_FILE = - "mapreduce.tasktracker.keytab.file"; - public static final String TT_GROUP = - "mapreduce.tasktracker.group"; - public static final String TT_USERLOGCLEANUP_SLEEPTIME = - "mapreduce.tasktracker.userlogcleanup.sleeptime"; - public static final String TT_DISTRIBUTED_CACHE_CHECK_PERIOD = - "mapreduce.tasktracker.distributedcache.checkperiod"; - /** - * Percentage of the local distributed cache that should be kept 
in between - * garbage collection. - */ - public static final String TT_LOCAL_CACHE_KEEP_AROUND_PCT = - "mapreduce.tasktracker.cache.local.keep.pct"; - } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java index 4b47379271756..dcd3b08ab0031 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java @@ -80,55 +80,14 @@ private static void addDeprecatedKeys() { JTConfig.JT_TASKCACHE_LEVELS), new DeprecationDelta("mapred.job.tracker.retire.jobs", JTConfig.JT_RETIREJOBS), - new DeprecationDelta("mapred.healthChecker.interval", - TTConfig.TT_HEALTH_CHECKER_INTERVAL), - new DeprecationDelta("mapred.healthChecker.script.args", - TTConfig.TT_HEALTH_CHECKER_SCRIPT_ARGS), - new DeprecationDelta("mapred.healthChecker.script.path", - TTConfig.TT_HEALTH_CHECKER_SCRIPT_PATH), - new DeprecationDelta("mapred.healthChecker.script.timeout", - TTConfig.TT_HEALTH_CHECKER_SCRIPT_TIMEOUT), - new DeprecationDelta("mapred.local.dir.minspacekill", - TTConfig.TT_LOCAL_DIR_MINSPACE_KILL), - new DeprecationDelta("mapred.local.dir.minspacestart", - TTConfig.TT_LOCAL_DIR_MINSPACE_START), - new DeprecationDelta("mapred.task.tracker.http.address", - TTConfig.TT_HTTP_ADDRESS), - new DeprecationDelta("mapred.task.tracker.report.address", - TTConfig.TT_REPORT_ADDRESS), - new DeprecationDelta("mapred.task.tracker.task-controller", - TTConfig.TT_TASK_CONTROLLER), - new DeprecationDelta("mapred.tasktracker.dns.interface", - TTConfig.TT_DNS_INTERFACE), - new DeprecationDelta("mapred.tasktracker.dns.nameserver", - TTConfig.TT_DNS_NAMESERVER), - new DeprecationDelta("mapred.tasktracker.events.batchsize", - TTConfig.TT_MAX_TASK_COMPLETION_EVENTS_TO_POLL), new DeprecationDelta("mapred.tasktracker.indexcache.mb", TTConfig.TT_INDEX_CACHE), - new DeprecationDelta("mapred.tasktracker.instrumentation", - TTConfig.TT_INSTRUMENTATION), new DeprecationDelta("mapred.tasktracker.map.tasks.maximum", TTConfig.TT_MAP_SLOTS), new DeprecationDelta("mapred.tasktracker.memory_calculator_plugin", TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN), new DeprecationDelta("mapred.tasktracker.memorycalculatorplugin", TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN), - new DeprecationDelta("mapred.tasktracker.reduce.tasks.maximum", - TTConfig.TT_REDUCE_SLOTS), - new DeprecationDelta( - "mapred.tasktracker.tasks.sleeptime-before-sigkill", - TTConfig.TT_SLEEP_TIME_BEFORE_SIG_KILL), - new DeprecationDelta("slave.host.name", - TTConfig.TT_HOST_NAME), - new DeprecationDelta("tasktracker.http.threads", - TTConfig.TT_HTTP_THREADS), - new DeprecationDelta("hadoop.net.static.resolutions", - TTConfig.TT_STATIC_RESOLUTIONS), - new DeprecationDelta("local.cache.size", - TTConfig.TT_LOCAL_CACHE_SIZE), - new DeprecationDelta("tasktracker.contention.tracking", - TTConfig.TT_CONTENTION_TRACKING), new DeprecationDelta("yarn.app.mapreduce.yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts", MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS), new DeprecationDelta("job.end.notification.url", diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml index 4c73e8bb17fc8..45930f05f8bed 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml @@ -6,7 +6,6 @@ yarn.nodemanager.container-manager.thread-count20 mapred.queue.default.acl-administer-jobs* dfs.image.transfer.bandwidthPerSec0 -mapreduce.tasktracker.healthchecker.interval60000 mapreduce.jobtracker.staging.root.dir${hadoop.tmp.dir}/mapred/staging dfs.block.access.token.lifetime600 yarn.resourcemanager.am.max-retries2 @@ -56,19 +55,16 @@ dfs.namenode.replication.min1 mapreduce.map.java.opts-Xmx500m s3native.bytes-per-checksum512 -mapreduce.tasktracker.tasks.sleeptimebeforesigkill5000 tfile.fs.output.buffer.size262144 yarn.nodemanager.local-dirs/home/user/local-dir/ mapreduce.jobtracker.persist.jobstatus.activetrue fs.AbstractFileSystem.hdfs.implorg.apache.hadoop.fs.Hdfs dfs.namenode.safemode.min.datanodes0 -mapreduce.tasktracker.local.dir.minspacestart0 dfs.client.https.need-authfalse fs.har.impl.disable.cachetrue dfs.client.https.keystore.resourcessl-client.xml dfs.namenode.max.objects0 dfs.namenode.safemode.threshold-pct0.999f -mapreduce.tasktracker.local.dir.minspacekill0 dfs.blocksize67108864 mapreduce.job.reduce.slowstart.completedmaps0.05 mapreduce.job.end-notification.retry.attempts5 @@ -76,7 +72,6 @@ fs.s3n.implorg.apache.hadoop.fs.s3native.NativeS3FileSystem mapreduce.map.memory.mb512 mapreduce.job.user.nameuser -mapreduce.tasktracker.outofband.heartbeatfalse io.native.lib.availabletrue dfs.client-write-packet-size65536 mapreduce.client.progressmonitor.pollinterval1000 @@ -90,7 +85,6 @@ mapreduce.cluster.local.dir${hadoop.tmp.dir}/mapred/local mapreduce.job.application.attempt.id1 dfs.permissions.enabledtrue -mapreduce.tasktracker.taskcontrollerorg.apache.hadoop.mapred.DefaultTaskController mapreduce.reduce.shuffle.parallelcopies5 yarn.nodemanager.env-whitelistJAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME mapreduce.job.maxtaskfailures.per.tracker4 @@ -138,7 +132,6 @@ mapreduce.map.output.key.classorg.apache.hadoop.io.IntWritable mapreduce.job.end-notification.max.retry.interval5 ftp.blocksize67108864 -mapreduce.tasktracker.http.threads40 mapreduce.reduce.java.opts-Xmx500m dfs.datanode.data.dirfile:///home/user/hadoop-user/dfs/data dfs.namenode.replication.interval3 @@ -194,7 +187,6 @@ yarn.resourcemanager.amliveliness-monitor.interval-ms1000 mapreduce.reduce.speculativefalse mapreduce.client.output.filterFAILED -mapreduce.tasktracker.report.address127.0.0.1:0 mapreduce.task.userlog.limit.kb0 mapreduce.tasktracker.map.tasks.maximum2 hadoop.http.authentication.simple.anonymous.allowedtrue @@ -204,7 +196,6 @@ fs.automatic.closetrue fs.kfs.implorg.apache.hadoop.fs.kfs.KosmosFileSystem mapreduce.job.submithostaddress127.0.0.1 -mapreduce.tasktracker.healthchecker.script.timeout600000 dfs.datanode.directoryscan.interval21600 yarn.resourcemanager.address0.0.0.0:8040 yarn.nodemanager.log-aggregation-enablefalse @@ -230,8 +221,6 @@ yarn.resourcemanager.container.liveness-monitor.interval-ms600000 yarn.am.liveness-monitor.expiry-interval-ms600000 mapreduce.task.profilefalse 
-mapreduce.tasktracker.instrumentationorg.apache.hadoop.mapred.TaskTrackerMetricsInst -mapreduce.tasktracker.http.address0.0.0.0:50060 mapreduce.jobhistory.webapp.address0.0.0.0:19888 rpc.engine.org.apache.hadoop.yarn.proto.ApplicationMasterProtocol$ApplicationMasterProtocolService$BlockingInterfaceorg.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine yarn.ipc.rpc.classorg.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC @@ -271,7 +260,6 @@ mapreduce.admin.user.envLD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native dfs.namenode.checkpoint.edits.dir${dfs.namenode.checkpoint.dir} hadoop.common.configuration.version0.23.0 -mapreduce.tasktracker.dns.interfacedefault io.serializationsorg.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization yarn.nodemanager.aux-service.mapreduce.shuffle.classorg.apache.hadoop.mapred.ShuffleHandler yarn.nodemanager.aux-services.mapreduce.shuffle.classorg.apache.hadoop.mapred.ShuffleHandler @@ -352,7 +340,6 @@ yarn.resourcemanager.admin.client.thread-count1 fs.AbstractFileSystem.viewfs.implorg.apache.hadoop.fs.viewfs.ViewFs yarn.resourcemanager.resource-tracker.client.thread-count50 -mapreduce.tasktracker.dns.nameserverdefault mapreduce.map.output.compressfalse mapreduce.job.counters.limit120 dfs.datanode.ipc.address0.0.0.0:9867 diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json index c537195f6a629..7a52559f59463 100644 --- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json +++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json @@ -4532,7 +4532,6 @@ "yarn.nodemanager.container-manager.thread-count" : "20", "mapred.queue.default.acl-administer-jobs" : "*", "dfs.image.transfer.bandwidthPerSec" : "0", - "mapreduce.tasktracker.healthchecker.interval" : "60000", "mapreduce.jobtracker.staging.root.dir" : "/user", "yarn.resourcemanager.recovery.enabled" : "false", "yarn.resourcemanager.am.max-retries" : "1", @@ -4606,13 +4605,11 @@ "mapreduce.map.java.opts" : " -Xmx825955249", "yarn.scheduler.fair.allocation.file" : "/etc/yarn/fair-scheduler.xml", "s3native.bytes-per-checksum" : "512", - "mapreduce.tasktracker.tasks.sleeptimebeforesigkill" : "5000", "tfile.fs.output.buffer.size" : "262144", "yarn.nodemanager.local-dirs" : "${hadoop.tmp.dir}/nm-local-dir", "mapreduce.jobtracker.persist.jobstatus.active" : "false", "fs.AbstractFileSystem.hdfs.impl" : "org.apache.hadoop.fs.Hdfs", "mapreduce.job.map.output.collector.class" : "org.apache.hadoop.mapred.MapTask$MapOutputBuffer", - "mapreduce.tasktracker.local.dir.minspacestart" : "0", "dfs.namenode.safemode.min.datanodes" : "0", "hadoop.security.uid.cache.secs" : "14400", "dfs.client.https.need-auth" : "false", @@ -4621,7 +4618,6 @@ "dfs.namenode.max.objects" : "0", "hadoop.ssl.client.conf" : "ssl-client.xml", "dfs.namenode.safemode.threshold-pct" : "0.999f", - "mapreduce.tasktracker.local.dir.minspacekill" : "0", "dfs.blocksize" : "134217728", "yarn.resourcemanager.scheduler.class" : "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler", "mapreduce.job.reduce.slowstart.completedmaps" : "0.8", @@ -4629,7 +4625,6 @@ "mapreduce.job.inputformat.class" : "org.apache.hadoop.examples.terasort.TeraGen$RangeInputFormat", "mapreduce.map.memory.mb" : "1024", "mapreduce.job.user.name" : "jenkins", - "mapreduce.tasktracker.outofband.heartbeat" : "false", "io.native.lib.available" : "true", 
"dfs.client-write-packet-size" : "65536", "mapreduce.client.progressmonitor.pollinterval" : "1000", @@ -4644,7 +4639,6 @@ "yarn.scheduler.fair.user-as-default-queue" : "true", "mapreduce.job.application.attempt.id" : "1", "dfs.permissions.enabled" : "true", - "mapreduce.tasktracker.taskcontroller" : "org.apache.hadoop.mapred.DefaultTaskController", "yarn.scheduler.fair.preemption" : "true", "mapreduce.reduce.shuffle.parallelcopies" : "5", "yarn.nodemanager.env-whitelist" : "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME", @@ -4700,7 +4694,6 @@ "mapreduce.reduce.shuffle.retry-delay.max.ms" : "60000", "mapreduce.job.end-notification.max.retry.interval" : "5", "ftp.blocksize" : "67108864", - "mapreduce.tasktracker.http.threads" : "80", "mapreduce.reduce.java.opts" : " -Xmx825955249", "dfs.datanode.data.dir" : "file://${hadoop.tmp.dir}/dfs/data", "ha.failover-controller.cli-check.rpc-timeout.ms" : "20000", @@ -4768,7 +4761,6 @@ "mapreduce.reduce.speculative" : "false", "mapreduce.client.output.filter" : "FAILED", "mapreduce.ifile.readahead.bytes" : "4194304", - "mapreduce.tasktracker.report.address" : "127.0.0.1:0", "mapreduce.task.userlog.limit.kb" : "0", "mapreduce.tasktracker.map.tasks.maximum" : "2", "hadoop.http.authentication.simple.anonymous.allowed" : "true", @@ -4779,7 +4771,6 @@ "dfs.namenode.handler.count" : "10", "fs.automatic.close" : "false", "mapreduce.job.submithostaddress" : "10.20.206.115", - "mapreduce.tasktracker.healthchecker.script.timeout" : "600000", "dfs.datanode.directoryscan.interval" : "21600", "yarn.resourcemanager.address" : "a2115.smile.com:8032", "yarn.nodemanager.health-checker.interval-ms" : "600000", @@ -4809,8 +4800,6 @@ "yarn.resourcemanager.container.liveness-monitor.interval-ms" : "600000", "yarn.am.liveness-monitor.expiry-interval-ms" : "600000", "mapreduce.task.profile" : "false", - "mapreduce.tasktracker.http.address" : "0.0.0.0:50060", - "mapreduce.tasktracker.instrumentation" : "org.apache.hadoop.mapred.TaskTrackerMetricsInst", "mapreduce.jobhistory.webapp.address" : "a2115.smile.com:19888", "ha.failover-controller.graceful-fence.rpc-timeout.ms" : "5000", "yarn.ipc.rpc.class" : "org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC", @@ -4857,7 +4846,6 @@ "fs.permissions.umask-mode" : "022", "dfs.client.domain.socket.data.traffic" : "false", "hadoop.common.configuration.version" : "0.23.0", - "mapreduce.tasktracker.dns.interface" : "default", "mapreduce.output.fileoutputformat.compress.type" : "BLOCK", "mapreduce.ifile.readahead" : "true", "hadoop.security.group.mapping.ldap.ssl" : "false", @@ -4945,7 +4933,6 @@ "dfs.namenode.checkpoint.period" : "3600", "fs.AbstractFileSystem.viewfs.impl" : "org.apache.hadoop.fs.viewfs.ViewFs", "yarn.resourcemanager.resource-tracker.client.thread-count" : "50", - "mapreduce.tasktracker.dns.nameserver" : "default", "mapreduce.map.output.compress" : "true", "dfs.datanode.ipc.address" : "0.0.0.0:9867", "hadoop.ssl.require.client.cert" : "false", @@ -9613,7 +9600,6 @@ "yarn.nodemanager.container-manager.thread-count" : "20", "mapred.queue.default.acl-administer-jobs" : "*", "dfs.image.transfer.bandwidthPerSec" : "0", - "mapreduce.tasktracker.healthchecker.interval" : "60000", "mapreduce.jobtracker.staging.root.dir" : "/user", "yarn.resourcemanager.recovery.enabled" : "false", "yarn.resourcemanager.am.max-retries" : "1", @@ -9687,13 +9673,11 @@ "mapreduce.map.java.opts" : " -Xmx825955249", "yarn.scheduler.fair.allocation.file" : "/etc/yarn/fair-scheduler.xml", "s3native.bytes-per-checksum" : "512", - 
"mapreduce.tasktracker.tasks.sleeptimebeforesigkill" : "5000", "tfile.fs.output.buffer.size" : "262144", "yarn.nodemanager.local-dirs" : "${hadoop.tmp.dir}/nm-local-dir", "mapreduce.jobtracker.persist.jobstatus.active" : "false", "fs.AbstractFileSystem.hdfs.impl" : "org.apache.hadoop.fs.Hdfs", "mapreduce.job.map.output.collector.class" : "org.apache.hadoop.mapred.MapTask$MapOutputBuffer", - "mapreduce.tasktracker.local.dir.minspacestart" : "0", "dfs.namenode.safemode.min.datanodes" : "0", "hadoop.security.uid.cache.secs" : "14400", "dfs.client.https.need-auth" : "false", @@ -9702,7 +9686,6 @@ "dfs.namenode.max.objects" : "0", "hadoop.ssl.client.conf" : "ssl-client.xml", "dfs.namenode.safemode.threshold-pct" : "0.999f", - "mapreduce.tasktracker.local.dir.minspacekill" : "0", "dfs.blocksize" : "134217728", "yarn.resourcemanager.scheduler.class" : "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler", "mapreduce.job.reduce.slowstart.completedmaps" : "0.8", @@ -9710,7 +9693,6 @@ "mapreduce.job.inputformat.class" : "org.apache.hadoop.examples.terasort.TeraGen$RangeInputFormat", "mapreduce.map.memory.mb" : "1024", "mapreduce.job.user.name" : "jenkins", - "mapreduce.tasktracker.outofband.heartbeat" : "false", "io.native.lib.available" : "true", "dfs.client-write-packet-size" : "65536", "mapreduce.client.progressmonitor.pollinterval" : "1000", @@ -9725,7 +9707,6 @@ "yarn.scheduler.fair.user-as-default-queue" : "true", "mapreduce.job.application.attempt.id" : "1", "dfs.permissions.enabled" : "true", - "mapreduce.tasktracker.taskcontroller" : "org.apache.hadoop.mapred.DefaultTaskController", "yarn.scheduler.fair.preemption" : "true", "mapreduce.reduce.shuffle.parallelcopies" : "5", "yarn.nodemanager.env-whitelist" : "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME", @@ -9781,7 +9762,6 @@ "mapreduce.reduce.shuffle.retry-delay.max.ms" : "60000", "mapreduce.job.end-notification.max.retry.interval" : "5", "ftp.blocksize" : "67108864", - "mapreduce.tasktracker.http.threads" : "80", "mapreduce.reduce.java.opts" : " -Xmx825955249", "dfs.datanode.data.dir" : "file://${hadoop.tmp.dir}/dfs/data", "ha.failover-controller.cli-check.rpc-timeout.ms" : "20000", @@ -9849,7 +9829,6 @@ "mapreduce.reduce.speculative" : "false", "mapreduce.client.output.filter" : "FAILED", "mapreduce.ifile.readahead.bytes" : "4194304", - "mapreduce.tasktracker.report.address" : "127.0.0.1:0", "mapreduce.task.userlog.limit.kb" : "0", "mapreduce.tasktracker.map.tasks.maximum" : "2", "hadoop.http.authentication.simple.anonymous.allowed" : "true", @@ -9860,7 +9839,6 @@ "dfs.namenode.handler.count" : "10", "fs.automatic.close" : "false", "mapreduce.job.submithostaddress" : "10.20.206.115", - "mapreduce.tasktracker.healthchecker.script.timeout" : "600000", "dfs.datanode.directoryscan.interval" : "21600", "yarn.resourcemanager.address" : "a2115.smile.com:8032", "yarn.nodemanager.health-checker.interval-ms" : "600000", @@ -9890,8 +9868,6 @@ "yarn.resourcemanager.container.liveness-monitor.interval-ms" : "600000", "yarn.am.liveness-monitor.expiry-interval-ms" : "600000", "mapreduce.task.profile" : "false", - "mapreduce.tasktracker.http.address" : "0.0.0.0:50060", - "mapreduce.tasktracker.instrumentation" : "org.apache.hadoop.mapred.TaskTrackerMetricsInst", "mapreduce.jobhistory.webapp.address" : "a2115.smile.com:19888", "ha.failover-controller.graceful-fence.rpc-timeout.ms" : "5000", "yarn.ipc.rpc.class" : "org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC", @@ -9938,7 +9914,6 @@ 
"fs.permissions.umask-mode" : "022", "dfs.client.domain.socket.data.traffic" : "false", "hadoop.common.configuration.version" : "0.23.0", - "mapreduce.tasktracker.dns.interface" : "default", "mapreduce.output.fileoutputformat.compress.type" : "BLOCK", "mapreduce.ifile.readahead" : "true", "hadoop.security.group.mapping.ldap.ssl" : "false", @@ -10026,7 +10001,6 @@ "dfs.namenode.checkpoint.period" : "3600", "fs.AbstractFileSystem.viewfs.impl" : "org.apache.hadoop.fs.viewfs.ViewFs", "yarn.resourcemanager.resource-tracker.client.thread-count" : "50", - "mapreduce.tasktracker.dns.nameserver" : "default", "mapreduce.map.output.compress" : "true", "dfs.datanode.ipc.address" : "0.0.0.0:9867", "hadoop.ssl.require.client.cert" : "false", From 0b115b60b0fb78a9f33d9955908292b41b952433 Mon Sep 17 00:00:00 2001 From: Hanisha Koneru Date: Thu, 6 Jun 2019 19:42:35 -0700 Subject: [PATCH 0144/1308] HDDS-1371. OMSnapshotProvider to download DB checkpoint from leader OM. (#703) --- .../org/apache/hadoop/ozone/OzoneConsts.java | 11 + .../apache/hadoop/utils/db/DBCheckpoint.java | 16 ++ .../hadoop/utils/db/RocksDBCheckpoint.java | 11 + .../src/main/resources/ozone-default.xml | 49 +++- .../hadoop/ozone/client/rest/RestClient.java | 4 +- .../java/org/apache/hadoop/ozone/OmUtils.java | 105 +++++++++ .../apache/hadoop/ozone/om/OMConfigKeys.java | 21 ++ .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 16 +- .../TestOzoneManagerSnapshotProvider.java | 125 +++++++++++ .../ozone/om/OMDBCheckpointServlet.java | 26 ++- .../apache/hadoop/ozone/om/OMNodeDetails.java | 41 +++- .../apache/hadoop/ozone/om/OzoneManager.java | 88 ++++++-- .../ozone/om/OzoneManagerHttpServer.java | 9 +- .../om/ratis/OzoneManagerRatisServer.java | 17 +- .../OzoneManagerSnapshotProvider.java | 210 ++++++++++++++++++ .../ozone/om/snapshot/package-info.java | 23 ++ .../impl/OzoneManagerServiceProviderImpl.java | 6 +- 17 files changed, 723 insertions(+), 55 deletions(-) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 4f249f854bb16..d2d80b928af7e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -76,6 +76,12 @@ public final class OzoneConsts { public static final String OZONE_USER = "user"; public static final String OZONE_REQUEST = "request"; + // OM Http server endpoints + public static final String OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT = + "/serviceList"; + public static final String OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT = + "/dbCheckpoint"; + // Ozone File System scheme public static final String OZONE_URI_SCHEME = "o3fs"; @@ -286,4 +292,9 @@ private OzoneConsts() { // OM Ratis snapshot file to store the last applied index public static final String OM_RATIS_SNAPSHOT_INDEX = "ratisSnapshotIndex"; + + // OM Http request parameter to be used while downloading DB checkpoint + // from OM leader to follower + public static final String OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT = + "snapshotBeforeCheckpoint"; } diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBCheckpoint.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBCheckpoint.java index a3b197a55f178..09de8e651252f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBCheckpoint.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBCheckpoint.java @@ -55,4 +55,20 @@ public interface DBCheckpoint { */ void cleanupCheckpoint() throws IOException; + /** + * Set the OM Ratis snapshot index corresponding to the OM DB checkpoint. + * The snapshot index is the latest snapshot index saved by ratis + * snapshots. It is not guaranteed to be the last ratis index applied to + * the OM DB state. + * @param omRatisSnapshotIndex the saved ratis snapshot index + */ + void setRatisSnapshotIndex(long omRatisSnapshotIndex); + + /** + * Get the OM Ratis snapshot index corresponding to the OM DB checkpoint. + * The ratis snapshot index indicates upto which index is definitely + * included in the DB checkpoint. It is not guaranteed to be the last ratis + * log index applied to the DB checkpoint. + */ + long getRatisSnapshotIndex(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RocksDBCheckpoint.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RocksDBCheckpoint.java index 88b3f75b1aa78..0e736d23c377b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RocksDBCheckpoint.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RocksDBCheckpoint.java @@ -38,6 +38,7 @@ public class RocksDBCheckpoint implements DBCheckpoint { private long checkpointTimestamp = System.currentTimeMillis(); private long latestSequenceNumber = -1; private long checkpointCreationTimeTaken = 0L; + private long ratisSnapshotIndex = 0L; public RocksDBCheckpoint(Path checkpointLocation) { this.checkpointLocation = checkpointLocation; @@ -78,4 +79,14 @@ public void cleanupCheckpoint() throws IOException { LOG.debug("Cleaning up checkpoint at " + checkpointLocation.toString()); FileUtils.deleteDirectory(checkpointLocation.toFile()); } + + @Override + public void setRatisSnapshotIndex(long omRatisSnapshotIndex) { + this.ratisSnapshotIndex = omRatisSnapshotIndex; + } + + @Override + public long getRatisSnapshotIndex() { + return ratisSnapshotIndex; + } } \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 178712944405b..d32a6ee7ef470 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1585,6 +1585,8 @@ logs. If this is not set then default metadata dirs is used. A warning will be logged if this not set. Ideally, this should be mapped to a fast disk like an SSD. + If undefined, OM ratis storage dir will fallback to ozone.metadata.dirs. + This fallback approach is not recommended for production environments. @@ -1703,6 +1705,45 @@ . + + ozone.om.ratis.snapshot.dir + + OZONE, OM, STORAGE, MANAGEMENT, RATIS + This directory is used for storing OM's snapshot + related files like the ratisSnapshotIndex and DB checkpoint from leader + OM. + If undefined, OM snapshot dir will fallback to ozone.om.ratis.storage.dir. + This fallback approach is not recommended for production environments. 
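As an aside to the DBCheckpoint/RocksDBCheckpoint hunks above: the new accessor pair is the contract the rest of this patch builds on. A rough usage sketch, not taken from the patch; the checkpoint path and the index value are made-up placeholders:

import java.nio.file.Paths;

import org.apache.hadoop.utils.db.DBCheckpoint;
import org.apache.hadoop.utils.db.RocksDBCheckpoint;

public class RatisIndexOnCheckpointSketch {
  public static void main(String[] args) {
    // Wrap an already-created RocksDB checkpoint directory (path is hypothetical).
    DBCheckpoint checkpoint =
        new RocksDBCheckpoint(Paths.get("/tmp/om.db.checkpoints/checkpoint_0001"));

    // The leader stamps the checkpoint with the index of the Ratis snapshot it
    // saved just before cutting the checkpoint; 42 is an arbitrary example value.
    checkpoint.setRatisSnapshotIndex(42L);

    // A follower can treat this as "transactions up to this index are definitely
    // contained in the checkpoint", per the interface javadoc above.
    System.out.println("Checkpoint covers at least index "
        + checkpoint.getRatisSnapshotIndex());
  }
}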
+ + + + ozone.om.snapshot.provider.socket.timeout + 5000s + OZONE, OM, HA, MANAGEMENT + + Socket timeout for HTTP call made by OM Snapshot Provider to request + OM snapshot from OM Leader. + + + + ozone.om.snapshot.provider.connection.timeout + 5000s + OZONE, OM, HA, MANAGEMENT + + Connection timeout for HTTP call made by OM Snapshot Provider to request + OM snapshot from OM Leader. + + + + ozone.om.snapshot.provider.request.timeout + 5000ms + OZONE, OM, HA, MANAGEMENT + + Connection request timeout for HTTP call made by OM Snapshot Provider to + request OM snapshot from OM Leader. + + + ozone.acl.authorizer.class org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer @@ -2346,14 +2387,6 @@ OM snapshot. - - recon.om.socket.timeout - 5s - OZONE, RECON, OM - - Socket timeout for HTTP call made by Recon to request OM snapshot. - - recon.om.snapshot.task.initial.delay 1m diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java index f3afc924c3676..7b37e498e2d8a 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java @@ -99,6 +99,7 @@ import static java.net.HttpURLConnection.HTTP_CREATED; import static java.net.HttpURLConnection.HTTP_OK; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT; /** * Ozone Client REST protocol implementation. It uses REST protocol to @@ -190,7 +191,8 @@ private InetSocketAddress getOzoneRestServerAddress( " details on configuring Ozone."); } - HttpGet httpGet = new HttpGet("http://" + httpAddress + "/serviceList"); + HttpGet httpGet = new HttpGet("http://" + httpAddress + + OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT); HttpEntity entity = executeHttpRequest(httpGet); try { String serviceListJson = EntityUtils.toString(entity); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 5cd51421cb72d..07780569fb834 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -27,6 +27,7 @@ import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.nio.file.Path; +import java.nio.file.Paths; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.Collection; @@ -34,12 +35,15 @@ import java.util.Optional; import java.util.zip.GZIPOutputStream; +import com.google.common.base.Strings; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.compress.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.scm.ScmUtils; +import org.apache.hadoop.hdds.scm.HddsServerUtil; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -48,7 +52,11 @@ import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_BIND_HOST_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTPS_BIND_PORT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_PORT_DEFAULT; @@ -366,4 +374,101 @@ private static void addFilesToArchive(String source, File file, } } + /** + * If a OM conf is only set with key suffixed with OM Node ID, return the + * set value. + * @return null if base conf key is set, otherwise the value set for + * key suffixed with Node ID. + */ + public static String getConfSuffixedWithOMNodeId(Configuration conf, + String confKey, String omServiceID, String omNodeId) { + String confValue = conf.getTrimmed(confKey); + if (StringUtils.isNotEmpty(confValue)) { + return null; + } + String suffixedConfKey = OmUtils.addKeySuffixes( + confKey, omServiceID, omNodeId); + confValue = conf.getTrimmed(suffixedConfKey); + if (StringUtils.isNotEmpty(confValue)) { + return confValue; + } + return null; + } + + /** + * Returns the http address of peer OM node. + * @param conf Configuration + * @param omNodeId peer OM node ID + * @param omNodeHostAddr peer OM node host address + * @return http address of peer OM node in the format : + */ + public static String getHttpAddressForOMPeerNode(Configuration conf, + String omServiceId, String omNodeId, String omNodeHostAddr) { + final Optional bindHost = getHostNameFromConfigKeys(conf, + addKeySuffixes(OZONE_OM_HTTP_BIND_HOST_KEY, omServiceId, omNodeId)); + + final Optional addressPort = getPortNumberFromConfigKeys(conf, + addKeySuffixes(OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId)); + + final Optional addressHost = getHostNameFromConfigKeys(conf, + addKeySuffixes(OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId)); + + String hostName = bindHost.orElse(addressHost.orElse(omNodeHostAddr)); + + return hostName + ":" + addressPort.orElse(OZONE_OM_HTTP_BIND_PORT_DEFAULT); + } + + /** + * Returns the https address of peer OM node. + * @param conf Configuration + * @param omNodeId peer OM node ID + * @param omNodeHostAddr peer OM node host address + * @return https address of peer OM node in the format : + */ + public static String getHttpsAddressForOMPeerNode(Configuration conf, + String omServiceId, String omNodeId, String omNodeHostAddr) { + final Optional bindHost = getHostNameFromConfigKeys(conf, + addKeySuffixes(OZONE_OM_HTTPS_BIND_HOST_KEY, omServiceId, omNodeId)); + + final Optional addressPort = getPortNumberFromConfigKeys(conf, + addKeySuffixes(OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId)); + + final Optional addressHost = getHostNameFromConfigKeys(conf, + addKeySuffixes(OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId)); + + String hostName = bindHost.orElse(addressHost.orElse(omNodeHostAddr)); + + return hostName + ":" + + addressPort.orElse(OZONE_OM_HTTPS_BIND_PORT_DEFAULT); + } + + /** + * Get the local directory where ratis logs will be stored. 
+ */ + public static String getOMRatisDirectory(Configuration conf) { + String storageDir = conf.get(OMConfigKeys.OZONE_OM_RATIS_STORAGE_DIR); + + if (Strings.isNullOrEmpty(storageDir)) { + storageDir = HddsServerUtil.getDefaultRatisDirectory(conf); + } + return storageDir; + } + + public static String getOMRatisSnapshotDirectory(Configuration conf) { + String snapshotDir = conf.get(OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_DIR); + + if (Strings.isNullOrEmpty(snapshotDir)) { + snapshotDir = Paths.get(getOMRatisDirectory(conf), + "snapshot").toString(); + } + return snapshotDir; + } + + public static File createOMDir(String dirPath) { + File dirFile = new File(dirPath); + if (!dirFile.exists() && !dirFile.mkdirs()) { + throw new IllegalArgumentException("Unable to create path: " + dirFile); + } + return dirFile; + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 5f1f579f6d335..14b6783d0b5f1 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -184,6 +184,27 @@ private OMConfigKeys() { OZONE_OM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT = TimeDuration.valueOf(15, TimeUnit.SECONDS); + // OM SnapshotProvider configurations + public static final String OZONE_OM_RATIS_SNAPSHOT_DIR = + "ozone.om.ratis.snapshot.dir"; + public static final String OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_KEY = + "ozone.om.snapshot.provider.socket.timeout"; + public static final TimeDuration + OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT = + TimeDuration.valueOf(5000, TimeUnit.MILLISECONDS); + + public static final String OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_KEY = + "ozone.om.snapshot.provider.connection.timeout"; + public static final TimeDuration + OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT = + TimeDuration.valueOf(5000, TimeUnit.MILLISECONDS); + + public static final String OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_KEY = + "ozone.om.snapshot.provider.request.timeout"; + public static final TimeDuration + OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT = + TimeDuration.valueOf(5000, TimeUnit.MILLISECONDS); + public static final String OZONE_OM_KERBEROS_KEYTAB_FILE_KEY = "ozone.om." 
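A quick illustration of the directory fallback implemented by getOMRatisSnapshotDirectory above; this is a sketch with a made-up path, not code from the patch:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OmUtils;

public class SnapshotDirFallbackSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Only the Ratis storage dir is set; ozone.om.ratis.snapshot.dir is left unset.
    conf.set("ozone.om.ratis.storage.dir", "/data/om/ratis");

    // Falls back to <ratis storage dir>/snapshot, mirroring the
    // ozone-default.xml description of ozone.om.ratis.snapshot.dir.
    System.out.println(OmUtils.getOMRatisSnapshotDirectory(conf));
    // -> /data/om/ratis/snapshot
  }
}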
+ "kerberos.keytab.file"; public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om" diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 03c2a2c590e6e..7818d9ea7e544 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -200,6 +200,10 @@ private Map createOMService() throws IOException, // Set nodeId String nodeId = nodeIdBaseStr + i; conf.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, nodeId); + // Set the OM http(s) address to null so that the cluster picks + // up the address set with service ID and node ID in initHAConfig + conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, ""); + conf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, ""); // Set metadata/DB dir base path String metaDirPath = path + "/" + nodeId; @@ -207,11 +211,6 @@ private Map createOMService() throws IOException, OMStorage omStore = new OMStorage(conf); initializeOmStorage(omStore); - // Set HTTP address to the rpc port + 2 - int httpPort = basePort + (6*i) - 4; - conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, - "127.0.0.1:" + httpPort); - OzoneManager om = OzoneManager.createOm(null, conf); om.setCertClient(certClient); omMap.put(nodeId, om); @@ -261,11 +260,16 @@ private void initHAConfig(int basePort) throws IOException { omNodesKeyValue.append(",").append(omNodeId); String omAddrKey = OmUtils.addKeySuffixes( OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId); + String omHttpAddrKey = OmUtils.addKeySuffixes( + OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId); + String omHttpsAddrKey = OmUtils.addKeySuffixes( + OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId); String omRatisPortKey = OmUtils.addKeySuffixes( OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNodeId); conf.set(omAddrKey, "127.0.0.1:" + port); - // Reserve port+2 for OMs HTTP server + conf.set(omHttpAddrKey, "127.0.0.1:" + (port + 2)); + conf.set(omHttpsAddrKey, "127.0.0.1:" + (port + 3)); conf.setInt(omRatisPortKey, port + 4); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java new file mode 100644 index 0000000000000..f5e39f70e9c99 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.VolumeArgs; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.utils.db.DBCheckpoint; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; + +import java.util.UUID; + +/** + * Test OM's snapshot provider service. + */ +public class TestOzoneManagerSnapshotProvider { + + private MiniOzoneHAClusterImpl cluster = null; + private ObjectStore objectStore; + private OzoneConfiguration conf; + private String clusterId; + private String scmId; + private int numOfOMs = 3; + + @Rule + public Timeout timeout = new Timeout(300_000); + + /** + * Create a MiniDFSCluster for testing. + */ + @Before + public void init() throws Exception { + conf = new OzoneConfiguration(); + clusterId = UUID.randomUUID().toString(); + scmId = UUID.randomUUID().toString(); + conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true); + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + .setClusterId(clusterId) + .setScmId(scmId) + .setOMServiceId("om-service-test1") + .setNumOfOzoneManagers(numOfOMs) + .build(); + cluster.waitForClusterToBeReady(); + objectStore = OzoneClientFactory.getRpcClient(conf).getObjectStore(); + } + + /** + * Shutdown MiniDFSCluster. 
+ */ + @After + public void shutdown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testDownloadCheckpoint() throws Exception { + String userName = "user" + RandomStringUtils.randomNumeric(5); + String adminName = "admin" + RandomStringUtils.randomNumeric(5); + String volumeName = "volume" + RandomStringUtils.randomNumeric(5); + String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); + + VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() + .setOwner(userName) + .setAdmin(adminName) + .build(); + + objectStore.createVolume(volumeName, createVolumeArgs); + OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); + + retVolumeinfo.createBucket(bucketName); + OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName); + + String leaderOMNodeId = objectStore.getClientProxy().getOMProxyProvider() + .getCurrentProxyOMNodeId(); + OzoneManager ozoneManager = cluster.getOzoneManager(leaderOMNodeId); + + // Get a follower OM + String followerNodeId = ozoneManager.getPeerNodes().get(0).getOMNodeId(); + OzoneManager followerOM = cluster.getOzoneManager(followerNodeId); + + // Download latest checkpoint from leader OM to follower OM + DBCheckpoint omSnapshot = followerOM.getOmSnapshotProvider() + .getOzoneManagerDBSnapshot(leaderOMNodeId); + + long leaderSnapshotIndex = ozoneManager.loadRatisSnapshotIndex(); + long downloadedSnapshotIndex = omSnapshot.getRatisSnapshotIndex(); + + // The snapshot index downloaded from leader OM should match the ratis + // snapshot index on the leader OM + Assert.assertEquals("The snapshot index downloaded from leader OM does " + + "not match its ratis snapshot index", + leaderSnapshotIndex, downloadedSnapshotIndex); + } +} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java index 96acfb3f06df2..d54e1216a8693 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java @@ -18,6 +18,8 @@ package org.apache.hadoop.ozone.om; +import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT; +import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_INDEX; import static org.apache.hadoop.ozone.OzoneConsts. 
OZONE_DB_CHECKPOINT_REQUEST_FLUSH; @@ -54,6 +56,7 @@ public class OMDBCheckpointServlet extends HttpServlet { LoggerFactory.getLogger(OMDBCheckpointServlet.class); private static final long serialVersionUID = 1L; + private transient OzoneManager om; private transient DBStore omDbStore; private transient OMMetrics omMetrics; private transient DataTransferThrottler throttler = null; @@ -61,7 +64,7 @@ public class OMDBCheckpointServlet extends HttpServlet { @Override public void init() throws ServletException { - OzoneManager om = (OzoneManager) getServletContext() + om = (OzoneManager) getServletContext() .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE); if (om == null) { @@ -110,6 +113,24 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) { flush = Boolean.valueOf(flushParam); } + boolean takeRatisSnapshot = false; + String snapshotBeforeCheckpointParam = + request.getParameter(OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT); + if (StringUtils.isNotEmpty(snapshotBeforeCheckpointParam)) { + takeRatisSnapshot = Boolean.valueOf(snapshotBeforeCheckpointParam); + } + + long ratisSnapshotIndex; + if (takeRatisSnapshot) { + // If OM follower is downloading the checkpoint, we should save a + // ratis snapshot first. This step also included flushing the OM DB. + // Hence, we can set flush to false. + flush = false; + ratisSnapshotIndex = om.saveRatisSnapshot(); + } else { + ratisSnapshotIndex = om.loadRatisSnapshotIndex(); + } + DBCheckpoint checkpoint = omDbStore.getCheckpoint(flush); if (checkpoint == null) { LOG.error("Unable to process metadata snapshot request. " + @@ -136,6 +157,9 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) { response.setHeader("Content-Disposition", "attachment; filename=\"" + checkPointTarFile.getName() + "\""); + // Ratis snapshot index used when downloading DB checkpoint to OM follower + response.setHeader(OM_RATIS_SNAPSHOT_INDEX, + String.valueOf(ratisSnapshotIndex)); checkpointFileInputStream = new FileInputStream(checkPointTarFile); start = Instant.now(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMNodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMNodeDetails.java index 630d98aec25a8..d399ca9e5601b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMNodeDetails.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMNodeDetails.java @@ -17,11 +17,16 @@ package org.apache.hadoop.ozone.om; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.net.NetUtils; import java.net.InetAddress; import java.net.InetSocketAddress; +import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; + /** * This class stores OM node details. */ @@ -31,17 +36,22 @@ public final class OMNodeDetails { private InetSocketAddress rpcAddress; private int rpcPort; private int ratisPort; + private String httpAddress; + private String httpsAddress; /** * Constructs OMNodeDetails object. 
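To make the servlet contract above concrete: a follower fetches the checkpoint over HTTP and reads the Ratis snapshot index back from a response header. A minimal sketch, not from the patch; the leader host and port are hypothetical, while the /dbCheckpoint endpoint, the snapshotBeforeCheckpoint parameter and the ratisSnapshotIndex header name come from OzoneConsts in this patch:

import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;

public class DbCheckpointRequestSketch {
  public static void main(String[] args) throws Exception {
    // Asking the leader to save a Ratis snapshot before cutting the checkpoint.
    String url = "http://om-leader.example.com:9874/dbCheckpoint"
        + "?snapshotBeforeCheckpoint=true";

    try (CloseableHttpClient client = HttpClientBuilder.create().build()) {
      HttpResponse response = client.execute(new HttpGet(url));
      // The servlet stamps the Ratis snapshot index into this response header.
      long ratisSnapshotIndex = Long.parseLong(
          response.getFirstHeader("ratisSnapshotIndex").getValue());
      // The response body is a tar.gz of the RocksDB checkpoint; streaming it to
      // disk and error handling are omitted here.
      System.out.println("Leader checkpoint covers index " + ratisSnapshotIndex);
    }
  }
}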
*/ private OMNodeDetails(String serviceId, String nodeId, - InetSocketAddress rpcAddr, int rpcPort, int ratisPort) { + InetSocketAddress rpcAddr, int rpcPort, int ratisPort, + String httpAddress, String httpsAddress) { this.omServiceId = serviceId; this.omNodeId = nodeId; this.rpcAddress = rpcAddr; this.rpcPort = rpcPort; this.ratisPort = ratisPort; + this.httpAddress = httpAddress; + this.httpsAddress = httpsAddress; } /** @@ -53,6 +63,8 @@ public static class Builder { private InetSocketAddress rpcAddress; private int rpcPort; private int ratisPort; + private String httpAddr; + private String httpsAddr; public Builder setRpcAddress(InetSocketAddress rpcAddr) { this.rpcAddress = rpcAddr; @@ -75,9 +87,19 @@ public Builder setOMNodeId(String nodeId) { return this; } + public Builder setHttpAddress(String httpAddress) { + this.httpAddr = httpAddress; + return this; + } + + public Builder setHttpsAddress(String httpsAddress) { + this.httpsAddr = httpsAddress; + return this; + } + public OMNodeDetails build() { return new OMNodeDetails(omServiceId, omNodeId, rpcAddress, rpcPort, - ratisPort); + ratisPort, httpAddr, httpsAddr); } } @@ -108,4 +130,19 @@ public int getRpcPort() { public String getRpcAddressString() { return NetUtils.getHostPortString(rpcAddress); } + + public String getOMDBCheckpointEnpointUrl(HttpConfig.Policy httpPolicy) { + if (httpPolicy.isHttpEnabled()) { + if (StringUtils.isNotEmpty(httpAddress)) { + return "http://" + httpAddress + OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT + + "?" + OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT + "=true"; + } + } else { + if (StringUtils.isNotEmpty(httpsAddress)) { + return "https://" + httpsAddress + OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT + + "?" + OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT + "=true"; + } + } + return null; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index b3357e1668ac9..598525489e121 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -78,6 +78,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeOwnerChangeResponse; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol; +import org.apache.hadoop.ozone.om.snapshot.OzoneManagerSnapshotProvider; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -184,22 +185,17 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_FILE; import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE; - import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_INDEX; import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys - .OZONE_OM_HANDLER_COUNT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys - .OZONE_OM_HANDLER_COUNT_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys - .OZONE_OM_METRICS_SAVE_INTERVAL; -import static org.apache.hadoop.ozone.om.OMConfigKeys - .OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT; - +import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODE_ID_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys - .OZONE_OM_RATIS_PORT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD; @@ -208,10 +204,6 @@ import static org.apache.hadoop.ozone.protocol.proto .OzoneManagerProtocolProtos.OzoneManagerService .newReflectiveBlockingService; -import static org.apache.hadoop.ozone.om.OMConfigKeys - .OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys - .OZONE_OM_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.util.ExitUtil.terminate; /** @@ -241,7 +233,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private RPC.Server omRpcServer; private InetSocketAddress omRpcAddress; private String omId; - private List peerNodes; private final OMMetadataManager metadataManager; private final VolumeManager volumeManager; private final BucketManager bucketManager; @@ -273,7 +264,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private boolean isRatisEnabled; private OzoneManagerRatisServer omRatisServer; private OzoneManagerRatisClient omRatisClient; + private OzoneManagerSnapshotProvider omSnapshotProvider; private OMNodeDetails omNodeDetails; + private List peerNodes; + private File omRatisSnapshotDir; private final File ratisSnapshotFile; private long snapshotIndex; @@ -319,6 +313,11 @@ private OzoneManager(OzoneConfiguration conf) throws IOException, startRatisServer(); startRatisClient(); + if (peerNodes != null && !peerNodes.isEmpty()) { + this.omSnapshotProvider = new OzoneManagerSnapshotProvider(configuration, + omRatisSnapshotDir, peerNodes); + } + this.ratisSnapshotFile = new File(omStorage.getCurrentDir(), OM_RATIS_SNAPSHOT_INDEX); this.snapshotIndex = loadRatisSnapshotIndex(); @@ -449,11 +448,17 @@ private void loadOMHAConfigs(Configuration conf) { } else { // This OMNode belongs to same OM service as the current OMNode. // Add it to peerNodes list. 
+ String httpAddr = OmUtils.getHttpAddressForOMPeerNode(conf, + serviceId, nodeId, addr.getHostName()); + String httpsAddr = OmUtils.getHttpsAddressForOMPeerNode(conf, + serviceId, nodeId, addr.getHostName()); OMNodeDetails peerNodeInfo = new OMNodeDetails.Builder() .setOMServiceId(serviceId) .setOMNodeId(nodeId) .setRpcAddress(addr) .setRatisPort(ratisPort) + .setHttpAddress(httpAddr) + .setHttpsAddress(httpsAddr) .build(); peerNodesList.add(peerNodeInfo); } @@ -465,6 +470,7 @@ private void loadOMHAConfigs(Configuration conf) { setOMNodeDetails(localOMServiceId, localOMNodeId, localRpcAddress, localRatisPort); + this.peerNodes = peerNodesList; LOG.info("Found matching OM address with OMServiceId: {}, " + @@ -530,6 +536,49 @@ private void setOMNodeDetails(String serviceId, String nodeId, // Set this nodes OZONE_OM_ADDRESS_KEY to the discovered address. configuration.set(OZONE_OM_ADDRESS_KEY, NetUtils.getHostPortString(rpcAddress)); + + // Create Ratis storage dir + String omRatisDirectory = OmUtils.getOMRatisDirectory(configuration); + if (omRatisDirectory == null || omRatisDirectory.isEmpty()) { + throw new IllegalArgumentException(HddsConfigKeys.OZONE_METADATA_DIRS + + " must be defined."); + } + OmUtils.createOMDir(omRatisDirectory); + + // Create Ratis snapshot dir + omRatisSnapshotDir = OmUtils.createOMDir( + OmUtils.getOMRatisSnapshotDirectory(configuration)); + + // Get and set Http(s) address of local node. If base config keys are + // not set, check for keys suffixed with OM serivce ID and node ID. + setOMNodeSpecificConfigs(serviceId, nodeId); + } + + /** + * Check if any of the following configuration keys have been set using OM + * Node ID suffixed to the key. If yes, then set the base key with the + * configured valued. + * 1. {@link OMConfigKeys#OZONE_OM_HTTP_ADDRESS_KEY} + * 2. {@link OMConfigKeys#OZONE_OM_HTTPS_ADDRESS_KEY} + * 3. {@link OMConfigKeys#OZONE_OM_HTTP_BIND_HOST_KEY} + * 4. 
{@link OMConfigKeys#OZONE_OM_HTTPS_BIND_HOST_KEY} + */ + private void setOMNodeSpecificConfigs(String omServiceId, String omNodeId) { + String[] confKeys = new String[] { + OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, + OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, + OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY, + OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY}; + + for (String confKey : confKeys) { + String confValue = OmUtils.getConfSuffixedWithOMNodeId( + configuration, confKey, omServiceId, omNodeId); + if (confValue != null) { + LOG.info("Setting configuration key {} with value of key {}: {}", + confKey, OmUtils.addKeySuffixes(confKey, omNodeId), confValue); + configuration.set(confKey, confValue); + } + } } private KeyProviderCryptoExtension createKeyProviderExt( @@ -1124,6 +1173,11 @@ public OzoneManagerRatisServer getOmRatisServer() { return omRatisServer; } + @VisibleForTesting + public OzoneManagerSnapshotProvider getOmSnapshotProvider() { + return omSnapshotProvider; + } + @VisibleForTesting public InetSocketAddress getOmRpcServerAddr() { return omRpcAddress; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java index 7a8c107f61744..b98d6d3a1ef60 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java @@ -23,6 +23,9 @@ import java.io.IOException; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT; + /** * HttpServer wrapper for the OzoneManager. 
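The node-specific key promotion described in the javadoc above boils down to the OmUtils helper added earlier in this patch. A sketch of the resolution rule; the service ID, node ID and address are made up, and the literal for OZONE_OM_HTTP_ADDRESS_KEY is assumed to be ozone.om.http-address:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OmUtils;

public class NodeSuffixedKeySketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // HA configs carry per-node values under <key>.<serviceId>.<nodeId>.
    conf.set("ozone.om.http-address.om-service-test1.omNode-1", "127.0.0.1:9876");

    // Returns the suffixed value only because the base key is not set; if
    // ozone.om.http-address were set directly, this helper would return null
    // and the base value would win.
    String addr = OmUtils.getConfSuffixedWithOMNodeId(conf,
        "ozone.om.http-address", "om-service-test1", "omNode-1");
    System.out.println(addr); // 127.0.0.1:9876
  }
}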
*/ @@ -31,8 +34,10 @@ public class OzoneManagerHttpServer extends BaseHttpServer { public OzoneManagerHttpServer(Configuration conf, OzoneManager om) throws IOException { super(conf, "ozoneManager"); - addServlet("serviceList", "/serviceList", ServiceListJSONServlet.class); - addServlet("dbCheckpoint", "/dbCheckpoint", OMDBCheckpointServlet.class); + addServlet("serviceList", OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT, + ServiceListJSONServlet.class); + addServlet("dbCheckpoint", OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT, + OMDBCheckpointServlet.class); getWebAppContext().setAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE, om); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index 58ab181402dc9..1b79dd732150f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.ratis; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Strings; import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; @@ -39,7 +38,7 @@ import com.google.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.scm.HddsServerUtil; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMNodeDetails; import org.apache.hadoop.ozone.om.OzoneManager; @@ -358,7 +357,7 @@ private RaftProperties newRaftProperties(Configuration conf) { } // Set Ratis storage directory - String storageDir = getOMRatisDirectory(conf); + String storageDir = OmUtils.getOMRatisDirectory(conf); RaftServerConfigKeys.setStorageDirs(properties, Collections.singletonList(new File(storageDir))); @@ -620,18 +619,6 @@ public RaftPeerId getRaftPeerId() { return this.raftPeerId; } - /** - * Get the local directory where ratis logs will be stored. - */ - public static String getOMRatisDirectory(Configuration conf) { - String storageDir = conf.get(OMConfigKeys.OZONE_OM_RATIS_STORAGE_DIR); - - if (Strings.isNullOrEmpty(storageDir)) { - storageDir = HddsServerUtil.getDefaultRatisDirectory(conf); - } - return storageDir; - } - private UUID getRaftGroupIdFromOmServiceId(String omServiceId) { return UUID.nameUUIDFromBytes(omServiceId.getBytes(StandardCharsets.UTF_8)); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java new file mode 100644 index 0000000000000..e1d488923a0ec --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java @@ -0,0 +1,210 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.ozone.om.OMNodeDetails; +import org.apache.hadoop.utils.db.DBCheckpoint; +import org.apache.hadoop.utils.db.RocksDBCheckpoint; +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpResponse; +import org.apache.http.client.HttpClient; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.util.EntityUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static java.net.HttpURLConnection.HTTP_CREATED; +import static java.net.HttpURLConnection.HTTP_OK; +import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_INDEX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_KEY; + +/** + * OzoneManagerSnapshotProvider downloads the latest checkpoint from the + * leader OM and loads the checkpoint into State Machine. 
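A minimal usage sketch of the provider (conf, omRatisSnapshotDir and peerNodes are assumed to be in scope, "om2" is an illustrative peer id, and since getOzoneManagerDBSnapshot is protected the call is in practice driven from within the OM Ratis state machine):

    OzoneManagerSnapshotProvider snapshotProvider =
        new OzoneManagerSnapshotProvider(conf, omRatisSnapshotDir, peerNodes);
    // Fetches the leader's /dbCheckpoint endpoint as a tar.gz, untars it under
    // the snapshot directory and returns a RocksDBCheckpoint carrying the Ratis
    // snapshot index taken from the response header.
    DBCheckpoint checkpoint =
        snapshotProvider.getOzoneManagerDBSnapshot("om2");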
+ */ +public class OzoneManagerSnapshotProvider { + + private static final Logger LOG = + LoggerFactory.getLogger(OzoneManagerSnapshotProvider.class); + + private final File omSnapshotDir; + private Map peerNodesMap; + private final HttpConfig.Policy httpPolicy; + private final RequestConfig httpRequestConfig; + private CloseableHttpClient httpClient; + + private static final String OM_SNAPSHOT_DB = "om.snapshot.db"; + + public OzoneManagerSnapshotProvider(Configuration conf, + File omRatisSnapshotDir, List peerNodes) { + + LOG.info("Initializing OM Snapshot Provider"); + this.omSnapshotDir = omRatisSnapshotDir; + + this.peerNodesMap = new HashMap<>(); + for (OMNodeDetails peerNode : peerNodes) { + this.peerNodesMap.put(peerNode.getOMNodeId(), peerNode); + } + + this.httpPolicy = DFSUtil.getHttpPolicy(conf); + this.httpRequestConfig = getHttpRequestConfig(conf); + } + + private RequestConfig getHttpRequestConfig(Configuration conf) { + TimeUnit socketTimeoutUnit = + OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT.getUnit(); + int socketTimeoutMS = (int) conf.getTimeDuration( + OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_KEY, + OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT.getDuration(), + socketTimeoutUnit); + + TimeUnit connectionTimeoutUnit = + OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT.getUnit(); + int connectionTimeoutMS = (int) conf.getTimeDuration( + OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_KEY, + OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT.getDuration(), + connectionTimeoutUnit); + + TimeUnit requestTimeoutUnit = + OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT.getUnit(); + int requestTimeoutMS = (int) conf.getTimeDuration( + OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_KEY, + OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT.getDuration(), + requestTimeoutUnit); + + RequestConfig requestConfig = RequestConfig.custom() + .setSocketTimeout(socketTimeoutMS) + .setConnectTimeout(connectionTimeoutMS) + .setConnectionRequestTimeout(requestTimeoutMS) + .build(); + + return requestConfig; + } + + /** + * Create and return http client object. + */ + private HttpClient getHttpClient() { + if (httpClient == null) { + httpClient = HttpClientBuilder + .create() + .setDefaultRequestConfig(httpRequestConfig) + .build(); + } + return httpClient; + } + + /** + * Close http client object. + */ + private void closeHttpClient() throws IOException { + if (httpClient != null) { + httpClient.close(); + httpClient = null; + } + } + + /** + * Download the latest checkpoint from OM Leader via HTTP. + * @param leaderOMNodeID leader OM Node ID. + * @return the DB checkpoint (including the ratis snapshot index) + */ + protected DBCheckpoint getOzoneManagerDBSnapshot(String leaderOMNodeID) + throws IOException { + String snapshotFileName = OM_SNAPSHOT_DB + "_" + System.currentTimeMillis(); + File targetFile = new File(omSnapshotDir, snapshotFileName + ".tar.gz"); + + String omCheckpointUrl = peerNodesMap.get(leaderOMNodeID) + .getOMDBCheckpointEnpointUrl(httpPolicy); + + LOG.info("Downloading latest checkpoint from Leader OM {}. 
Checkpoint " + + "URL: {}", leaderOMNodeID, omCheckpointUrl); + + try { + HttpGet httpGet = new HttpGet(omCheckpointUrl); + HttpResponse response = getHttpClient().execute(httpGet); + int errorCode = response.getStatusLine().getStatusCode(); + HttpEntity entity = response.getEntity(); + + if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) { + + Header header = response.getFirstHeader(OM_RATIS_SNAPSHOT_INDEX); + if (header == null) { + throw new IOException("The HTTP response header " + + OM_RATIS_SNAPSHOT_INDEX + " is missing."); + } + + long snapshotIndex = Long.parseLong(header.getValue()); + + try (InputStream inputStream = entity.getContent()) { + FileUtils.copyInputStreamToFile(inputStream, targetFile); + } + + // Untar the checkpoint file. + Path untarredDbDir = Paths.get(omSnapshotDir.getAbsolutePath(), + snapshotFileName); + FileUtil.unTar(targetFile, untarredDbDir.toFile()); + FileUtils.deleteQuietly(targetFile); + + LOG.info("Sucessfully downloaded latest checkpoint with snapshot " + + "index {} from leader OM: {}", snapshotIndex, leaderOMNodeID); + + RocksDBCheckpoint omCheckpoint = new RocksDBCheckpoint(untarredDbDir); + omCheckpoint.setRatisSnapshotIndex(snapshotIndex); + return omCheckpoint; + } + + if (entity != null) { + throw new IOException("Unexpected exception when trying to reach " + + "OM to download latest checkpoint. Checkpoint URL: " + + omCheckpointUrl + ". Entity: " + EntityUtils.toString(entity)); + } else { + throw new IOException("Unexpected null in http payload, while " + + "processing request to OM to download latest checkpoint. " + + "Checkpoint Url: " + omCheckpointUrl); + } + } finally { + closeHttpClient(); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java new file mode 100644 index 0000000000000..3c82a6940780d --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +/** + * This package contains OM Ratis Snapshot related classes. + */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java index 0a615d4e35169..b552facf6231a 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_CONNECTION_REQUEST_TIMEOUT; @@ -69,7 +70,6 @@ public class OzoneManagerServiceProviderImpl private static final Logger LOG = LoggerFactory.getLogger(OzoneManagerServiceProviderImpl.class); - private final String dbCheckpointEndPoint = "/dbCheckpoint"; private final CloseableHttpClient httpClient; private File omSnapshotDBParentDir = null; private String omDBSnapshotUrl; @@ -116,11 +116,11 @@ public OzoneManagerServiceProviderImpl(OzoneConfiguration configuration) { .build(); omDBSnapshotUrl = "http://" + ozoneManagerHttpAddress + - dbCheckpointEndPoint; + OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; if (ozoneSecurityEnabled) { omDBSnapshotUrl = "https://" + ozoneManagerHttpsAddress + - dbCheckpointEndPoint; + OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; } boolean flushParam = configuration.getBoolean( From a91d24fea45c2d269fabe46d43d5d4156ba47e1c Mon Sep 17 00:00:00 2001 From: Hanisha Koneru Date: Thu, 6 Jun 2019 19:44:40 -0700 Subject: [PATCH 0145/1308] HDDS-1496. 
Support partial chunk reads and checksum verification (#804) --- .../hdds/scm/storage/BlockInputStream.java | 634 ++++++++---------- .../hdds/scm/storage/ChunkInputStream.java | 546 +++++++++++++++ .../scm/storage/TestBlockInputStream.java | 251 ++++--- .../scm/storage/TestChunkInputStream.java | 224 +++++++ .../apache/hadoop/ozone/common/Checksum.java | 29 +- .../hadoop/ozone/common/ChecksumData.java | 37 +- .../ozone/client/io/KeyInputStream.java | 380 ++++------- .../hadoop/ozone/client/rpc/RpcClient.java | 4 +- .../storage/DistributedStorageHandler.java | 3 +- .../hadoop/ozone/om/TestChunkStreams.java | 11 +- 10 files changed, 1377 insertions(+), 742 deletions(-) create mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java create mode 100644 hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index 82fb1063d3bd8..bccbc9bdb96a7 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -19,470 +19,370 @@ package org.apache.hadoop.hdds.scm.storage; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; import org.apache.hadoop.fs.Seekable; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadChunkResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - ContainerCommandRequestProto; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.EOFException; import java.io.IOException; import java.io.InputStream; -import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; /** - * An {@link InputStream} used by the REST service in combination with the - * SCMClient to read the value of a key from a sequence - * of container chunks. All bytes of the key value are stored in container - * chunks. Each chunk may contain multiple underlying {@link ByteBuffer} - * instances. 
This class encapsulates all state management for iterating - * through the sequence of chunks and the sequence of buffers within each chunk. + * An {@link InputStream} called from KeyInputStream to read a block from the + * container. + * This class encapsulates all state management for iterating + * through the sequence of chunks through {@link ChunkInputStream}. */ public class BlockInputStream extends InputStream implements Seekable { + private static final Logger LOG = + LoggerFactory.getLogger(BlockInputStream.class); + private static final int EOF = -1; private final BlockID blockID; + private final long length; + private Pipeline pipeline; + private final Token token; + private final boolean verifyChecksum; private final String traceID; private XceiverClientManager xceiverClientManager; private XceiverClientSpi xceiverClient; - private List chunks; - // ChunkIndex points to the index current chunk in the buffers or the the - // index of chunk which will be read next into the buffers in - // readChunkFromContainer(). + private boolean initialized = false; + + // List of ChunkInputStreams, one for each chunk in the block + private List chunkStreams; + + // chunkOffsets[i] stores the index of the first data byte in + // chunkStream i w.r.t the block data. + // Let’s say we have chunk size as 40 bytes. And let's say the parent + // block stores data from index 200 and has length 400. + // The first 40 bytes of this block will be stored in chunk[0], next 40 in + // chunk[1] and so on. But since the chunkOffsets are w.r.t the block only + // and not the key, the values in chunkOffsets will be [0, 40, 80,....]. + private long[] chunkOffsets = null; + + // Index of the chunkStream corresponding to the current position of the + // BlockInputStream i.e offset of the data to be read next from this block private int chunkIndex; - // ChunkIndexOfCurrentBuffer points to the index of chunk read into the - // buffers or index of the last chunk in the buffers. It is updated only - // when a new chunk is read from container into the buffers. - private int chunkIndexOfCurrentBuffer; - private long[] chunkOffset; - private List buffers; - private int bufferIndex; - private long bufferPosition; - private boolean verifyChecksum; - /** - * Creates a new BlockInputStream. - * - * @param blockID block ID of the chunk - * @param xceiverClientManager client manager that controls client - * @param xceiverClient client to perform container calls - * @param chunks list of chunks to read - * @param traceID container protocol call traceID - * @param verifyChecksum verify checksum - * @param initialPosition the initial position of the stream pointer. This - * position is seeked now if the up-stream was seeked - * before this was created. 
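For orientation, a sketch of how a caller such as KeyInputStream is expected to construct the refactored stream (the local variables are assumed to be in scope):

    BlockInputStream blockStream = new BlockInputStream(blockID, blockLength,
        pipeline, token, verifyChecksum, traceID, xceiverClientManager);
    // No RPC is issued here: the chunk list is fetched lazily by initialize()
    // on the first read(), and a seek() before that only records the target
    // offset in blockPosition.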
- */ - public BlockInputStream( - BlockID blockID, XceiverClientManager xceiverClientManager, - XceiverClientSpi xceiverClient, List chunks, String traceID, - boolean verifyChecksum, long initialPosition) throws IOException { - this.blockID = blockID; - this.traceID = traceID; - this.xceiverClientManager = xceiverClientManager; - this.xceiverClient = xceiverClient; - this.chunks = chunks; - this.chunkIndex = 0; - this.chunkIndexOfCurrentBuffer = -1; - // chunkOffset[i] stores offset at which chunk i stores data in - // BlockInputStream - this.chunkOffset = new long[this.chunks.size()]; - initializeChunkOffset(); - this.buffers = null; - this.bufferIndex = 0; - this.bufferPosition = -1; + // Position of the BlockInputStream is maintainted by this variable till + // the stream is initialized. This position is w.r.t to the block only and + // not the key. + // For the above example, if we seek to position 240 before the stream is + // initialized, then value of blockPosition will be set to 40. + // Once, the stream is initialized, the position of the stream + // will be determined by the current chunkStream and its position. + private long blockPosition = 0; + + // Tracks the chunkIndex corresponding to the last blockPosition so that it + // can be reset if a new position is seeked. + private int chunkIndexOfPrevPosition; + + public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, + Token token, boolean verifyChecksum, + String traceId, XceiverClientManager xceiverClientManager) { + this.blockID = blockId; + this.length = blockLen; + this.pipeline = pipeline; + this.token = token; this.verifyChecksum = verifyChecksum; - if (initialPosition > 0) { - // The stream was seeked to a position before the stream was - // initialized. So seeking to the position now. - seek(initialPosition); - } + this.traceID = traceId; + this.xceiverClientManager = xceiverClientManager; } - private void initializeChunkOffset() { - long tempOffset = 0; - for (int i = 0; i < chunks.size(); i++) { - chunkOffset[i] = tempOffset; - tempOffset += chunks.get(i).getLen(); + /** + * Initialize the BlockInputStream. Get the BlockData (list of chunks) from + * the Container and create the ChunkInputStreams for each Chunk in the Block. + */ + public synchronized void initialize() throws IOException { + + // Pre-check that the stream has not been intialized already + if (initialized) { + return; } - } - @Override - public synchronized int read() - throws IOException { - checkOpen(); - int available = prepareRead(1); - int dataout = EOF; + List chunks = getChunkInfos(); + if (chunks != null && !chunks.isEmpty()) { + // For each chunk in the block, create a ChunkInputStream and compute + // its chunkOffset + this.chunkOffsets = new long[chunks.size()]; + long tempOffset = 0; + + this.chunkStreams = new ArrayList<>(chunks.size()); + for (int i = 0; i < chunks.size(); i++) { + addStream(chunks.get(i)); + chunkOffsets[i] = tempOffset; + tempOffset += chunks.get(i).getLen(); + } - if (available == EOF) { - Preconditions - .checkState(buffers == null); //should have released by now, see below - } else { - dataout = Byte.toUnsignedInt(buffers.get(bufferIndex).get()); - } + initialized = true; + this.chunkIndex = 0; - if (blockStreamEOF()) { - // consumer might use getPos to determine EOF, - // so release buffers when serving the last byte of data - releaseBuffers(); + if (blockPosition > 0) { + // Stream was seeked to blockPosition before initialization. Seek to the + // blockPosition now. 
+ seek(blockPosition); + } } - - return dataout; } - @Override - public synchronized int read(byte[] b, int off, int len) throws IOException { - // According to the JavaDocs for InputStream, it is recommended that - // subclasses provide an override of bulk read if possible for performance - // reasons. In addition to performance, we need to do it for correctness - // reasons. The Ozone REST service uses PipedInputStream and - // PipedOutputStream to relay HTTP response data between a Jersey thread and - // a Netty thread. It turns out that PipedInputStream/PipedOutputStream - // have a subtle dependency (bug?) on the wrapped stream providing separate - // implementations of single-byte read and bulk read. Without this, get key - // responses might close the connection before writing all of the bytes - // advertised in the Content-Length. - if (b == null) { - throw new NullPointerException(); - } - if (off < 0 || len < 0 || len > b.length - off) { - throw new IndexOutOfBoundsException(); - } - if (len == 0) { - return 0; - } - checkOpen(); - int total = 0; - while (len > 0) { - int available = prepareRead(len); - if (available == EOF) { - Preconditions - .checkState(buffers == null); //should have been released by now - return total != 0 ? total : EOF; - } - buffers.get(bufferIndex).get(b, off + total, available); - len -= available; - total += available; + /** + * Send RPC call to get the block info from the container. + * @return List of chunks in this block. + */ + protected List getChunkInfos() throws IOException { + // irrespective of the container state, we will always read via Standalone + // protocol. + if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) { + pipeline = Pipeline.newBuilder(pipeline) + .setType(HddsProtos.ReplicationType.STAND_ALONE).build(); } + xceiverClient = xceiverClientManager.acquireClient(pipeline); + boolean success = false; + List chunks; + try { + LOG.debug("Initializing BlockInputStream for get key to access {}", + blockID.getContainerID()); - if (blockStreamEOF()) { - // smart consumers determine EOF by calling getPos() - // so we release buffers when serving the final bytes of data - releaseBuffers(); + if (token != null) { + UserGroupInformation.getCurrentUser().addToken(token); + } + DatanodeBlockID datanodeBlockID = blockID + .getDatanodeBlockIDProtobuf(); + GetBlockResponseProto response = ContainerProtocolCalls + .getBlock(xceiverClient, datanodeBlockID, traceID); + + chunks = response.getBlockData().getChunksList(); + success = true; + } finally { + if (!success) { + xceiverClientManager.releaseClient(xceiverClient, false); + } } - return total; + return chunks; } /** - * Determines if all data in the stream has been consumed. - * - * @return true if EOF, false if more data is available + * Append another ChunkInputStream to the end of the list. Note that the + * ChunkInputStream is only created here. The chunk will be read from the + * Datanode only when a read operation is performed on for that chunk. 
*/ - protected boolean blockStreamEOF() { - if (buffersHaveData() || chunksRemaining()) { - return false; - } else { - // if there are any chunks, we better be at the last chunk for EOF - Preconditions.checkState(((chunks == null) || chunks.isEmpty() || - chunkIndex == (chunks.size() - 1)), - "EOF detected, but not at the last chunk"); - return true; - } - } - - private void releaseBuffers() { - //ashes to ashes, dust to dust - buffers = null; - bufferIndex = 0; + protected synchronized void addStream(ChunkInfo chunkInfo) { + chunkStreams.add(new ChunkInputStream(chunkInfo, blockID, traceID, + xceiverClient, verifyChecksum)); } - @Override - public synchronized void close() { - if (xceiverClientManager != null && xceiverClient != null) { - xceiverClientManager.releaseClient(xceiverClient, false); - xceiverClientManager = null; - xceiverClient = null; - } + public synchronized long getRemaining() throws IOException { + return length - getPos(); } /** - * Checks if the stream is open. If not, throws an exception. - * - * @throws IOException if stream is closed + * {@inheritDoc} */ - private synchronized void checkOpen() throws IOException { - if (xceiverClient == null) { - throw new IOException("BlockInputStream has been closed."); + @Override + public synchronized int read() throws IOException { + byte[] buf = new byte[1]; + if (read(buf, 0, 1) == EOF) { + return EOF; } + return Byte.toUnsignedInt(buf[0]); } /** - * Prepares to read by advancing through chunks and buffers as needed until it - * finds data to return or encounters EOF. - * - * @param len desired length of data to read - * @return length of data available to read, possibly less than desired length + * {@inheritDoc} */ - private synchronized int prepareRead(int len) throws IOException { - for (;;) { - if (!buffersAllocated()) { - // The current chunk at chunkIndex has not been read from the - // container. Read the chunk and put the data into buffers. - readChunkFromContainer(); - } - if (buffersHaveData()) { - // Data is available from buffers - ByteBuffer bb = buffers.get(bufferIndex); - return len > bb.remaining() ? bb.remaining() : len; - } else if (chunksRemaining()) { - // There are additional chunks available. - // Read the next chunk in the block. - chunkIndex += 1; - readChunkFromContainer(); - } else { - // All available input has been consumed. 
- return EOF; - } + @Override + public synchronized int read(byte[] b, int off, int len) throws IOException { + if (b == null) { + throw new NullPointerException(); } - } - - private boolean buffersAllocated() { - if (buffers == null || buffers.isEmpty()) { - return false; + if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); } - return true; - } - - private boolean buffersHaveData() { - boolean hasData = false; - - if (buffersAllocated()) { - while (bufferIndex < (buffers.size())) { - if (buffers.get(bufferIndex).hasRemaining()) { - // current buffer has data - hasData = true; - break; - } else { - if (buffersRemaining()) { - // move to next available buffer - ++bufferIndex; - Preconditions.checkState(bufferIndex < buffers.size()); - } else { - // no more buffers remaining - break; - } - } - } + if (len == 0) { + return 0; } - return hasData; - } + if (!initialized) { + initialize(); + } - private boolean buffersRemaining() { - return (bufferIndex < (buffers.size() - 1)); - } + checkOpen(); + int totalReadLen = 0; + while (len > 0) { + // if we are at the last chunk and have read the entire chunk, return + if (chunkStreams.size() == 0 || + (chunkStreams.size() - 1 <= chunkIndex && + chunkStreams.get(chunkIndex) + .getRemaining() == 0)) { + return totalReadLen == 0 ? EOF : totalReadLen; + } - private boolean chunksRemaining() { - if ((chunks == null) || chunks.isEmpty()) { - return false; - } - // Check if more chunks are remaining in the stream after chunkIndex - if (chunkIndex < (chunks.size() - 1)) { - return true; + // Get the current chunkStream and read data from it + ChunkInputStream current = chunkStreams.get(chunkIndex); + int numBytesToRead = Math.min(len, (int)current.getRemaining()); + int numBytesRead = current.read(b, off, numBytesToRead); + if (numBytesRead != numBytesToRead) { + // This implies that there is either data loss or corruption in the + // chunk entries. Even EOF in the current stream would be covered in + // this case. + throw new IOException(String.format( + "Inconsistent read for chunkName=%s length=%d numBytesRead=%d", + current.getChunkName(), current.getLength(), numBytesRead)); + } + totalReadLen += numBytesRead; + off += numBytesRead; + len -= numBytesRead; + if (current.getRemaining() <= 0 && + ((chunkIndex + 1) < chunkStreams.size())) { + chunkIndex += 1; + } } - // ChunkIndex is the last chunk in the stream. Check if this chunk has - // been read from container or not. Return true if chunkIndex has not - // been read yet and false otherwise. - return chunkIndexOfCurrentBuffer != chunkIndex; + return totalReadLen; } /** - * Attempts to read the chunk at the specified offset in the chunk list. If - * successful, then the data of the read chunk is saved so that its bytes can - * be returned from subsequent read calls. + * Seeks the BlockInputStream to the specified position. If the stream is + * not initialized, save the seeked position via blockPosition. Otherwise, + * update the position in 2 steps: + * 1. Updating the chunkIndex to the chunkStream corresponding to the + * seeked position. + * 2. Seek the corresponding chunkStream to the adjusted position. * - * @throws IOException if there is an I/O error while performing the call + * Let’s say we have chunk size as 40 bytes. And let's say the parent block + * stores data from index 200 and has length 400. If the key was seeked to + * position 90, then this block will be seeked to position 90. + * When seek(90) is called on this blockStream, then + * 1. 
chunkIndex will be set to 2 (as indices 80 - 120 reside in chunk[2]). + * 2. chunkStream[2] will be seeked to position 10 + * (= 90 - chunkOffset[2] (= 80)). */ - private synchronized void readChunkFromContainer() throws IOException { - // Read the chunk at chunkIndex - final ChunkInfo chunkInfo = chunks.get(chunkIndex); - ByteString byteString; - byteString = readChunk(chunkInfo); - buffers = byteString.asReadOnlyByteBufferList(); - bufferIndex = 0; - chunkIndexOfCurrentBuffer = chunkIndex; - - // The bufferIndex and position might need to be adjusted if seek() was - // called on the stream before. This needs to be done so that the buffer - // position can be advanced to the 'seeked' position. - adjustBufferIndex(); - } - - /** - * Send RPC call to get the chunk from the container. - */ - @VisibleForTesting - protected ByteString readChunk(final ChunkInfo chunkInfo) - throws IOException { - ReadChunkResponseProto readChunkResponse; - try { - List validators = - ContainerProtocolCalls.getValidatorList(); - validators.add(validator); - readChunkResponse = ContainerProtocolCalls - .readChunk(xceiverClient, chunkInfo, blockID, traceID, validators); - } catch (IOException e) { - if (e instanceof StorageContainerException) { - throw e; - } - throw new IOException("Unexpected OzoneException: " + e.toString(), e); - } - return readChunkResponse.getData(); - } - - @VisibleForTesting - protected List getDatanodeList() { - return xceiverClient.getPipeline().getNodes(); - } - - private CheckedBiFunction validator = - (request, response) -> { - ReadChunkResponseProto readChunkResponse = response.getReadChunk(); - final ChunkInfo chunkInfo = readChunkResponse.getChunkData(); - ByteString byteString = readChunkResponse.getData(); - if (byteString.size() != chunkInfo.getLen()) { - // Bytes read from chunk should be equal to chunk size. - throw new OzoneChecksumException(String - .format("Inconsistent read for chunk=%s len=%d bytesRead=%d", - chunkInfo.getChunkName(), chunkInfo.getLen(), - byteString.size())); - } - ChecksumData checksumData = - ChecksumData.getFromProtoBuf(chunkInfo.getChecksumData()); - if (verifyChecksum) { - Checksum.verifyChecksum(byteString, checksumData); - } - }; - @Override public synchronized void seek(long pos) throws IOException { - if (pos < 0 || (chunks.size() == 0 && pos > 0) - || pos >= chunkOffset[chunks.size() - 1] + chunks.get(chunks.size() - 1) - .getLen()) { - throw new EOFException("EOF encountered pos: " + pos + " container key: " - + blockID.getLocalID()); + if (!initialized) { + // Stream has not been initialized yet. Save the position so that it + // can be seeked when the stream is initialized. 
+ blockPosition = pos; + return; + } + + checkOpen(); + if (pos < 0 || pos >= length) { + if (pos == 0) { + // It is possible for length and pos to be zero in which case + // seek should return instead of throwing exception + return; + } + throw new EOFException( + "EOF encountered at pos: " + pos + " for block: " + blockID); } - if (pos < chunkOffset[chunkIndex]) { - chunkIndex = Arrays.binarySearch(chunkOffset, 0, chunkIndex, pos); - } else if (pos >= chunkOffset[chunkIndex] + chunks.get(chunkIndex) - .getLen()) { + if (chunkIndex >= chunkStreams.size()) { + chunkIndex = Arrays.binarySearch(chunkOffsets, pos); + } else if (pos < chunkOffsets[chunkIndex]) { chunkIndex = - Arrays.binarySearch(chunkOffset, chunkIndex + 1, chunks.size(), pos); + Arrays.binarySearch(chunkOffsets, 0, chunkIndex, pos); + } else if (pos >= chunkOffsets[chunkIndex] + chunkStreams + .get(chunkIndex).getLength()) { + chunkIndex = Arrays.binarySearch(chunkOffsets, + chunkIndex + 1, chunkStreams.size(), pos); } if (chunkIndex < 0) { // Binary search returns -insertionPoint - 1 if element is not present // in the array. insertionPoint is the point at which element would be // inserted in the sorted array. We need to adjust the chunkIndex // accordingly so that chunkIndex = insertionPoint - 1 - chunkIndex = -chunkIndex -2; + chunkIndex = -chunkIndex - 2; } - // The bufferPosition should be adjusted to account for the chunk offset - // of the chunk the the pos actually points to. - bufferPosition = pos - chunkOffset[chunkIndex]; + // Reset the previous chunkStream's position + chunkStreams.get(chunkIndexOfPrevPosition).resetPosition(); - // Check if current buffers correspond to the chunk index being seeked - // and if the buffers have any data. - if (chunkIndex == chunkIndexOfCurrentBuffer && buffersAllocated()) { - // Position the buffer to the seeked position. - adjustBufferIndex(); - } else { - // Release the current buffers. The next readChunkFromContainer will - // read the required chunk and position the buffer to the seeked - // position. - releaseBuffers(); - } + // seek to the proper offset in the ChunkInputStream + chunkStreams.get(chunkIndex).seek(pos - chunkOffsets[chunkIndex]); + chunkIndexOfPrevPosition = chunkIndex; } - private void adjustBufferIndex() { - if (bufferPosition == -1) { - // The stream has not been seeked to a position. No need to adjust the - // buffer Index and position. - return; + @Override + public synchronized long getPos() throws IOException { + if (length == 0) { + return 0; } - // The bufferPosition is w.r.t the buffers for current chunk. - // Adjust the bufferIndex and position to the seeked position. - long tempOffest = 0; - for (int i = 0; i < buffers.size(); i++) { - if (bufferPosition - tempOffest >= buffers.get(i).capacity()) { - tempOffest += buffers.get(i).capacity(); - } else { - bufferIndex = i; - break; - } + + if (!initialized) { + // The stream is not initialized yet. Return the blockPosition + return blockPosition; + } else { + return chunkOffsets[chunkIndex] + chunkStreams.get(chunkIndex).getPos(); } - buffers.get(bufferIndex).position((int) (bufferPosition - tempOffest)); - // Reset the bufferPosition as the seek() operation has been completed. - bufferPosition = -1; } @Override - public synchronized long getPos() throws IOException { - // position = chunkOffset of current chunk (at chunkIndex) + position of - // the buffer corresponding to the chunk. - long bufferPos = 0; - - if (bufferPosition >= 0) { - // seek has been called but the buffers were empty. 
Hence, the buffer - // position will be advanced after the buffers are filled. - // We return the chunkOffset + bufferPosition here as that will be the - // position of the buffer pointer after reading the chunk file. - bufferPos = bufferPosition; - - } else if (blockStreamEOF()) { - // all data consumed, buffers have been released. - // get position from the chunk offset and chunk length of last chunk - bufferPos = chunks.get(chunkIndex).getLen(); - - } else if (buffersAllocated()) { - // get position from available buffers of current chunk - bufferPos = buffers.get(bufferIndex).position(); + public boolean seekToNewSource(long targetPos) throws IOException { + return false; + } + @Override + public synchronized void close() { + if (xceiverClientManager != null && xceiverClient != null) { + xceiverClientManager.releaseClient(xceiverClient, false); + xceiverClientManager = null; + xceiverClient = null; } + } - return chunkOffset[chunkIndex] + bufferPos; + public synchronized void resetPosition() { + this.blockPosition = 0; } - @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; + /** + * Checks if the stream is open. If not, throw an exception. + * + * @throws IOException if stream is closed + */ + protected synchronized void checkOpen() throws IOException { + if (xceiverClient == null) { + throw new IOException("BlockInputStream has been closed."); + } } public BlockID getBlockID() { return blockID; } + public long getLength() { + return length; + } + @VisibleForTesting - protected int getChunkIndex() { + synchronized int getChunkIndex() { return chunkIndex; } + + @VisibleForTesting + synchronized long getBlockPosition() { + return blockPosition; + } + + @VisibleForTesting + synchronized List getChunkStreams() { + return chunkStreams; + } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java new file mode 100644 index 0000000000000..8d30c22540021 --- /dev/null +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java @@ -0,0 +1,546 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdds.scm.storage; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.Seekable; +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.ozone.common.Checksum; +import org.apache.hadoop.ozone.common.ChecksumData; +import org.apache.hadoop.ozone.common.OzoneChecksumException; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.List; + +/** + * An {@link InputStream} called from BlockInputStream to read a chunk from the + * container. Each chunk may contain multiple underlying {@link ByteBuffer} + * instances. + */ +public class ChunkInputStream extends InputStream implements Seekable { + + private ChunkInfo chunkInfo; + private final long length; + private final BlockID blockID; + private final String traceID; + private XceiverClientSpi xceiverClient; + private boolean verifyChecksum; + private boolean allocated = false; + + // Buffer to store the chunk data read from the DN container + private List buffers; + + // Index of the buffers corresponding to the current position of the buffers + private int bufferIndex; + + // The offset of the current data residing in the buffers w.r.t the start + // of chunk data + private long bufferOffset; + + // The number of bytes of chunk data residing in the buffers currently + private long bufferLength; + + // Position of the ChunkInputStream is maintained by this variable (if a + // seek is performed. This position is w.r.t to the chunk only and not the + // block or key. This variable is set only if either the buffers are not + // yet allocated or the if the allocated buffers do not cover the seeked + // position. Once the chunk is read, this variable is reset. + private long chunkPosition = -1; + + private static final int EOF = -1; + + ChunkInputStream(ChunkInfo chunkInfo, BlockID blockId, + String traceId, XceiverClientSpi xceiverClient, boolean verifyChecksum) { + this.chunkInfo = chunkInfo; + this.length = chunkInfo.getLen(); + this.blockID = blockId; + this.traceID = traceId; + this.xceiverClient = xceiverClient; + this.verifyChecksum = verifyChecksum; + } + + public synchronized long getRemaining() throws IOException { + return length - getPos(); + } + + /** + * {@inheritDoc} + */ + @Override + public synchronized int read() throws IOException { + checkOpen(); + int available = prepareRead(1); + int dataout = EOF; + + if (available == EOF) { + // There is no more data in the chunk stream. 
The buffers should have + // been released by now + Preconditions.checkState(buffers == null); + } else { + dataout = Byte.toUnsignedInt(buffers.get(bufferIndex).get()); + } + + if (chunkStreamEOF()) { + // consumer might use getPos to determine EOF, + // so release buffers when serving the last byte of data + releaseBuffers(); + } + + return dataout; + } + + /** + * {@inheritDoc} + */ + @Override + public synchronized int read(byte[] b, int off, int len) throws IOException { + // According to the JavaDocs for InputStream, it is recommended that + // subclasses provide an override of bulk read if possible for performance + // reasons. In addition to performance, we need to do it for correctness + // reasons. The Ozone REST service uses PipedInputStream and + // PipedOutputStream to relay HTTP response data between a Jersey thread and + // a Netty thread. It turns out that PipedInputStream/PipedOutputStream + // have a subtle dependency (bug?) on the wrapped stream providing separate + // implementations of single-byte read and bulk read. Without this, get key + // responses might close the connection before writing all of the bytes + // advertised in the Content-Length. + if (b == null) { + throw new NullPointerException(); + } + if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } + if (len == 0) { + return 0; + } + checkOpen(); + int total = 0; + while (len > 0) { + int available = prepareRead(len); + if (available == EOF) { + // There is no more data in the chunk stream. The buffers should have + // been released by now + Preconditions.checkState(buffers == null); + return total != 0 ? total : EOF; + } + buffers.get(bufferIndex).get(b, off + total, available); + len -= available; + total += available; + } + + if (chunkStreamEOF()) { + // smart consumers determine EOF by calling getPos() + // so we release buffers when serving the final bytes of data + releaseBuffers(); + } + + return total; + } + + /** + * Seeks the ChunkInputStream to the specified position. This is done by + * updating the chunkPosition to the seeked position in case the buffers + * are not allocated or buffers do not contain the data corresponding to + * the seeked position (determined by buffersHavePosition()). Otherwise, + * the buffers position is updated to the seeked position. + */ + @Override + public synchronized void seek(long pos) throws IOException { + if (pos < 0 || pos >= length) { + if (pos == 0) { + // It is possible for length and pos to be zero in which case + // seek should return instead of throwing exception + return; + } + throw new EOFException("EOF encountered at pos: " + pos + " for chunk: " + + chunkInfo.getChunkName()); + } + + if (buffersHavePosition(pos)) { + // The bufferPosition is w.r.t the current chunk. + // Adjust the bufferIndex and position to the seeked position. 
+ adjustBufferPosition(pos - bufferOffset); + } else { + chunkPosition = pos; + } + } + + @Override + public synchronized long getPos() throws IOException { + if (chunkPosition >= 0) { + return chunkPosition; + } + if (chunkStreamEOF()) { + return length; + } + if (buffersHaveData()) { + return bufferOffset + buffers.get(bufferIndex).position(); + } + if (buffersAllocated()) { + return bufferOffset + bufferLength; + } + return 0; + } + + @Override + public boolean seekToNewSource(long targetPos) throws IOException { + return false; + } + + @Override + public synchronized void close() { + if (xceiverClient != null) { + xceiverClient = null; + } + } + + /** + * Checks if the stream is open. If not, throw an exception. + * + * @throws IOException if stream is closed + */ + protected synchronized void checkOpen() throws IOException { + if (xceiverClient == null) { + throw new IOException("BlockInputStream has been closed."); + } + } + + /** + * Prepares to read by advancing through buffers or allocating new buffers, + * as needed until it finds data to return, or encounters EOF. + * @param len desired lenght of data to read + * @return length of data available to read, possibly less than desired length + */ + private synchronized int prepareRead(int len) throws IOException { + for (;;) { + if (chunkPosition >= 0) { + if (buffersHavePosition(chunkPosition)) { + // The current buffers have the seeked position. Adjust the buffer + // index and position to point to the chunkPosition. + adjustBufferPosition(chunkPosition - bufferOffset); + } else { + // Read a required chunk data to fill the buffers with seeked + // position data + readChunkFromContainer(len); + } + } + if (buffersHaveData()) { + // Data is available from buffers + ByteBuffer bb = buffers.get(bufferIndex); + return len > bb.remaining() ? bb.remaining() : len; + } else if (dataRemainingInChunk()) { + // There is more data in the chunk stream which has not + // been read into the buffers yet. + readChunkFromContainer(len); + } else { + // All available input from this chunk stream has been consumed. + return EOF; + } + } + } + + /** + * Reads full or partial Chunk from DN Container based on the current + * position of the ChunkInputStream, the number of bytes of data to read + * and the checksum boundaries. + * If successful, then the read data in saved in the buffers so that + * subsequent read calls can utilize it. + * @param len number of bytes of data to be read + * @throws IOException if there is an I/O error while performing the call + * to Datanode + */ + private synchronized void readChunkFromContainer(int len) throws IOException { + + // index of first byte to be read from the chunk + long startByteIndex; + if (chunkPosition >= 0) { + // If seek operation was called to advance the buffer position, the + // chunk should be read from that position onwards. + startByteIndex = chunkPosition; + } else { + // Start reading the chunk from the last chunkPosition onwards. + startByteIndex = bufferOffset + bufferLength; + } + + if (verifyChecksum) { + // Update the bufferOffset and bufferLength as per the checksum + // boundary requirement. + computeChecksumBoundaries(startByteIndex, len); + } else { + // Read from the startByteIndex + bufferOffset = startByteIndex; + bufferLength = len; + } + + // Adjust the chunkInfo so that only the required bytes are read from + // the chunk. 
+ final ChunkInfo adjustedChunkInfo = ChunkInfo.newBuilder(chunkInfo) + .setOffset(bufferOffset) + .setLen(bufferLength) + .build(); + + ByteString byteString = readChunk(adjustedChunkInfo); + + buffers = byteString.asReadOnlyByteBufferList(); + bufferIndex = 0; + allocated = true; + + // If the stream was seeked to position before, then the buffer + // position should be adjusted as the reads happen at checksum boundaries. + // The buffers position might need to be adjusted for the following + // scenarios: + // 1. Stream was seeked to a position before the chunk was read + // 2. Chunk was read from index < the current position to account for + // checksum boundaries. + adjustBufferPosition(startByteIndex - bufferOffset); + } + + /** + * Send RPC call to get the chunk from the container. + */ + @VisibleForTesting + protected ByteString readChunk(ChunkInfo readChunkInfo) throws IOException { + ReadChunkResponseProto readChunkResponse; + + try { + List validators = + ContainerProtocolCalls.getValidatorList(); + validators.add(validator); + + readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient, + readChunkInfo, blockID, traceID, validators); + + } catch (IOException e) { + if (e instanceof StorageContainerException) { + throw e; + } + throw new IOException("Unexpected OzoneException: " + e.toString(), e); + } + + return readChunkResponse.getData(); + } + + private CheckedBiFunction validator = + (request, response) -> { + final ChunkInfo reqChunkInfo = + request.getReadChunk().getChunkData(); + + ReadChunkResponseProto readChunkResponse = response.getReadChunk(); + ByteString byteString = readChunkResponse.getData(); + + if (byteString.size() != reqChunkInfo.getLen()) { + // Bytes read from chunk should be equal to chunk size. + throw new OzoneChecksumException(String + .format("Inconsistent read for chunk=%s len=%d bytesRead=%d", + reqChunkInfo.getChunkName(), reqChunkInfo.getLen(), + byteString.size())); + } + + if (verifyChecksum) { + ChecksumData checksumData = ChecksumData.getFromProtoBuf( + chunkInfo.getChecksumData()); + + // ChecksumData stores checksum for each 'numBytesPerChecksum' + // number of bytes in a list. Compute the index of the first + // checksum to match with the read data + + int checkumStartIndex = (int) (reqChunkInfo.getOffset() / + checksumData.getBytesPerChecksum()); + Checksum.verifyChecksum( + byteString, checksumData, checkumStartIndex); + } + }; + + /** + * Return the offset and length of bytes that need to be read from the + * chunk file to cover the checksum boundaries covering the actual start and + * end of the chunk index to be read. + * For example, lets say the client is reading from index 120 to 450 in the + * chunk. And let's say checksum is stored for every 100 bytes in the chunk + * i.e. the first checksum is for bytes from index 0 to 99, the next for + * bytes from index 100 to 199 and so on. To verify bytes from 120 to 450, + * we would need to read from bytes 100 to 499 so that checksum + * verification can be done. + * + * @param startByteIndex the first byte index to be read by client + * @param dataLen number of bytes to be read from the chunk + */ + private void computeChecksumBoundaries(long startByteIndex, int dataLen) { + + int bytesPerChecksum = chunkInfo.getChecksumData().getBytesPerChecksum(); + // index of the last byte to be read from chunk, inclusively. 
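      // Worked example with the numbers from the javadoc above: reading indices
      // 120 through 450 with bytesPerChecksum = 100 gives startByteIndex = 120,
      // dataLen = 331 and endByteIndex = 450, so bufferOffset =
      // (120 / 100) * 100 = 100 and endIndex = ((450 / 100) + 1) * 100 = 500,
      // i.e. bufferLength = 400 and bytes 100..499 are read (capped at the
      // chunk length if the chunk is shorter).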
+ final long endByteIndex = startByteIndex + dataLen - 1; + + bufferOffset = (startByteIndex / bytesPerChecksum) + * bytesPerChecksum; // inclusive + final long endIndex = ((endByteIndex / bytesPerChecksum) + 1) + * bytesPerChecksum; // exclusive + bufferLength = Math.min(endIndex, length) - bufferOffset; + } + + /** + * Adjust the buffers position to account for seeked position and/ or checksum + * boundary reads. + * @param bufferPosition the position to which the buffers must be advanced + */ + private void adjustBufferPosition(long bufferPosition) { + // The bufferPosition is w.r.t the current chunk. + // Adjust the bufferIndex and position to the seeked chunkPosition. + long tempOffest = 0; + for (int i = 0; i < buffers.size(); i++) { + if (bufferPosition - tempOffest >= buffers.get(i).capacity()) { + tempOffest += buffers.get(i).capacity(); + } else { + bufferIndex = i; + break; + } + } + buffers.get(bufferIndex).position((int) (bufferPosition - tempOffest)); + + // Reset the chunkPosition as chunk stream has been initialized i.e. the + // buffers have been allocated. + resetPosition(); + } + + /** + * Check if the buffers have been allocated data and false otherwise. + */ + private boolean buffersAllocated() { + return buffers != null && !buffers.isEmpty(); + } + + /** + * Check if the buffers have any data remaining between the current + * position and the limit. + */ + private boolean buffersHaveData() { + boolean hasData = false; + + if (buffersAllocated()) { + while (bufferIndex < (buffers.size())) { + if (buffers.get(bufferIndex).hasRemaining()) { + // current buffer has data + hasData = true; + break; + } else { + if (buffersRemaining()) { + // move to next available buffer + ++bufferIndex; + Preconditions.checkState(bufferIndex < buffers.size()); + } else { + // no more buffers remaining + break; + } + } + } + } + + return hasData; + } + + private boolean buffersRemaining() { + return (bufferIndex < (buffers.size() - 1)); + } + + /** + * Check if curernt buffers have the data corresponding to the input position. + */ + private boolean buffersHavePosition(long pos) { + // Check if buffers have been allocated + if (buffersAllocated()) { + // Check if the current buffers cover the input position + return pos >= bufferOffset && + pos < bufferOffset + bufferLength; + } + return false; + } + + /** + * Check if there is more data in the chunk which has not yet been read + * into the buffers. + */ + private boolean dataRemainingInChunk() { + long bufferPos; + if (chunkPosition >= 0) { + bufferPos = chunkPosition; + } else { + bufferPos = bufferOffset + bufferLength; + } + + return bufferPos < length; + } + + /** + * Check if end of chunkStream has been reached. + */ + private boolean chunkStreamEOF() { + if (!allocated) { + // Chunk data has not been read yet + return false; + } + + if (buffersHaveData() || dataRemainingInChunk()) { + return false; + } else { + Preconditions.checkState(bufferOffset + bufferLength == length, + "EOF detected, but not at the last byte of the chunk"); + return true; + } + } + + /** + * If EOF is reached, release the buffers. + */ + private void releaseBuffers() { + buffers = null; + bufferIndex = 0; + } + + /** + * Reset the chunkPosition once the buffers are allocated. 
+ */ + void resetPosition() { + this.chunkPosition = -1; + } + + String getChunkName() { + return chunkInfo.getChunkName(); + } + + protected long getLength() { + return length; + } + + @VisibleForTesting + protected long getChunkPosition() { + return chunkPosition; + } +} diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index b6ceb2b2af738..a1985f05eea44 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -1,32 +1,33 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ + package org.apache.hadoop.hdds.scm.storage; +import com.google.common.primitives.Bytes; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ChecksumData; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ChecksumType; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; +import org.apache.hadoop.ozone.common.Checksum; +import org.apache.hadoop.security.token.Token; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -34,106 +35,127 @@ import java.io.EOFException; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Random; -import java.util.UUID; + +import static org.apache.hadoop.hdds.scm.storage.TestChunkInputStream.generateRandomData; /** - * Tests {@link BlockInputStream}. + * Tests for {@link BlockInputStream}'s functionality. 
*/ public class TestBlockInputStream { - private static BlockInputStream blockInputStream; - private static List chunks; - private static int blockSize; + private static final int CHUNK_SIZE = 100; + private static Checksum checksum; - private static final int CHUNK_SIZE = 20; + private BlockInputStream blockStream; + private byte[] blockData; + private int blockSize; + private List chunks; + private Map chunkDataMap; @Before public void setup() throws Exception { BlockID blockID = new BlockID(new ContainerBlockID(1, 1)); - chunks = createChunkList(10); - String traceID = UUID.randomUUID().toString(); - blockInputStream = new DummyBlockInputStream(blockID, null, null, chunks, - traceID, false, 0); - - blockSize = 0; - for (ChunkInfo chunk : chunks) { - blockSize += chunk.getLen(); - } + checksum = new Checksum(ChecksumType.NONE, CHUNK_SIZE); + createChunkList(5); + + blockStream = new DummyBlockInputStream(blockID, blockSize, null, null, + false, null, null); } /** * Create a mock list of chunks. The first n-1 chunks of length CHUNK_SIZE * and the last chunk with length CHUNK_SIZE/2. - * @param numChunks - * @return */ - private static List createChunkList(int numChunks) { - ChecksumData dummyChecksumData = ChecksumData.newBuilder() - .setType(ChecksumType.NONE) - .setBytesPerChecksum(100) - .build(); - List chunkList = new ArrayList<>(numChunks); - int i; - for (i = 0; i < numChunks - 1; i++) { - String chunkName = "chunk-" + i; + private void createChunkList(int numChunks) + throws Exception { + + chunks = new ArrayList<>(numChunks); + chunkDataMap = new HashMap<>(); + blockData = new byte[0]; + int i, chunkLen; + byte[] byteData; + String chunkName; + + for (i = 0; i < numChunks; i++) { + chunkName = "chunk-" + i; + chunkLen = CHUNK_SIZE; + if (i == numChunks - 1) { + chunkLen = CHUNK_SIZE / 2; + } + byteData = generateRandomData(chunkLen); ChunkInfo chunkInfo = ChunkInfo.newBuilder() .setChunkName(chunkName) .setOffset(0) - .setLen(CHUNK_SIZE) - .setChecksumData(dummyChecksumData) + .setLen(chunkLen) + .setChecksumData(checksum.computeChecksum( + byteData, 0, chunkLen).getProtoBufMessage()) .build(); - chunkList.add(chunkInfo); + + chunkDataMap.put(chunkName, byteData); + chunks.add(chunkInfo); + + blockSize += chunkLen; + blockData = Bytes.concat(blockData, byteData); } - ChunkInfo chunkInfo = ChunkInfo.newBuilder() - .setChunkName("chunk-" + i) - .setOffset(0) - .setLen(CHUNK_SIZE/2) - .setChecksumData(dummyChecksumData) - .build(); - chunkList.add(chunkInfo); - - return chunkList; } /** - * A dummy BlockInputStream to test the functionality of BlockInputStream. + * A dummy BlockInputStream to mock read block call to DN. 
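As a quick aside on the mocked block layout described above: createChunkList builds n-1 full chunks followed by one half chunk, so with the test's CHUNK_SIZE of 100 and createChunkList(5) the block length works out to 450 bytes. A standalone sketch of that arithmetic (not part of the patch):

public class ChunkLayoutSketch {
  public static void main(String[] args) {
    int chunkSize = 100;   // CHUNK_SIZE used by the test
    int numChunks = 5;     // chunks created by createChunkList(5)

    // first n-1 chunks are full, the last one is half a chunk
    int blockSize = (numChunks - 1) * chunkSize + chunkSize / 2;

    System.out.println("blockSize = " + blockSize);  // 450
  }
}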
*/ - private static class DummyBlockInputStream extends BlockInputStream { + private class DummyBlockInputStream extends BlockInputStream { - DummyBlockInputStream(BlockID blockID, - XceiverClientManager xceiverClientManager, - XceiverClientSpi xceiverClient, - List chunks, - String traceID, + DummyBlockInputStream(BlockID blockId, + long blockLen, + Pipeline pipeline, + Token token, boolean verifyChecksum, - long initialPosition) throws IOException { - super(blockID, xceiverClientManager, xceiverClient, chunks, traceID, - verifyChecksum, initialPosition); + String traceId, + XceiverClientManager xceiverClientManager) { + super(blockId, blockLen, pipeline, token, verifyChecksum, + traceId, xceiverClientManager); } @Override - protected ByteString readChunk(final ChunkInfo chunkInfo) - throws IOException { - return getByteString(chunkInfo.getChunkName(), (int) chunkInfo.getLen()); + protected List getChunkInfos() { + return chunks; } @Override - protected List getDatanodeList() { - // return an empty dummy list of size 10 - return new ArrayList<>(10); + protected void addStream(ChunkInfo chunkInfo) { + TestChunkInputStream testChunkInputStream = new TestChunkInputStream(); + getChunkStreams().add(testChunkInputStream.new DummyChunkInputStream( + chunkInfo, null, null, null, false, + chunkDataMap.get(chunkInfo.getChunkName()).clone())); } - /** - * Create ByteString with the input data to return when a readChunk call is - * placed. - */ - private static ByteString getByteString(String data, int length) { - while (data.length() < length) { - data = data + "0"; - } - return ByteString.copyFrom(data.getBytes(), 0, length); + @Override + protected synchronized void checkOpen() throws IOException { + // No action needed + } + } + + private void seekAndVerify(int pos) throws Exception { + blockStream.seek(pos); + Assert.assertEquals("Current position of buffer does not match with the " + + "seeked position", pos, blockStream.getPos()); + } + + /** + * Match readData with the chunkData byte-wise. + * @param readData Data read through ChunkInputStream + * @param inputDataStartIndex first index (inclusive) in chunkData to compare + * with read data + * @param length the number of bytes of data to match starting from + * inputDataStartIndex + */ + private void matchWithInputData(byte[] readData, int inputDataStartIndex, + int length) { + for (int i = inputDataStartIndex; i < inputDataStartIndex + length; i++) { + Assert.assertEquals(blockData[i], readData[i - inputDataStartIndex]); } } @@ -143,17 +165,26 @@ public void testSeek() throws Exception { int pos = 0; seekAndVerify(pos); Assert.assertEquals("ChunkIndex is incorrect", 0, - blockInputStream.getChunkIndex()); + blockStream.getChunkIndex()); + // Before BlockInputStream is initialized (initialization happens during + // read operation), seek should update the BlockInputStream#blockPosition pos = CHUNK_SIZE; seekAndVerify(pos); + Assert.assertEquals("ChunkIndex is incorrect", 0, + blockStream.getChunkIndex()); + Assert.assertEquals(pos, blockStream.getBlockPosition()); + + // Initialize the BlockInputStream. After initializtion, the chunkIndex + // should be updated to correspond to the seeked position. 
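As a rough illustration of the lazy-seek behaviour the comments above describe, the sketch below (hypothetical names, not the patch's classes) records a seek target until the chunk offsets are known, then resolves it to a chunk index with the same binary-search adjustment used elsewhere in this change:

import java.util.Arrays;

public class LazySeekSketch {
  private long pendingPosition = -1;  // seek target remembered before initialization
  private long[] chunkOffsets;        // start offset of each chunk, known after initialize()
  private int chunkIndex;

  public void seek(long pos) {
    if (chunkOffsets == null) {
      pendingPosition = pos;          // not initialized yet: only record the position
    } else {
      chunkIndex = indexForPosition(pos);
    }
  }

  public void initialize(long[] offsets) {
    chunkOffsets = offsets;
    if (pendingPosition >= 0) {       // replay the remembered seek
      chunkIndex = indexForPosition(pendingPosition);
      pendingPosition = -1;
    }
  }

  private int indexForPosition(long pos) {
    int i = Arrays.binarySearch(chunkOffsets, pos);
    // binarySearch returns (-insertionPoint - 1) when pos is not an exact chunk start
    return i >= 0 ? i : -i - 2;
  }

  public static void main(String[] args) {
    LazySeekSketch s = new LazySeekSketch();
    s.seek(100);                       // before initialize(): only stored
    s.initialize(new long[]{0, 100, 200, 300, 400});
    System.out.println(s.chunkIndex);  // 1
  }
}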
+ blockStream.initialize(); Assert.assertEquals("ChunkIndex is incorrect", 1, - blockInputStream.getChunkIndex()); + blockStream.getChunkIndex()); - pos = (CHUNK_SIZE * 5) + 5; + pos = (CHUNK_SIZE * 4) + 5; seekAndVerify(pos); - Assert.assertEquals("ChunkIndex is incorrect", 5, - blockInputStream.getChunkIndex()); + Assert.assertEquals("ChunkIndex is incorrect", 4, + blockStream.getChunkIndex()); try { // Try seeking beyond the blockSize. @@ -161,7 +192,7 @@ public void testSeek() throws Exception { seekAndVerify(pos); Assert.fail("Seek to position beyond block size should fail."); } catch (EOFException e) { - // Expected + System.out.println(e); } // Seek to random positions between 0 and the block size. @@ -173,20 +204,32 @@ public void testSeek() throws Exception { } @Test - public void testBlockEOF() throws Exception { - // Seek to some position < blockSize and verify EOF is not reached. - seekAndVerify(CHUNK_SIZE); - Assert.assertFalse(blockInputStream.blockStreamEOF()); - - // Seek to blockSize-1 and verify that EOF is not reached as the chunk - // has not been read from container yet. - seekAndVerify(blockSize-1); - Assert.assertFalse(blockInputStream.blockStreamEOF()); + public void testRead() throws Exception { + // read 200 bytes of data starting from position 50. Chunk0 contains + // indices 0 to 99, chunk1 from 100 to 199 and chunk3 from 200 to 299. So + // the read should result in 3 ChunkInputStream reads + seekAndVerify(50); + byte[] b = new byte[200]; + blockStream.read(b, 0, 200); + matchWithInputData(b, 50, 200); + + // The new position of the blockInputStream should be the last index read + // + 1. + Assert.assertEquals(250, blockStream.getPos()); + Assert.assertEquals(2, blockStream.getChunkIndex()); } - private void seekAndVerify(int pos) throws Exception { - blockInputStream.seek(pos); - Assert.assertEquals("Current position of buffer does not match with the " + - "seeked position", pos, blockInputStream.getPos()); + @Test + public void testSeekAndRead() throws Exception { + // Seek to a position and read data + seekAndVerify(50); + byte[] b1 = new byte[100]; + blockStream.read(b1, 0, 100); + matchWithInputData(b1, 50, 100); + + // Next read should start from the position of the last read + 1 i.e. 100 + byte[] b2 = new byte[100]; + blockStream.read(b2, 0, 100); + matchWithInputData(b2, 150, 100); } } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java new file mode 100644 index 0000000000000..b113bc7f68580 --- /dev/null +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.storage; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.common.Checksum; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.EOFException; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +/** + * Tests for {@link ChunkInputStream}'s functionality. + */ +public class TestChunkInputStream { + + private static final int CHUNK_SIZE = 100; + private static final int BYTES_PER_CHECKSUM = 20; + private static final String CHUNK_NAME = "dummyChunk"; + private static final Random RANDOM = new Random(); + private static Checksum checksum; + + private DummyChunkInputStream chunkStream; + private ChunkInfo chunkInfo; + private byte[] chunkData; + + @Before + public void setup() throws Exception { + checksum = new Checksum(ChecksumType.valueOf( + OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT), + BYTES_PER_CHECKSUM); + + chunkData = generateRandomData(CHUNK_SIZE); + + chunkInfo = ChunkInfo.newBuilder() + .setChunkName(CHUNK_NAME) + .setOffset(0) + .setLen(CHUNK_SIZE) + .setChecksumData(checksum.computeChecksum( + chunkData, 0, CHUNK_SIZE).getProtoBufMessage()) + .build(); + + chunkStream = new DummyChunkInputStream(chunkInfo, null, null, null, true); + } + + static byte[] generateRandomData(int length) { + byte[] bytes = new byte[length]; + RANDOM.nextBytes(bytes); + return bytes; + } + + /** + * A dummy ChunkInputStream to mock read chunk calls to DN. + */ + public class DummyChunkInputStream extends ChunkInputStream { + + // Stores the read chunk data in each readChunk call + private List readByteBuffers = new ArrayList<>(); + + DummyChunkInputStream(ChunkInfo chunkInfo, + BlockID blockId, + String traceId, + XceiverClientSpi xceiverClient, + boolean verifyChecksum) { + super(chunkInfo, blockId, traceId, xceiverClient, verifyChecksum); + } + + public DummyChunkInputStream(ChunkInfo chunkInfo, + BlockID blockId, + String traceId, + XceiverClientSpi xceiverClient, + boolean verifyChecksum, + byte[] data) { + super(chunkInfo, blockId, traceId, xceiverClient, verifyChecksum); + chunkData = data; + } + + @Override + protected ByteString readChunk(ChunkInfo readChunkInfo) { + ByteString byteString = ByteString.copyFrom(chunkData, + (int) readChunkInfo.getOffset(), + (int) readChunkInfo.getLen()); + readByteBuffers.add(byteString); + return byteString; + } + + @Override + protected void checkOpen() { + // No action needed + } + } + + /** + * Match readData with the chunkData byte-wise. 
+ * @param readData Data read through ChunkInputStream + * @param inputDataStartIndex first index (inclusive) in chunkData to compare + * with read data + * @param length the number of bytes of data to match starting from + * inputDataStartIndex + */ + private void matchWithInputData(byte[] readData, int inputDataStartIndex, + int length) { + for (int i = inputDataStartIndex; i < inputDataStartIndex + length; i++) { + Assert.assertEquals(chunkData[i], readData[i - inputDataStartIndex]); + } + } + + /** + * Seek to a position and verify through getPos(). + */ + private void seekAndVerify(int pos) throws Exception { + chunkStream.seek(pos); + Assert.assertEquals("Current position of buffer does not match with the " + + "seeked position", pos, chunkStream.getPos()); + } + + @Test + public void testFullChunkRead() throws Exception { + byte[] b = new byte[CHUNK_SIZE]; + chunkStream.read(b, 0, CHUNK_SIZE); + + matchWithInputData(b, 0, CHUNK_SIZE); + } + + @Test + public void testPartialChunkRead() throws Exception { + int len = CHUNK_SIZE / 2; + byte[] b = new byte[len]; + + chunkStream.read(b, 0, len); + + matchWithInputData(b, 0, len); + + // To read chunk data from index 0 to 49 (len = 50), we need to read + // chunk from offset 0 to 60 as the checksum boundary is at every 20 + // bytes. Verify that 60 bytes of chunk data are read and stored in the + // buffers. + matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(), + 0, 60); + + } + + @Test + public void testSeek() throws Exception { + seekAndVerify(0); + + try { + seekAndVerify(CHUNK_SIZE); + Assert.fail("Seeking to Chunk Length should fail."); + } catch (EOFException e) { + GenericTestUtils.assertExceptionContains("EOF encountered at pos: " + + CHUNK_SIZE + " for chunk: " + CHUNK_NAME, e); + } + + // Seek before read should update the ChunkInputStream#chunkPosition + seekAndVerify(25); + Assert.assertEquals(25, chunkStream.getChunkPosition()); + + // Read from the seeked position. + // Reading from index 25 to 54 should result in the ChunkInputStream + // copying chunk data from index 20 to 59 into the buffers (checksum + // boundaries). + byte[] b = new byte[30]; + chunkStream.read(b, 0, 30); + matchWithInputData(b, 25, 30); + matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(), + 20, 40); + + // After read, the position of the chunkStream is evaluated from the + // buffers and the chunkPosition should be reset to -1. + Assert.assertEquals(-1, chunkStream.getChunkPosition()); + + // Seek to a position within the current buffers. Current buffers contain + // data from index 20 to 59. ChunkPosition should still not be used to + // set the position. + seekAndVerify(35); + Assert.assertEquals(-1, chunkStream.getChunkPosition()); + + // Seek to a position outside the current buffers. In this case, the + // chunkPosition should be updated to the seeked position. + seekAndVerify(75); + Assert.assertEquals(75, chunkStream.getChunkPosition()); + } + + @Test + public void testSeekAndRead() throws Exception { + // Seek to a position and read data + seekAndVerify(50); + byte[] b1 = new byte[20]; + chunkStream.read(b1, 0, 20); + matchWithInputData(b1, 50, 20); + + // Next read should start from the position of the last read + 1 i.e. 
70 + byte[] b2 = new byte[20]; + chunkStream.read(b2, 0, 20); + matchWithInputData(b2, 70, 20); + } +} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java index 1a359fe5c4d39..0e70515a492df 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java @@ -225,15 +225,17 @@ private byte[] computeMD5Checksum(byte[] data, int offset, int len) { /** * Computes the ChecksumData for the input data and verifies that it - * matches with that of the input checksumData. + * matches with that of the input checksumData, starting from index + * startIndex. * @param byteString input data * @param checksumData checksumData to match with + * @param startIndex index of first checksum in checksumData to match with + * data's computed checksum. * @throws OzoneChecksumException is thrown if checksums do not match */ - public static boolean verifyChecksum( - ByteString byteString, ChecksumData checksumData) - throws OzoneChecksumException { - return verifyChecksum(byteString.toByteArray(), checksumData); + public static boolean verifyChecksum(ByteString byteString, + ChecksumData checksumData, int startIndex) throws OzoneChecksumException { + return verifyChecksum(byteString.toByteArray(), checksumData, startIndex); } /** @@ -245,6 +247,20 @@ public static boolean verifyChecksum( */ public static boolean verifyChecksum(byte[] data, ChecksumData checksumData) throws OzoneChecksumException { + return verifyChecksum(data, checksumData, 0); + } + + /** + * Computes the ChecksumData for the input data and verifies that it + * matches with that of the input checksumData. + * @param data input data + * @param checksumData checksumData to match with + * @param startIndex index of first checksum in checksumData to match with + * data's computed checksum. + * @throws OzoneChecksumException is thrown if checksums do not match + */ + public static boolean verifyChecksum(byte[] data, ChecksumData checksumData, + int startIndex) throws OzoneChecksumException { ChecksumType checksumType = checksumData.getChecksumType(); if (checksumType == ChecksumType.NONE) { // Checksum is set to NONE. No further verification is required. @@ -256,7 +272,8 @@ public static boolean verifyChecksum(byte[] data, ChecksumData checksumData) ChecksumData computedChecksumData = checksum.computeChecksum(data, 0, data.length); - return checksumData.verifyChecksumDataMatches(computedChecksumData); + return checksumData.verifyChecksumDataMatches(computedChecksumData, + startIndex); } /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java index dafa0e32a25b3..c0799bb25eef2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java @@ -111,13 +111,20 @@ public static ChecksumData getFromProtoBuf( } /** - * Verify that this ChecksumData matches with the input ChecksumData. + * Verify that this ChecksumData from startIndex to endIndex matches with the + * provided ChecksumData. 
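The partial reads in the tests above are widened to whole checksum windows, and the verifyChecksum/verifyChecksumDataMatches changes here add a startIndex so only the stored checksums covering that widened range are compared. A standalone sketch of the arithmetic, assuming 20 bytes per checksum as in the test (hypothetical variable names):

public class ChecksumAlignmentSketch {
  public static void main(String[] args) {
    int bytesPerChecksum = 20;

    // a request for 30 bytes starting at offset 25, i.e. bytes [25, 55)
    long readStart = 25;
    long readLen = 30;

    // widen the range to whole checksum windows: [20, 60)
    long alignedStart = (readStart / bytesPerChecksum) * bytesPerChecksum;
    long alignedEnd = ((readStart + readLen + bytesPerChecksum - 1)
        / bytesPerChecksum) * bytesPerChecksum;

    // the stored checksum to start comparing from is the one covering alignedStart
    int checksumStartIndex = (int) (alignedStart / bytesPerChecksum);

    System.out.println("read " + alignedStart + " to " + alignedEnd
        + ", verify from checksum index " + checksumStartIndex);  // 20 to 60, index 1
  }
}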
+ * The checksum at startIndex of this ChecksumData will be matched with the + * checksum at index 0 of the provided ChecksumData, and checksum at + * (startIndex + 1) of this ChecksumData with checksum at index 1 of + * provided ChecksumData and so on. * @param that the ChecksumData to match with + * @param startIndex index of the first checksum from this ChecksumData + * which will be used to compare checksums * @return true if checksums match * @throws OzoneChecksumException */ - public boolean verifyChecksumDataMatches(ChecksumData that) throws - OzoneChecksumException { + public boolean verifyChecksumDataMatches(ChecksumData that, int startIndex) + throws OzoneChecksumException { // pre checks if (this.checksums.size() == 0) { @@ -130,18 +137,22 @@ public boolean verifyChecksumDataMatches(ChecksumData that) throws "checksums"); } - if (this.checksums.size() != that.checksums.size()) { - throw new OzoneChecksumException("Original and Computed checksumData's " + - "has different number of checksums"); - } + int numChecksums = that.checksums.size(); - // Verify that checksum matches at each index - for (int index = 0; index < this.checksums.size(); index++) { - if (!matchChecksumAtIndex(this.checksums.get(index), - that.checksums.get(index))) { - // checksum mismatch. throw exception. - throw new OzoneChecksumException(index); + try { + // Verify that checksum matches at each index + for (int index = 0; index < numChecksums; index++) { + if (!matchChecksumAtIndex(this.checksums.get(startIndex + index), + that.checksums.get(index))) { + // checksum mismatch. throw exception. + throw new OzoneChecksumException(index); + } } + } catch (ArrayIndexOutOfBoundsException e) { + throw new OzoneChecksumException("Computed checksum has " + + numChecksums + " number of checksums. 
Original checksum has " + + (this.checksums.size() - startIndex) + " number of checksums " + + "starting from index " + startIndex); } return true; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java index 5b6342028a4de..41ac60f0bd855 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java @@ -20,19 +20,10 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.fs.Seekable; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.storage.BlockInputStream; -import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.util.Preconditions; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,60 +44,93 @@ public class KeyInputStream extends InputStream implements Seekable { private static final int EOF = -1; - private final ArrayList streamEntries; - // streamOffset[i] stores the offset at which blockInputStream i stores - // data in the key - private long[] streamOffset = null; - private int currentStreamIndex; + private String key; private long length = 0; private boolean closed = false; - private String key; - public KeyInputStream() { - streamEntries = new ArrayList<>(); - currentStreamIndex = 0; - } + // List of BlockInputStreams, one for each block in the key + private final List blockStreams; - @VisibleForTesting - public synchronized int getCurrentStreamIndex() { - return currentStreamIndex; - } + // blockOffsets[i] stores the index of the first data byte in + // blockStream w.r.t the key data. + // For example, let’s say the block size is 200 bytes and block[0] stores + // data from indices 0 - 199, block[1] from indices 200 - 399 and so on. + // Then, blockOffset[0] = 0 (the offset of the first byte of data in + // block[0]), blockOffset[1] = 200 and so on. + private long[] blockOffsets = null; - @VisibleForTesting - public long getRemainingOfIndex(int index) throws IOException { - return streamEntries.get(index).getRemaining(); + // Index of the blockStream corresponding to the current position of the + // KeyInputStream i.e. offset of the data to be read next + private int blockIndex; + + // Tracks the blockIndex corresponding to the last seeked position so that it + // can be reset if a new position is seeked. + private int blockIndexOfPrevPosition; + + public KeyInputStream() { + blockStreams = new ArrayList<>(); + blockIndex = 0; } /** - * Append another stream to the end of the list. - * - * @param stream the stream instance. - * @param streamLength the max number of bytes that should be written to this - * stream. 
+ * For each block in keyInfo, add a BlockInputStream to blockStreams. */ - @VisibleForTesting - public synchronized void addStream(BlockInputStream stream, - long streamLength) { - streamEntries.add(new ChunkInputStreamEntry(stream, streamLength)); + public static LengthInputStream getFromOmKeyInfo(OmKeyInfo keyInfo, + XceiverClientManager xceiverClientManager, String requestId, + boolean verifyChecksum) { + List keyLocationInfos = keyInfo + .getLatestVersionLocations().getBlocksLatestVersionOnly(); + + KeyInputStream keyInputStream = new KeyInputStream(); + keyInputStream.initialize(keyInfo.getKeyName(), keyLocationInfos, + xceiverClientManager, requestId, verifyChecksum); + + return new LengthInputStream(keyInputStream, keyInputStream.length); + } + + private synchronized void initialize(String keyName, + List blockInfos, + XceiverClientManager xceiverClientManager, String requestId, + boolean verifyChecksum) { + this.key = keyName; + this.blockOffsets = new long[blockInfos.size()]; + long keyLength = 0; + for (int i = 0; i < blockInfos.size(); i++) { + OmKeyLocationInfo omKeyLocationInfo = blockInfos.get(i); + LOG.debug("Adding stream for accessing {}. The stream will be " + + "initialized later.", omKeyLocationInfo); + + addStream(omKeyLocationInfo, xceiverClientManager, requestId, + verifyChecksum); + + this.blockOffsets[i] = keyLength; + keyLength += omKeyLocationInfo.getLength(); + } + this.length = keyLength; } /** - * Append another ChunkInputStreamEntry to the end of the list. - * The stream will be constructed from the input information when it needs - * to be accessed. + * Append another BlockInputStream to the end of the list. Note that the + * BlockInputStream is only created here and not initialized. The + * BlockInputStream is initialized when a read operation is performed on + * the block for the first time. 
*/ - private synchronized void addStream(OmKeyLocationInfo omKeyLocationInfo, + private synchronized void addStream(OmKeyLocationInfo blockInfo, XceiverClientManager xceiverClientMngr, String clientRequestId, boolean verifyChecksum) { - streamEntries.add(new ChunkInputStreamEntry(omKeyLocationInfo, - xceiverClientMngr, clientRequestId, verifyChecksum)); + blockStreams.add(new BlockInputStream(blockInfo.getBlockID(), + blockInfo.getLength(), blockInfo.getPipeline(), blockInfo.getToken(), + verifyChecksum, clientRequestId, xceiverClientMngr)); } - private synchronized ChunkInputStreamEntry getStreamEntry(int index) - throws IOException { - return streamEntries.get(index).getStream(); + @VisibleForTesting + public void addStream(BlockInputStream blockInputStream) { + blockStreams.add(blockInputStream); } + /** + * {@inheritDoc} + */ @Override public synchronized int read() throws IOException { byte[] buf = new byte[1]; @@ -116,9 +140,12 @@ public synchronized int read() throws IOException { return Byte.toUnsignedInt(buf[0]); } + /** + * {@inheritDoc} + */ @Override public synchronized int read(byte[] b, int off, int len) throws IOException { - checkNotClosed(); + checkOpen(); if (b == null) { throw new NullPointerException(); } @@ -131,13 +158,15 @@ public synchronized int read(byte[] b, int off, int len) throws IOException { int totalReadLen = 0; while (len > 0) { // if we are at the last block and have read the entire block, return - if (streamEntries.size() == 0 || - (streamEntries.size() - 1 <= currentStreamIndex && - streamEntries.get(currentStreamIndex) - .getRemaining() == 0)) { + if (blockStreams.size() == 0 || + (blockStreams.size() - 1 <= blockIndex && + blockStreams.get(blockIndex) + .getRemaining() == 0)) { return totalReadLen == 0 ? EOF : totalReadLen; } - ChunkInputStreamEntry current = getStreamEntry(currentStreamIndex); + + // Get the current blockStream and read data from it + BlockInputStream current = blockStreams.get(blockIndex); int numBytesToRead = Math.min(len, (int)current.getRemaining()); int numBytesRead = current.read(b, off, numBytesToRead); if (numBytesRead != numBytesToRead) { @@ -146,23 +175,35 @@ public synchronized int read(byte[] b, int off, int len) throws IOException { // this case. throw new IOException(String.format( "Inconsistent read for blockID=%s length=%d numBytesRead=%d", - current.blockInputStream.getBlockID(), current.length, - numBytesRead)); + current.getBlockID(), current.getLength(), numBytesRead)); } totalReadLen += numBytesRead; off += numBytesRead; len -= numBytesRead; if (current.getRemaining() <= 0 && - ((currentStreamIndex + 1) < streamEntries.size())) { - currentStreamIndex += 1; + ((blockIndex + 1) < blockStreams.size())) { + blockIndex += 1; } } return totalReadLen; } + /** + * Seeks the KeyInputStream to the specified position. This involves 2 steps: + * 1. Updating the blockIndex to the blockStream corresponding to the + * seeked position. + * 2. Seeking the corresponding blockStream to the adjusted position. + * + * For example, let’s say the block size is 200 bytes and block[0] stores + * data from indices 0 - 199, block[1] from indices 200 - 399 and so on. + * Let’s say we seek to position 240. In the first step, the blockIndex + * would be updated to 1 as indices 200 - 399 reside in blockStream[1]. In + * the second step, the blockStream[1] would be seeked to position 40 (= + * 240 - blockOffset[1] (= 200)). 
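A standalone sketch of the offset lookup in that example (hypothetical values, three 200-byte blocks): seeking to position 240 resolves to block index 1 and a block-local position of 40.

import java.util.Arrays;

public class BlockSeekSketch {
  public static void main(String[] args) {
    long[] blockOffsets = {0, 200, 400};  // first byte of each block within the key
    long pos = 240;

    int blockIndex = Arrays.binarySearch(blockOffsets, pos);
    if (blockIndex < 0) {
      // binarySearch returns (-insertionPoint - 1) when pos is not an exact block
      // start, so insertionPoint - 1 is the block containing pos
      blockIndex = -blockIndex - 2;
    }
    long posInBlock = pos - blockOffsets[blockIndex];

    System.out.println("blockIndex=" + blockIndex + " posInBlock=" + posInBlock);
    // blockIndex=1 posInBlock=40
  }
}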
+ */ @Override - public void seek(long pos) throws IOException { - checkNotClosed(); + public synchronized void seek(long pos) throws IOException { + checkOpen(); if (pos < 0 || pos >= length) { if (pos == 0) { // It is possible for length and pos to be zero in which case @@ -172,35 +213,39 @@ public void seek(long pos) throws IOException { throw new EOFException( "EOF encountered at pos: " + pos + " for key: " + key); } - Preconditions.assertTrue(currentStreamIndex >= 0); - if (currentStreamIndex >= streamEntries.size()) { - currentStreamIndex = Arrays.binarySearch(streamOffset, pos); - } else if (pos < streamOffset[currentStreamIndex]) { - currentStreamIndex = - Arrays.binarySearch(streamOffset, 0, currentStreamIndex, pos); - } else if (pos >= streamOffset[currentStreamIndex] + streamEntries - .get(currentStreamIndex).length) { - currentStreamIndex = Arrays - .binarySearch(streamOffset, currentStreamIndex + 1, - streamEntries.size(), pos); + + // 1. Update the blockIndex + if (blockIndex >= blockStreams.size()) { + blockIndex = Arrays.binarySearch(blockOffsets, pos); + } else if (pos < blockOffsets[blockIndex]) { + blockIndex = + Arrays.binarySearch(blockOffsets, 0, blockIndex, pos); + } else if (pos >= blockOffsets[blockIndex] + blockStreams + .get(blockIndex).getLength()) { + blockIndex = Arrays + .binarySearch(blockOffsets, blockIndex + 1, + blockStreams.size(), pos); } - if (currentStreamIndex < 0) { + if (blockIndex < 0) { // Binary search returns -insertionPoint - 1 if element is not present // in the array. insertionPoint is the point at which element would be - // inserted in the sorted array. We need to adjust the currentStreamIndex - // accordingly so that currentStreamIndex = insertionPoint - 1 - currentStreamIndex = -currentStreamIndex - 2; + // inserted in the sorted array. We need to adjust the blockIndex + // accordingly so that blockIndex = insertionPoint - 1 + blockIndex = -blockIndex - 2; } - // seek to the proper offset in the BlockInputStream - streamEntries.get(currentStreamIndex) - .seek(pos - streamOffset[currentStreamIndex]); + + // Reset the previous blockStream's position + blockStreams.get(blockIndexOfPrevPosition).resetPosition(); + + // 2. Seek the blockStream to the adjusted position + blockStreams.get(blockIndex).seek(pos - blockOffsets[blockIndex]); + blockIndexOfPrevPosition = blockIndex; } @Override - public long getPos() throws IOException { - return length == 0 ? 0 : - streamOffset[currentStreamIndex] + streamEntries.get(currentStreamIndex) - .getPos(); + public synchronized long getPos() throws IOException { + return length == 0 ? 0 : blockOffsets[blockIndex] + + blockStreams.get(blockIndex).getPos(); } @Override @@ -210,7 +255,7 @@ public boolean seekToNewSource(long targetPos) throws IOException { @Override public int available() throws IOException { - checkNotClosed(); + checkOpen(); long remaining = length - getPos(); return remaining <= Integer.MAX_VALUE ? (int) remaining : Integer.MAX_VALUE; } @@ -218,177 +263,30 @@ public int available() throws IOException { @Override public void close() throws IOException { closed = true; - for (int i = 0; i < streamEntries.size(); i++) { - streamEntries.get(i).close(); + for (BlockInputStream blockStream : blockStreams) { + blockStream.close(); } } - /** - * Encapsulates BlockInputStream. 
- */ - public static class ChunkInputStreamEntry extends InputStream - implements Seekable { - - private BlockInputStream blockInputStream; - private final OmKeyLocationInfo blockLocationInfo; - private final long length; - private final XceiverClientManager xceiverClientManager; - private final String requestId; - private boolean verifyChecksum; - - // the position of the blockInputStream is maintained by this variable - // till the stream is initialized - private long position; - - public ChunkInputStreamEntry(OmKeyLocationInfo omKeyLocationInfo, - XceiverClientManager xceiverClientMngr, String clientRequestId, - boolean verifyChecksum) { - this.blockLocationInfo = omKeyLocationInfo; - this.length = omKeyLocationInfo.getLength(); - this.xceiverClientManager = xceiverClientMngr; - this.requestId = clientRequestId; - this.verifyChecksum = verifyChecksum; - } - - @VisibleForTesting - public ChunkInputStreamEntry(BlockInputStream blockInputStream, - long length) { - this.blockInputStream = blockInputStream; - this.length = length; - this.blockLocationInfo = null; - this.xceiverClientManager = null; - this.requestId = null; - } - - private ChunkInputStreamEntry getStream() throws IOException { - if (this.blockInputStream == null) { - initializeBlockInputStream(); - } - return this; - } - - private void initializeBlockInputStream() throws IOException { - BlockID blockID = blockLocationInfo.getBlockID(); - long containerID = blockID.getContainerID(); - Pipeline pipeline = blockLocationInfo.getPipeline(); - - // irrespective of the container state, we will always read via Standalone - // protocol. - if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) { - pipeline = Pipeline.newBuilder(pipeline) - .setType(HddsProtos.ReplicationType.STAND_ALONE).build(); - } - XceiverClientSpi xceiverClient = xceiverClientManager - .acquireClient(pipeline); - boolean success = false; - long containerKey = blockLocationInfo.getLocalID(); - try { - LOG.debug("Initializing stream for get key to access {} {}", - containerID, containerKey); - ContainerProtos.DatanodeBlockID datanodeBlockID = blockID - .getDatanodeBlockIDProtobuf(); - if (blockLocationInfo.getToken() != null) { - UserGroupInformation.getCurrentUser(). 
- addToken(blockLocationInfo.getToken()); - } - ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls - .getBlock(xceiverClient, datanodeBlockID, requestId); - List chunks = - response.getBlockData().getChunksList(); - success = true; - this.blockInputStream = new BlockInputStream( - blockLocationInfo.getBlockID(), xceiverClientManager, xceiverClient, - chunks, requestId, verifyChecksum, position); - } finally { - if (!success) { - xceiverClientManager.releaseClient(xceiverClient, false); - } - } - } - - synchronized long getRemaining() throws IOException { - return length - getPos(); - } - - @Override - public synchronized int read(byte[] b, int off, int len) - throws IOException { - int readLen = blockInputStream.read(b, off, len); - return readLen; - } - - @Override - public synchronized int read() throws IOException { - int data = blockInputStream.read(); - return data; - } - - @Override - public synchronized void close() throws IOException { - if (blockInputStream != null) { - blockInputStream.close(); - } - } - - @Override - public void seek(long pos) throws IOException { - if (blockInputStream != null) { - blockInputStream.seek(pos); - } else { - position = pos; - } - } - - @Override - public long getPos() throws IOException { - if (blockInputStream != null) { - return blockInputStream.getPos(); - } else { - return position; - } - } - - @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; - } - } - - public static LengthInputStream getFromOmKeyInfo( - OmKeyInfo keyInfo, - XceiverClientManager xceiverClientManager, - StorageContainerLocationProtocol - storageContainerLocationClient, - String requestId, boolean verifyChecksum) throws IOException { - long length = 0; - KeyInputStream groupInputStream = new KeyInputStream(); - groupInputStream.key = keyInfo.getKeyName(); - List keyLocationInfos = - keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly(); - groupInputStream.streamOffset = new long[keyLocationInfos.size()]; - for (int i = 0; i < keyLocationInfos.size(); i++) { - OmKeyLocationInfo omKeyLocationInfo = keyLocationInfos.get(i); - LOG.debug("Adding stream for accessing {}. The stream will be " + - "initialized later.", omKeyLocationInfo); - groupInputStream.addStream(omKeyLocationInfo, xceiverClientManager, - requestId, verifyChecksum); - - groupInputStream.streamOffset[i] = length; - length += omKeyLocationInfo.getLength(); - } - groupInputStream.length = length; - return new LengthInputStream(groupInputStream, length); - } - /** * Verify that the input stream is open. Non blocking; this gives * the last state of the volatile {@link #closed} field. * @throws IOException if the connection is closed. 
*/ - private void checkNotClosed() throws IOException { + private void checkOpen() throws IOException { if (closed) { throw new IOException( ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: " + key); } } + + @VisibleForTesting + public synchronized int getCurrentStreamIndex() { + return blockIndex; + } + + @VisibleForTesting + public long getRemainingOfIndex(int index) throws IOException { + return blockStreams.get(index).getRemaining(); + } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 48968a4647f80..5f2df7dd91720 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1072,8 +1072,8 @@ public List getAcl(OzoneObj obj) throws IOException { private OzoneInputStream createInputStream(OmKeyInfo keyInfo, String requestId) throws IOException { LengthInputStream lengthInputStream = KeyInputStream - .getFromOmKeyInfo(keyInfo, xceiverClientManager, - storageContainerLocationClient, requestId, verifyChecksum); + .getFromOmKeyInfo(keyInfo, xceiverClientManager, requestId, + verifyChecksum); FileEncryptionInfo feInfo = keyInfo.getFileEncryptionInfo(); if (feInfo != null) { final KeyProvider.KeyVersion decrypted = getDEK(feInfo); diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java index a4aa361e39862..6876166f8b0c0 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java @@ -503,8 +503,7 @@ public LengthInputStream newKeyReader(KeyArgs args) throws IOException, .build(); OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); return KeyInputStream.getFromOmKeyInfo( - keyInfo, xceiverClientManager, storageContainerLocationClient, - args.getRequestID(), verifyChecksum); + keyInfo, xceiverClientManager, args.getRequestID(), verifyChecksum); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java index 45f04dfae0b44..80717dde86fda 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java @@ -25,7 +25,6 @@ import java.io.ByteArrayInputStream; import java.io.IOException; -import java.util.ArrayList; import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertEquals; @@ -48,8 +47,7 @@ public void testReadGroupInputStream() throws Exception { for (int i = 0; i < 5; i++) { int tempOffset = offset; BlockInputStream in = - new BlockInputStream(null, null, null, new ArrayList<>(), null, - true, 0) { + new BlockInputStream(null, 100, null, null, true, null, null) { private long pos = 0; private ByteArrayInputStream in = new ByteArrayInputStream(buf, tempOffset, 100); @@ -84,7 +82,7 @@ public synchronized int read(byte[] b, int off, int len) } }; offset += 100; - groupInputStream.addStream(in, 100); + groupInputStream.addStream(in); } byte[] 
resBuf = new byte[500]; @@ -105,8 +103,7 @@ public void testErrorReadGroupInputStream() throws Exception { for (int i = 0; i < 5; i++) { int tempOffset = offset; BlockInputStream in = - new BlockInputStream(null, null, null, new ArrayList<>(), null, - true, 0) { + new BlockInputStream(null, 100, null, null, true, null, null) { private long pos = 0; private ByteArrayInputStream in = new ByteArrayInputStream(buf, tempOffset, 100); @@ -141,7 +138,7 @@ public synchronized int read(byte[] b, int off, int len) } }; offset += 100; - groupInputStream.addStream(in, 100); + groupInputStream.addStream(in); } byte[] resBuf = new byte[600]; From 14552d19e3d2fcf6a1b60f6fc2dc06f8414b8b8c Mon Sep 17 00:00:00 2001 From: Huan-Ping Su Date: Fri, 7 Jun 2019 12:55:17 +0100 Subject: [PATCH 0146/1308] HADOOP-16344. Make DurationInfo public unstable. Second attempt: imports fixed up. Contributed by Huan-Ping Su and Kai Xie. Change-Id: Ib2a85dd4b2d12b840692cc854cc53ddb3bdde7d5 --- .../main/java/org/apache/hadoop/util/DurationInfo.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java index 9dd75db27c733..605d060270f8e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java @@ -20,8 +20,8 @@ import org.slf4j.Logger; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; /** * A duration with logging of final state at info or debug @@ -29,8 +29,8 @@ * This allows it to be used in a try-with-resources clause, and have the * duration automatically logged. */ -@InterfaceAudience.Private -@InterfaceStability.Unstable +@Public +@Unstable public class DurationInfo extends OperationDuration implements AutoCloseable { private final String text; From 85479577da1b8934cfbd97fa815985399f19d933 Mon Sep 17 00:00:00 2001 From: S O'Donnell Date: Fri, 7 Jun 2019 17:50:57 +0200 Subject: [PATCH 0147/1308] HDDS-1622. 
Use picocli for StorageContainerManager Closes #918 --- .../hdds/scm/server/SCMStarterInterface.java | 37 ++++ .../scm/server/StorageContainerManager.java | 197 +----------------- .../StorageContainerManagerStarter.java | 153 ++++++++++++++ .../apache/hadoop/hdds/scm/HddsTestUtils.java | 2 +- .../TestStorageContainerManagerStarter.java | 166 +++++++++++++++ hadoop-ozone/common/src/main/bin/ozone | 2 +- .../hadoop/ozone/MiniOzoneClusterImpl.java | 4 +- .../hadoop/ozone/TestSecureOzoneCluster.java | 16 +- .../ozone/TestStorageContainerManager.java | 28 +-- 9 files changed, 380 insertions(+), 225 deletions(-) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java new file mode 100644 index 0000000000000..7d84fc017afa3 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license + * agreements. See the NOTICE file distributed with this work for additional + * information regarding + * copyright ownership. The ASF licenses this file to you under the Apache + * License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a + * copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *

    Unless required by applicable law or agreed to in writing, software + * distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.server; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import java.io.IOException; + +/** + * This interface is used by the StorageContainerManager to allow the + * dependencies to be injected to the CLI class. + */ +public interface SCMStarterInterface { + + void start(OzoneConfiguration conf) throws Exception; + boolean init(OzoneConfiguration conf, String clusterId) + throws IOException; + String generateClusterId(); +} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 7cc5cbaf7f761..f13dc4e5b6164 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -81,7 +81,6 @@ import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RPC; @@ -90,7 +89,6 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.apache.hadoop.ozone.common.Storage.StorageState; -import org.apache.hadoop.ozone.common.StorageInfo; import org.apache.hadoop.ozone.lease.LeaseManager; import org.apache.hadoop.ozone.lock.LockManager; import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher; @@ -98,16 +96,13 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.JvmPauseMonitor; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.utils.HddsVersionInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.management.ObjectName; import java.io.IOException; -import java.io.PrintStream; import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Collection; @@ -120,7 +115,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; -import static org.apache.hadoop.util.ExitUtil.terminate; /** * StorageContainerManager is the main entry point for the service that @@ -140,19 +134,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl private static final Logger LOG = LoggerFactory .getLogger(StorageContainerManager.class); - private static final String USAGE = - "Usage: \n ozone scm [genericOptions] " - + "[ " - + StartupOption.INIT.getName() - + " [ " - + StartupOption.CLUSTERID.getName() - + " ] ]\n " - + "ozone scm [genericOptions] [ " - + 
StartupOption.GENCLUSTERID.getName() - + " ]\n " - + "ozone scm [ " - + StartupOption.HELP.getName() - + " ]\n"; + /** * SCM metrics. */ @@ -586,114 +568,22 @@ public static RPC.Server startRpcServer( } /** - * Main entry point for starting StorageContainerManager. - * - * @param argv arguments - * @throws IOException if startup fails due to I/O error - */ - public static void main(String[] argv) throws IOException { - if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) { - System.exit(0); - } - try { - TracingUtil.initTracing("StorageContainerManager"); - OzoneConfiguration conf = new OzoneConfiguration(); - GenericOptionsParser hParser = new GenericOptionsParser(conf, argv); - if (!hParser.isParseSuccessful()) { - System.err.println("USAGE: " + USAGE + "\n"); - hParser.printGenericCommandUsage(System.err); - System.exit(1); - } - StorageContainerManager scm = createSCM( - hParser.getRemainingArgs(), conf, true); - if (scm != null) { - scm.start(); - scm.join(); - } - } catch (Throwable t) { - LOG.error("Failed to start the StorageContainerManager.", t); - terminate(1, t); - } - } - - private static void printUsage(PrintStream out) { - out.println(USAGE + "\n"); - } - - /** - * Create an SCM instance based on the supplied command-line arguments. - *

    - * This method is intended for unit tests only. It suppresses the - * startup/shutdown message and skips registering Unix signal - * handlers. + * Create an SCM instance based on the supplied configuration. * - * @param args command line arguments. - * @param conf HDDS configuration - * @return SCM instance - * @throws IOException, AuthenticationException - */ - @VisibleForTesting - public static StorageContainerManager createSCM( - String[] args, OzoneConfiguration conf) - throws IOException, AuthenticationException { - return createSCM(args, conf, false); - } - - /** - * Create an SCM instance based on the supplied command-line arguments. - * - * @param args command-line arguments. * @param conf HDDS configuration - * @param printBanner if true, then log a verbose startup message. * @return SCM instance * @throws IOException, AuthenticationException */ - private static StorageContainerManager createSCM( - String[] args, - OzoneConfiguration conf, - boolean printBanner) + public static StorageContainerManager createSCM( + OzoneConfiguration conf) throws IOException, AuthenticationException { - String[] argv = (args == null) ? new String[0] : args; if (!HddsUtils.isHddsEnabled(conf)) { System.err.println( "SCM cannot be started in secure mode or when " + OZONE_ENABLED + "" + " is set to false"); System.exit(1); } - StartupOption startOpt = parseArguments(argv); - if (startOpt == null) { - printUsage(System.err); - terminate(1); - return null; - } - switch (startOpt) { - case INIT: - if (printBanner) { - StringUtils.startupShutdownMessage(StorageContainerManager.class, argv, - LOG); - } - terminate(scmInit(conf) ? 0 : 1); - return null; - case GENCLUSTERID: - if (printBanner) { - StringUtils.startupShutdownMessage(StorageContainerManager.class, argv, - LOG); - } - System.out.println("Generating new cluster id:"); - System.out.println(StorageInfo.newClusterID()); - terminate(0); - return null; - case HELP: - printUsage(System.err); - terminate(0); - return null; - default: - if (printBanner) { - StringUtils.startupShutdownMessage(StorageContainerManager.class, argv, - LOG); - } - return new StorageContainerManager(conf); - } + return new StorageContainerManager(conf); } /** @@ -703,12 +593,12 @@ private static StorageContainerManager createSCM( * @return true if SCM initialization is successful, false otherwise. * @throws IOException if init fails due to I/O error */ - public static boolean scmInit(OzoneConfiguration conf) throws IOException { + public static boolean scmInit(OzoneConfiguration conf, + String clusterId) throws IOException { SCMStorageConfig scmStorageConfig = new SCMStorageConfig(conf); StorageState state = scmStorageConfig.getState(); if (state != StorageState.INITIALIZED) { try { - String clusterId = StartupOption.INIT.getClusterId(); if (clusterId != null && !clusterId.isEmpty()) { scmStorageConfig.setClusterId(clusterId); } @@ -735,48 +625,6 @@ public static boolean scmInit(OzoneConfiguration conf) throws IOException { } } - private static StartupOption parseArguments(String[] args) { - int argsLen = (args == null) ? 
0 : args.length; - StartupOption startOpt = null; - if (argsLen == 0) { - startOpt = StartupOption.REGULAR; - } - for (int i = 0; i < argsLen; i++) { - String cmd = args[i]; - if (StartupOption.INIT.getName().equalsIgnoreCase(cmd)) { - startOpt = StartupOption.INIT; - if (argsLen > 3) { - return null; - } - for (i = i + 1; i < argsLen; i++) { - if (args[i].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) { - i++; - if (i < argsLen && !args[i].isEmpty()) { - startOpt.setClusterId(args[i]); - } else { - // if no cluster id specified or is empty string, return null - LOG.error( - "Must specify a valid cluster ID after the " - + StartupOption.CLUSTERID.getName() - + " flag"); - return null; - } - } else { - return null; - } - } - } else { - if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) { - if (argsLen > 1) { - return null; - } - startOpt = StartupOption.GENCLUSTERID; - } - } - } - return startOpt; - } - /** * Initialize SCM metrics. */ @@ -1219,35 +1067,4 @@ public Map getContainerStateCount() { public SCMMetadataStore getScmMetadataStore() { return scmMetadataStore; } - /** - * Startup options. - */ - public enum StartupOption { - INIT("--init"), - CLUSTERID("--clusterid"), - GENCLUSTERID("--genclusterid"), - REGULAR("--regular"), - HELP("-help"); - - private final String name; - private String clusterId = null; - - StartupOption(String arg) { - this.name = arg; - } - - public String getClusterId() { - return clusterId; - } - - public void setClusterId(String cid) { - if (cid != null && !cid.isEmpty()) { - clusterId = cid; - } - } - - public String getName() { - return name; - } - } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java new file mode 100644 index 0000000000000..62910f2314a1b --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java @@ -0,0 +1,153 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license + * agreements. See the NOTICE file distributed with this work for additional + * information regarding + * copyright ownership. The ASF licenses this file to you under the Apache + * License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a + * copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *

    Unless required by applicable law or agreed to in writing, software + * distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.server; + +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.ozone.common.StorageInfo; +import org.apache.hadoop.util.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine; +import picocli.CommandLine.Command; + +import java.io.IOException; + +/** + * This class provides a command line interface to start the SCM + * using Picocli. + */ + +@Command(name = "ozone scm", + hidden = true, description = "Start or initialize the scm server.", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +public class StorageContainerManagerStarter extends GenericCli { + + private OzoneConfiguration conf; + private SCMStarterInterface receiver; + private static final Logger LOG = + LoggerFactory.getLogger(StorageContainerManagerStarter.class); + + public static void main(String[] args) throws Exception { + TracingUtil.initTracing("StorageContainerManager"); + new StorageContainerManagerStarter( + new StorageContainerManagerStarter.SCMStarterHelper()).run(args); + } + + public StorageContainerManagerStarter(SCMStarterInterface receiverObj) { + super(); + receiver = receiverObj; + } + + @Override + public Void call() throws Exception { + commonInit(); + startScm(); + return null; + } + + /** + * This function implements a sub-command to generate a new + * cluster ID from the command line. + */ + @CommandLine.Command(name = "--genclusterid", + customSynopsis = "ozone scm [global options] --genclusterid [options]", + hidden = false, + description = "Generate a new Cluster ID", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) + public void generateClusterId() { + commonInit(); + System.out.println("Generating new cluster id:"); + System.out.println(receiver.generateClusterId()); + } + + /** + * This function implements a sub-command to allow the SCM to be + * initialized from the command line. + * + * @param clusterId - Cluster ID to use when initializing. If null, + * a random ID will be generated and used. + */ + @CommandLine.Command(name = "--init", + customSynopsis = "ozone scm [global options] --init [options]", + hidden = false, + description = "Initialize the SCM if not already initialized", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) + public void initScm(@CommandLine.Option(names = { "--clusterid" }, + description = "Optional: The cluster id to use when formatting SCM", + paramLabel = "id") String clusterId) + throws Exception { + commonInit(); + boolean result = receiver.init(conf, clusterId); + if (!result) { + throw new IOException("scm init failed"); + } + } + + /** + * This function is used by the command line to start the SCM. + */ + private void startScm() throws Exception { + receiver.start(conf); + } + + /** + * This function should be called by each command to ensure the configuration + * is set and print the startup banner message. 
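A self-contained picocli sketch of the pattern used in this class: a top-level command whose call() starts the service, plus an "--init" style method sub-command taking an optional "--clusterid". Class and command names are hypothetical, and a recent picocli (4.x) is assumed for execute():

import java.util.concurrent.Callable;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

@Command(name = "demo-starter", mixinStandardHelpOptions = true,
    description = "Start or initialize a demo service.")
public class DemoStarter implements Callable<Void> {

  @Override
  public Void call() {
    // no sub-command given: start the service
    System.out.println("starting service");
    return null;
  }

  // a method sub-command, registered automatically because of its @Command annotation
  @Command(name = "--init", description = "Initialize the service if not already done")
  void init(@Option(names = "--clusterid",
      description = "Optional cluster id to use when formatting") String clusterId) {
    System.out.println("init, clusterId=" + clusterId);
  }

  public static void main(String[] args) {
    System.exit(new CommandLine(new DemoStarter()).execute(args));
  }
}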
+ */ + private void commonInit() { + conf = createOzoneConfiguration(); + + String[] originalArgs = getCmd().getParseResult().originalArgs() + .toArray(new String[0]); + StringUtils.startupShutdownMessage(StorageContainerManager.class, + originalArgs, LOG); + } + + /** + * This static class wraps the external dependencies needed for this command + * to execute its tasks. This allows the dependency to be injected for unit + * testing. + */ + static class SCMStarterHelper implements SCMStarterInterface { + + public void start(OzoneConfiguration conf) throws Exception { + StorageContainerManager stm = StorageContainerManager.createSCM(conf); + stm.start(); + stm.join(); + } + + public boolean init(OzoneConfiguration conf, String clusterId) + throws IOException{ + return StorageContainerManager.scmInit(conf, clusterId); + } + + public String generateClusterId() { + return StorageInfo.newClusterID(); + } + } +} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java index 22c0c0173de34..38f78ad815350 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java @@ -91,7 +91,7 @@ public static StorageContainerManager getScm(OzoneConfiguration conf) // writes the version file properties scmStore.initialize(); } - return StorageContainerManager.createSCM(null, conf); + return StorageContainerManager.createSCM(conf); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java new file mode 100644 index 0000000000000..60a56e3ffbc92 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.server; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import static org.junit.Assert.*; + + +/** + * This class is used to test the StorageContainerManagerStarter using a mock + * class to avoid starting any services and hence just test the CLI component. 
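 + *
 + * Each test drives the CLI through
 + * {@code new StorageContainerManagerStarter(mock).execute(args)} and then
 + * asserts on the flags recorded by the mock starter.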
+ */ +public class TestStorageContainerManagerStarter { + + private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); + private final PrintStream originalOut = System.out; + private final PrintStream originalErr = System.err; + + private MockSCMStarter mock; + + @Before + public void setUpStreams() { + System.setOut(new PrintStream(outContent)); + System.setErr(new PrintStream(errContent)); + mock = new MockSCMStarter(); + } + + @After + public void restoreStreams() { + System.setOut(originalOut); + System.setErr(originalErr); + } + + @Test + public void testCallsStartWhenServerStarted() throws Exception { + executeCommand(); + assertTrue(mock.startCalled); + } + + @Test + public void testExceptionThrownWhenStartFails() throws Exception { + mock.throwOnStart = true; + try { + executeCommand(); + fail("Exception show have been thrown"); + } catch (Exception e) { + assertTrue(true); + } + } + + @Test + public void testStartNotCalledWithInvalidParam() throws Exception { + executeCommand("--invalid"); + assertFalse(mock.startCalled); + } + + @Test + public void testPassingInitSwitchCallsInit() { + executeCommand("--init"); + assertTrue(mock.initCalled); + } + + @Test + public void testInitSwitchAcceptsClusterIdSSwitch() { + executeCommand("--init", "--clusterid=abcdefg"); + assertEquals("abcdefg", mock.clusterId); + } + + @Test + public void testInitSwitchWithInvalidParamDoesNotRun() { + executeCommand("--init", "--clusterid=abcdefg", "--invalid"); + assertFalse(mock.initCalled); + } + + @Test + public void testUnSuccessfulInitThrowsException() { + mock.throwOnInit = true; + try { + executeCommand("--init"); + fail("Exception show have been thrown"); + } catch (Exception e) { + assertTrue(true); + } + } + + @Test + public void testGenClusterIdRunsGenerate() { + executeCommand("--genclusterid"); + assertTrue(mock.generateCalled); + } + + @Test + public void testGenClusterIdWithInvalidParamDoesNotRun() { + executeCommand("--genclusterid", "--invalid"); + assertFalse(mock.generateCalled); + } + + @Test + public void testUsagePrintedOnInvalidInput() { + executeCommand("--invalid"); + Pattern p = Pattern.compile("^Unknown option:.*--invalid.*\nUsage"); + Matcher m = p.matcher(errContent.toString()); + assertTrue(m.find()); + } + + private void executeCommand(String... 
args) { + new StorageContainerManagerStarter(mock).execute(args); + } + + static class MockSCMStarter implements SCMStarterInterface { + + private boolean initStatus = true; + private boolean throwOnStart = false; + private boolean throwOnInit = false; + private boolean startCalled = false; + private boolean initCalled = false; + private boolean generateCalled = false; + private String clusterId = null; + + public void start(OzoneConfiguration conf) throws Exception { + if (throwOnStart) { + throw new Exception("Simulated error on start"); + } + startCalled = true; + } + + public boolean init(OzoneConfiguration conf, String cid) + throws IOException { + if (throwOnInit) { + throw new IOException("Simulated error on init"); + } + initCalled = true; + clusterId = cid; + return initStatus; + } + + public String generateClusterId() { + generateCalled = true; + return "static-cluster-id"; + } + } +} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone index 6307a8191d976..de8f47f1ae8f5 100755 --- a/hadoop-ozone/common/src/main/bin/ozone +++ b/hadoop-ozone/common/src/main/bin/ozone @@ -144,7 +144,7 @@ function ozonecmd_case ;; scm) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.server.StorageContainerManager' + HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.server.StorageContainerManagerStarter' hadoop_debug "Appending HDFS_STORAGECONTAINERMANAGER_OPTS onto HADOOP_OPTS" HDFS_STORAGECONTAINERMANAGER_OPTS="${HDFS_STORAGECONTAINERMANAGER_OPTS} -Dlog4j.configurationFile=${HADOOP_CONF_DIR}/scm-audit-log4j2.properties" HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_STORAGECONTAINERMANAGER_OPTS}" diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 9fbdad7eed02f..ee1e34ad7bbce 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -254,7 +254,7 @@ public void restartStorageContainerManager(boolean waitForDatanode) AuthenticationException { scm.stop(); scm.join(); - scm = StorageContainerManager.createSCM(null, conf); + scm = StorageContainerManager.createSCM(conf); scm.start(); if (waitForDatanode) { waitForClusterToBeReady(); @@ -475,7 +475,7 @@ StorageContainerManager createSCM() configureSCM(); SCMStorageConfig scmStore = new SCMStorageConfig(conf); initializeScmStorage(scmStore); - return StorageContainerManager.createSCM(null, conf); + return StorageContainerManager.createSCM(conf); } private void initializeScmStorage(SCMStorageConfig scmStore) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 7269e30aaaa5e..498261999783d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -260,7 +260,7 @@ private void setSecureConfig(Configuration configuration) throws IOException { public void testSecureScmStartupSuccess() throws Exception { initSCM(); - scm = StorageContainerManager.createSCM(null, conf); + scm = StorageContainerManager.createSCM(conf); //Reads the SCM Info from SCM instance 
ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); Assert.assertEquals(clusterId, scmInfo.getClusterId()); @@ -271,7 +271,7 @@ public void testSecureScmStartupSuccess() throws Exception { public void testSCMSecurityProtocol() throws Exception { initSCM(); - scm = StorageContainerManager.createSCM(null, conf); + scm = StorageContainerManager.createSCM(conf); //Reads the SCM Info from SCM instance try { scm.start(); @@ -340,7 +340,7 @@ public void testSecureScmStartupFailure() throws Exception { LambdaTestUtils.intercept(IOException.class, "Running in secure mode, but config doesn't have a keytab", () -> { - StorageContainerManager.createSCM(null, conf); + StorageContainerManager.createSCM(conf); }); conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY, @@ -349,7 +349,7 @@ public void testSecureScmStartupFailure() throws Exception { "/etc/security/keytabs/scm.keytab"); testCommonKerberosFailures( - () -> StorageContainerManager.createSCM(null, conf)); + () -> StorageContainerManager.createSCM(conf)); } @@ -379,7 +379,7 @@ private void testCommonKerberosFailures(Callable callable) throws Exception { public void testSecureOMInitializationFailure() throws Exception { initSCM(); // Create a secure SCM instance as om client will connect to it - scm = StorageContainerManager.createSCM(null, conf); + scm = StorageContainerManager.createSCM(conf); setupOm(conf); conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY, "non-existent-user@EXAMPLE.com"); @@ -395,7 +395,7 @@ public void testSecureOMInitializationFailure() throws Exception { public void testSecureOmInitializationSuccess() throws Exception { initSCM(); // Create a secure SCM instance as om client will connect to it - scm = StorageContainerManager.createSCM(null, conf); + scm = StorageContainerManager.createSCM(conf); LogCapturer logs = LogCapturer.captureLogs(OzoneManager.LOG); GenericTestUtils.setLogLevel(OzoneManager.LOG, INFO); @@ -719,7 +719,7 @@ public void testSecureOmReInit() throws Exception { omLogs.clearOutput(); initSCM(); try { - scm = StorageContainerManager.createSCM(null, conf); + scm = StorageContainerManager.createSCM(conf); scm.start(); conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, false); OMStorage omStore = new OMStorage(conf); @@ -765,7 +765,7 @@ public void testSecureOmInitSuccess() throws Exception { omLogs.clearOutput(); initSCM(); try { - scm = StorageContainerManager.createSCM(null, conf); + scm = StorageContainerManager.createSCM(conf); scm.start(); OMStorage omStore = new OMStorage(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index e882657cc2b89..5b60f491fc306 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -64,7 +64,6 @@ import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager.StartupOption; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.TypedEvent; import org.apache.hadoop.ozone.container.ContainerTestHelper; @@ -76,7 +75,6 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import 
org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.utils.HddsVersionInfo; import org.junit.Assert; import org.junit.Rule; @@ -417,15 +415,13 @@ public void testSCMInitialization() throws Exception { Path scmPath = Paths.get(path, "scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - StartupOption.INIT.setClusterId("testClusterId"); // This will initialize SCM - StorageContainerManager.scmInit(conf); + StorageContainerManager.scmInit(conf, "testClusterId"); SCMStorageConfig scmStore = new SCMStorageConfig(conf); Assert.assertEquals(NodeType.SCM, scmStore.getNodeType()); Assert.assertEquals("testClusterId", scmStore.getClusterID()); - StartupOption.INIT.setClusterId("testClusterIdNew"); - StorageContainerManager.scmInit(conf); + StorageContainerManager.scmInit(conf, "testClusterIdNew"); Assert.assertEquals(NodeType.SCM, scmStore.getNodeType()); Assert.assertEquals("testClusterId", scmStore.getClusterID()); } @@ -441,9 +437,8 @@ public void testSCMReinitialization() throws Exception { MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); cluster.waitForClusterToBeReady(); - StartupOption.INIT.setClusterId("testClusterId"); // This will initialize SCM - StorageContainerManager.scmInit(conf); + StorageContainerManager.scmInit(conf, "testClusterId"); SCMStorageConfig scmStore = new SCMStorageConfig(conf); Assert.assertEquals(NodeType.SCM, scmStore.getNodeType()); Assert.assertNotEquals("testClusterId", scmStore.getClusterID()); @@ -462,20 +457,7 @@ public void testSCMInitializationFailure() exception.expect(SCMException.class); exception.expectMessage( "SCM not initialized due to storage config failure"); - StorageContainerManager.createSCM(null, conf); - } - - @Test - public void testSCMInitializationReturnCode() throws IOException, - AuthenticationException { - ExitUtil.disableSystemExit(); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); - // Set invalid args - String[] invalidArgs = {"--zxcvbnm"}; - exception.expect(ExitUtil.ExitException.class); - exception.expectMessage("ExitException"); - StorageContainerManager.createSCM(invalidArgs, conf); + StorageContainerManager.createSCM(conf); } @Test @@ -493,7 +475,7 @@ public void testScmInfo() throws Exception { scmStore.setScmId(scmId); // writes the version file properties scmStore.initialize(); - StorageContainerManager scm = StorageContainerManager.createSCM(null, conf); + StorageContainerManager scm = StorageContainerManager.createSCM(conf); //Reads the SCM Info from SCM instance ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); Assert.assertEquals(clusterId, scmInfo.getClusterId()); From 4e38dafde4dce8cd8c368783a291e830f06e1def Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Fri, 7 Jun 2019 18:26:06 +0100 Subject: [PATCH 0148/1308] HADOOP-15563. S3Guard to support creating on-demand DDB tables. 
Contributed by Steve Loughran Change-Id: I2262b5b9f52e42ded8ed6f50fd39756f96e77087 --- .../src/main/resources/core-default.xml | 20 +- .../org/apache/hadoop/fs/s3a/Constants.java | 49 ++--- .../fs/s3a/s3guard/DynamoDBMetadataStore.java | 61 ++++-- .../hadoop/fs/s3a/s3guard/S3GuardTool.java | 4 +- .../site/markdown/tools/hadoop-aws/s3guard.md | 155 +++++++++----- .../hadoop/fs/s3a/S3ATestConstants.java | 10 + .../s3guard/AbstractS3GuardToolTestBase.java | 10 +- .../hadoop/fs/s3a/s3guard/DDBCapacities.java | 27 ++- .../s3guard/ITestDynamoDBMetadataStore.java | 190 ++++++++++++------ .../s3a/s3guard/ITestS3GuardToolDynamoDB.java | 41 +--- .../fs/s3a/s3guard/MetadataStoreTestBase.java | 1 - 11 files changed, 375 insertions(+), 193 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 6edcb670a443e..b5056d1d23c4f 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1581,23 +1581,27 @@ fs.s3a.s3guard.ddb.table.capacity.read - 500 + 0 Provisioned throughput requirements for read operations in terms of capacity - units for the DynamoDB table. This config value will only be used when - creating a new DynamoDB table, though later you can manually provision by - increasing or decreasing read capacity as needed for existing tables. - See DynamoDB documents for more information. + units for the DynamoDB table. This config value will only be used when + creating a new DynamoDB table. + If set to 0 (the default), new tables are created with "per-request" capacity. + If a positive integer is provided for this and the write capacity, then + a table with "provisioned capacity" will be created. + You can change the capacity of an existing provisioned-capacity table + through the "s3guard set-capacity" command. fs.s3a.s3guard.ddb.table.capacity.write - 100 + 0 Provisioned throughput requirements for write operations in terms of - capacity units for the DynamoDB table. Refer to related config - fs.s3a.s3guard.ddb.table.capacity.read before usage. + capacity units for the DynamoDB table. + If set to 0 (the default), new tables are created with "per-request" capacity. + Refer to related configuration option fs.s3a.s3guard.ddb.table.capacity.read diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index 18ed7b44027dc..a8dc161e5ec76 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -439,7 +439,6 @@ private Constants() { * This config has no default value. If the user does not set this, the * S3Guard will operate table in the associated S3 bucket region. */ - @InterfaceStability.Unstable public static final String S3GUARD_DDB_REGION_KEY = "fs.s3a.s3guard.ddb.region"; @@ -449,7 +448,6 @@ private Constants() { * This config has no default value. If the user does not set this, the * S3Guard implementation will use the respective S3 bucket name. 
*/ - @InterfaceStability.Unstable public static final String S3GUARD_DDB_TABLE_NAME_KEY = "fs.s3a.s3guard.ddb.table"; @@ -459,36 +457,45 @@ private Constants() { * For example: * fs.s3a.s3guard.ddb.table.tag.mytag */ - @InterfaceStability.Unstable public static final String S3GUARD_DDB_TABLE_TAG = "fs.s3a.s3guard.ddb.table.tag."; - /** - * Test table name to use during DynamoDB integration test. - * - * The table will be modified, and deleted in the end of the tests. - * If this value is not set, the integration tests that would be destructive - * won't run. - */ - @InterfaceStability.Unstable - public static final String S3GUARD_DDB_TEST_TABLE_NAME_KEY = - "fs.s3a.s3guard.ddb.test.table"; - /** * Whether to create the DynamoDB table if the table does not exist. + * Value: {@value}. */ - @InterfaceStability.Unstable public static final String S3GUARD_DDB_TABLE_CREATE_KEY = "fs.s3a.s3guard.ddb.table.create"; - @InterfaceStability.Unstable + /** + * Read capacity when creating a table. + * When it and the write capacity are both "0", a per-request table is + * created. + * Value: {@value}. + */ public static final String S3GUARD_DDB_TABLE_CAPACITY_READ_KEY = "fs.s3a.s3guard.ddb.table.capacity.read"; - public static final long S3GUARD_DDB_TABLE_CAPACITY_READ_DEFAULT = 500; - @InterfaceStability.Unstable + + /** + * Default read capacity when creating a table. + * Value: {@value}. + */ + public static final long S3GUARD_DDB_TABLE_CAPACITY_READ_DEFAULT = 0; + + /** + * Write capacity when creating a table. + * When it and the read capacity are both "0", a per-request table is + * created. + * Value: {@value}. + */ public static final String S3GUARD_DDB_TABLE_CAPACITY_WRITE_KEY = "fs.s3a.s3guard.ddb.table.capacity.write"; - public static final long S3GUARD_DDB_TABLE_CAPACITY_WRITE_DEFAULT = 100; + + /** + * Default write capacity when creating a table. + * Value: {@value}. + */ + public static final long S3GUARD_DDB_TABLE_CAPACITY_WRITE_DEFAULT = 0; /** * The maximum put or delete requests per BatchWriteItem request. @@ -497,7 +504,6 @@ private Constants() { */ public static final int S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT = 25; - @InterfaceStability.Unstable public static final String S3GUARD_DDB_MAX_RETRIES = "fs.s3a.s3guard.ddb.max.retries"; @@ -509,7 +515,6 @@ private Constants() { public static final int S3GUARD_DDB_MAX_RETRIES_DEFAULT = DEFAULT_MAX_ERROR_RETRIES; - @InterfaceStability.Unstable public static final String S3GUARD_DDB_THROTTLE_RETRY_INTERVAL = "fs.s3a.s3guard.ddb.throttle.retry.interval"; public static final String S3GUARD_DDB_THROTTLE_RETRY_INTERVAL_DEFAULT = @@ -528,7 +533,6 @@ private Constants() { /** * The default "Null" metadata store: {@value}. */ - @InterfaceStability.Unstable public static final String S3GUARD_METASTORE_NULL = "org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore"; @@ -561,7 +565,6 @@ private Constants() { /** * Use DynamoDB for the metadata: {@value}. 
*/ - @InterfaceStability.Unstable public static final String S3GUARD_METASTORE_DYNAMO = "org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore"; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java index a9e1f3368990c..fa1a203fc7213 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java @@ -59,6 +59,7 @@ import com.amazonaws.services.dynamodbv2.document.spec.GetItemSpec; import com.amazonaws.services.dynamodbv2.document.spec.QuerySpec; import com.amazonaws.services.dynamodbv2.document.utils.ValueMap; +import com.amazonaws.services.dynamodbv2.model.BillingMode; import com.amazonaws.services.dynamodbv2.model.CreateTableRequest; import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughput; import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription; @@ -1259,11 +1260,26 @@ void initTable() throws IOException { tableName, region, (created != null) ? new Date(created) : null); } catch (ResourceNotFoundException rnfe) { if (conf.getBoolean(S3GUARD_DDB_TABLE_CREATE_KEY, false)) { - final ProvisionedThroughput capacity = new ProvisionedThroughput( - conf.getLong(S3GUARD_DDB_TABLE_CAPACITY_READ_KEY, - S3GUARD_DDB_TABLE_CAPACITY_READ_DEFAULT), - conf.getLong(S3GUARD_DDB_TABLE_CAPACITY_WRITE_KEY, - S3GUARD_DDB_TABLE_CAPACITY_WRITE_DEFAULT)); + long readCapacity = conf.getLong(S3GUARD_DDB_TABLE_CAPACITY_READ_KEY, + S3GUARD_DDB_TABLE_CAPACITY_READ_DEFAULT); + long writeCapacity = conf.getLong( + S3GUARD_DDB_TABLE_CAPACITY_WRITE_KEY, + S3GUARD_DDB_TABLE_CAPACITY_WRITE_DEFAULT); + ProvisionedThroughput capacity; + if (readCapacity > 0 && writeCapacity > 0) { + capacity = new ProvisionedThroughput( + readCapacity, + writeCapacity); + } else { + // at least one capacity value is <= 0 + // verify they are both exactly zero + Preconditions.checkArgument( + readCapacity == 0 && writeCapacity == 0, + "S3Guard table read capacity %d and and write capacity %d" + + " are inconsistent", readCapacity, writeCapacity); + // and set the capacity to null for per-request billing. + capacity = null; + } createTable(capacity); } else { @@ -1403,20 +1419,31 @@ private void waitForTableActive(Table t) throws IOException { * marker. * Creating an setting up the table isn't wrapped by any retry operations; * the wait for a table to become available is RetryTranslated. - * @param capacity capacity to provision + * @param capacity capacity to provision. If null: create a per-request + * table. * @throws IOException on any failure. 
* @throws InterruptedIOException if the wait was interrupted */ @Retries.OnceRaw private void createTable(ProvisionedThroughput capacity) throws IOException { try { - LOG.info("Creating non-existent DynamoDB table {} in region {}", - tableName, region); - table = dynamoDB.createTable(new CreateTableRequest() + String mode; + CreateTableRequest request = new CreateTableRequest() .withTableName(tableName) .withKeySchema(keySchema()) - .withAttributeDefinitions(attributeDefinitions()) - .withProvisionedThroughput(capacity)); + .withAttributeDefinitions(attributeDefinitions()); + if (capacity != null) { + mode = String.format("with provisioned read capacity %d and" + + " write capacity %s", + capacity.getReadCapacityUnits(), capacity.getWriteCapacityUnits()); + request.withProvisionedThroughput(capacity); + } else { + mode = "with pay-per-request billing"; + request.withBillingMode(BillingMode.PAY_PER_REQUEST); + } + LOG.info("Creating non-existent DynamoDB table {} in region {} {}", + tableName, region, mode); + table = dynamoDB.createTable(request); LOG.debug("Awaiting table becoming active"); } catch (ResourceInUseException e) { LOG.warn("ResourceInUseException while creating DynamoDB table {} " @@ -1446,13 +1473,21 @@ private PutItemOutcome putItem(Item item) { * Provision the table with given read and write capacity units. * Call will fail if the table is busy, or the new values match the current * ones. - * @param readCapacity read units - * @param writeCapacity write units + *

    + * Until the AWS SDK lets us switch a table to on-demand, an attempt to + * set the I/O capacity to zero will fail. + * @param readCapacity read units: must be greater than zero + * @param writeCapacity write units: must be greater than zero * @throws IOException on a failure */ @Retries.RetryTranslated void provisionTable(Long readCapacity, Long writeCapacity) throws IOException { + + if (readCapacity == 0 || writeCapacity == 0) { + // table is pay on demand + throw new IOException(E_ON_DEMAND_NO_SET_CAPACITY); + } final ProvisionedThroughput toProvision = new ProvisionedThroughput() .withReadCapacityUnits(readCapacity) .withWriteCapacityUnits(writeCapacity); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java index 448ea9213f5b0..397a9cba670a8 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java @@ -434,7 +434,9 @@ static class Init extends S3GuardTool { "\n" + " URLs for Amazon DynamoDB are of the form dynamodb://TABLE_NAME.\n" + " Specifying both the -" + REGION_FLAG + " option and an S3A path\n" + - " is not supported."; + " is not supported.\n" + + "To create a table with per-request billing, set the read and write\n" + + "capacities to 0"; Init(Configuration conf) { super(conf); diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md index a766abc616be9..94dc89b70d332 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md @@ -251,9 +251,11 @@ this sets the table name to `my-ddb-table-name` ``` -It is good to share a table across multiple buckets for multiple reasons. +It is good to share a table across multiple buckets for multiple reasons, +especially if you are *not* using on-demand DynamoDB tables, and instead +prepaying for provisioned I/O capacity. -1. You are billed for the I/O capacity allocated to the table, +1. You are billed for the provisioned I/O capacity allocated to the table, *even when the table is not used*. Sharing capacity can reduce costs. 1. You can share the "provision burden" across the buckets. That is, rather @@ -265,8 +267,13 @@ lower. S3Guard, because there is only one table to review and configure in the AWS management console. +1. When you don't grant the permission to create DynamoDB tables to users. +A single pre-created table for all buckets avoids the needs for an administrator +to create one for every bucket. + When wouldn't you want to share a table? +1. When you are using on-demand DynamoDB and want to keep each table isolated. 1. When you do explicitly want to provision I/O capacity to a specific bucket and table, isolated from others. @@ -315,18 +322,25 @@ Next, you can choose whether or not the table will be automatically created ``` -### 7. If creating a table: Set your DynamoDB I/O Capacity +### 7. If creating a table: Choose your billing mode (and perhaps I/O Capacity) + +Next, you need to decide whether to use On-Demand DynamoDB and its +pay-per-request billing (recommended), or to explicitly request a +provisioned IO capacity. -Next, you need to set the DynamoDB read and write throughput requirements you -expect to need for your cluster. 
Setting higher values will cost you more -money. *Note* that these settings only affect table creation when +Before AWS offered pay-per-request billing, the sole billing mechanism, +was "provisioned capacity". This mechanism requires you to choose +the DynamoDB read and write throughput requirements you +expect to need for your expected uses of the S3Guard table. +Setting higher values cost you more money -*even when the table was idle* + *Note* that these settings only affect table creation when `fs.s3a.s3guard.ddb.table.create` is enabled. To change the throughput for an existing table, use the AWS console or CLI tool. For more details on DynamoDB capacity units, see the AWS page on [Capacity Unit Calculations](http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations). -The charges are incurred per hour for the life of the table, *even when the +Provisioned IO capacity is billed per hour for the life of the table, *even when the table and the underlying S3 buckets are not being used*. There are also charges incurred for data storage and for data I/O outside of the @@ -334,34 +348,56 @@ region of the DynamoDB instance. S3Guard only stores metadata in DynamoDB: path and summary details of objects —the actual data is stored in S3, so billed at S3 rates. +With provisioned I/O capacity, attempting to perform more I/O than the capacity +requested throttles the operation and may result in operations failing. +Larger I/O capacities cost more. + +With the introduction of On-Demand DynamoDB, you can now avoid paying for +provisioned capacity by creating an on-demand table. +With an on-demand table you are not throttled if your DynamoDB requests exceed +any pre-provisioned limit, nor do you pay per hour even when a table is idle. + +You do, however, pay more per DynamoDB operation. +Even so, the ability to cope with sudden bursts of read or write requests, combined +with the elimination of charges for idle tables, suit the use patterns made of +S3Guard tables by applications interacting with S3. That is: periods when the table +is rarely used, with intermittent high-load operations when directory trees +are scanned (query planning and similar), or updated (rename and delete operations). + + +We recommending using On-Demand DynamoDB for maximum performance in operations +such as query planning, and lowest cost when S3 buckets are not being accessed. + +This is the default, as configured in the default configuration options. + ```xml fs.s3a.s3guard.ddb.table.capacity.read - 500 + 0 Provisioned throughput requirements for read operations in terms of capacity - units for the DynamoDB table. This config value will only be used when - creating a new DynamoDB table, though later you can manually provision by - increasing or decreasing read capacity as needed for existing tables. - See DynamoDB documents for more information. + units for the DynamoDB table. This config value will only be used when + creating a new DynamoDB table. + If set to 0 (the default), new tables are created with "per-request" capacity. + If a positive integer is provided for this and the write capacity, then + a table with "provisioned capacity" will be created. + You can change the capacity of an existing provisioned-capacity table + through the "s3guard set-capacity" command. fs.s3a.s3guard.ddb.table.capacity.write - 100 + 0 Provisioned throughput requirements for write operations in terms of - capacity units for the DynamoDB table. 
Refer to related config - fs.s3a.s3guard.ddb.table.capacity.read before usage. + capacity units for the DynamoDB table. + If set to 0 (the default), new tables are created with "per-request" capacity. + Refer to related configuration option fs.s3a.s3guard.ddb.table.capacity.read ``` -Attempting to perform more I/O than the capacity requested throttles the -I/O, and may result in operations failing. Larger I/O capacities cost more. -We recommending using small read and write capacities when initially experimenting -with S3Guard, and considering DynamoDB On-Demand. ## Authenticating with S3Guard @@ -369,9 +405,7 @@ The DynamoDB metadata store takes advantage of the fact that the DynamoDB service uses the same authentication mechanisms as S3. S3Guard gets all its credentials from the S3A client that is using it. -All existing S3 authentication mechanisms can be used, except for one -exception. Credentials placed in URIs are not supported for S3Guard, for security -reasons. +All existing S3 authentication mechanisms can be used. ## Per-bucket S3Guard configuration @@ -512,7 +546,13 @@ hadoop s3guard init -meta URI ( -region REGION | s3a://BUCKET ) Creates and initializes an empty metadata store. A DynamoDB metadata store can be initialized with additional parameters -pertaining to [Provisioned Throughput](http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ProvisionedThroughput.html): +pertaining to capacity. + +If these values are both zero, then an on-demand DynamoDB table is created; +if positive values then they set the +[Provisioned Throughput](http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ProvisionedThroughput.html) +of the table. + ```bash [-write PROVISIONED_WRITES] [-read PROVISIONED_READS] @@ -528,29 +568,31 @@ metadata store will be created with these tags in DynamoDB. Example 1 ```bash -hadoop s3guard init -meta dynamodb://ireland-team -write 5 -read 10 s3a://ireland-1 +hadoop s3guard init -meta dynamodb://ireland-team -write 0 -read 0 s3a://ireland-1 ``` -Creates a table "ireland-team" with a capacity of 5 for writes, 10 for reads, -in the same location as the bucket "ireland-1". +Creates an on-demand table "ireland-team", +in the same location as the S3 bucket "ireland-1". Example 2 ```bash -hadoop s3guard init -meta dynamodb://ireland-team -region eu-west-1 +hadoop s3guard init -meta dynamodb://ireland-team -region eu-west-1 --read 0 --write 0 ``` Creates a table "ireland-team" in the region "eu-west-1.amazonaws.com" - Example 3 ```bash hadoop s3guard init -meta dynamodb://ireland-team -tag tag1=first;tag2=second; ``` -Creates a table "ireland-team" with tags "first" and "second". +Creates a table "ireland-team" with tags "first" and "second". The read and +write capacity will be those of the site configuration's values of +`fs.s3a.s3guard.ddb.table.capacity.read` and `fs.s3a.s3guard.ddb.table.capacity.write`; +if these are both zero then it will be an on-demand table. ### Import a bucket: `s3guard import` @@ -588,7 +630,7 @@ hadoop s3guard diff s3a://ireland-1 Prints and optionally checks the s3guard and encryption status of a bucket. ```bash -hadoop s3guard bucket-info [ -guarded ] [-unguarded] [-auth] [-nonauth] [-magic] [-encryption ENCRYPTION] s3a://BUCKET +hadoop s3guard bucket-info [-guarded] [-unguarded] [-auth] [-nonauth] [-magic] [-encryption ENCRYPTION] s3a://BUCKET ``` Options @@ -788,7 +830,8 @@ the region "eu-west-1". 
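For example (a usage sketch, reusing the sample bucket `s3a://ireland-1` from the
examples above), to print the bucket status and fail if S3Guard is not enabled on it:

```bash
hadoop s3guard bucket-info -guarded s3a://ireland-1
```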
### Tune the I/O capacity of the DynamoDB Table, `s3guard set-capacity` -Alter the read and/or write capacity of a s3guard table. +Alter the read and/or write capacity of a s3guard table created with provisioned +I/O capacity. ```bash hadoop s3guard set-capacity [--read UNIT] [--write UNIT] ( -region REGION | s3a://BUCKET ) @@ -796,6 +839,9 @@ hadoop s3guard set-capacity [--read UNIT] [--write UNIT] ( -region REGION | s3a: The `--read` and `--write` units are those of `s3guard init`. +It cannot be used to change the I/O capacity of an on demand table (there is +no need), and nor can it be used to convert an existing table to being +on-demand. For that the AWS console must be used. Example @@ -932,10 +978,10 @@ merits more testing before it could be considered reliable. ## Managing DynamoDB I/O Capacity -By default, DynamoDB is not only billed on use (data and I/O requests) --it is billed on allocated I/O Capacity. +Historically, DynamoDB has been not only billed on use (data and I/O requests) +-but on provisioned I/O Capacity. -When an application makes more requests than +With Provisioned IO, when an application makes more requests than the allocated capacity permits, the request is rejected; it is up to the calling application to detect when it is being so throttled and react. S3Guard does this, but as a result: when the client is being @@ -943,7 +989,7 @@ throttled, operations are slower. This capacity throttling is averaged over a few minutes: a briefly overloaded table will not be throttled, but the rate cannot be sustained. -The load on a table isvisible in the AWS console: go to the +The load on a table is visible in the AWS console: go to the DynamoDB page for the table and select the "metrics" tab. If the graphs of throttled read or write requests show that a lot of throttling has taken place, then there is not @@ -1015,20 +1061,33 @@ for S3Guard applications. * There's no explicit limit on I/O capacity, so operations which make heavy use of S3Guard tables (for example: SQL query planning) do not get throttled. +* You are charged more per DynamoDB API call, in exchange for paying nothing +when you are not interacting with DynamoDB. * There's no way put a limit on the I/O; you may unintentionally run up large bills through sustained heavy load. * The `s3guard set-capacity` command fails: it does not make sense any more. When idle, S3Guard tables are only billed for the data stored, not for -any unused capacity. For this reason, there is no benefit from sharing -a single S3Guard table across multiple buckets. +any unused capacity. For this reason, there is no performance benefit +from sharing a single S3Guard table across multiple buckets. + +*Creating a S3Guard Table with On-Demand Tables* + +The default settings for S3Guard are to create on-demand tables; this +can also be done explicitly in the `s3guard init` command by setting the +read and write capacities to zero. -*Enabling DynamoDB On-Demand for a S3Guard table* -You cannot currently enable DynamoDB on-demand from the `s3guard` command -when creating or updating a bucket. +```bash +hadoop s3guard init -meta dynamodb://ireland-team -write 0 -read 0 s3a://ireland-1 +``` -Instead it must be done through the AWS console or [the CLI](https://docs.aws.amazon.com/cli/latest/reference/dynamodb/update-table.html). +*Enabling DynamoDB On-Demand for an existing S3Guard table* + +You cannot currently convert an existing S3Guard table to being an on-demand +table through the `s3guard` command. 
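As a sketch of what the conversion involves (assuming the AWS CLI is configured
for the table's account and region; `ireland-team` is the example table name used
earlier in this guide):

```bash
aws dynamodb update-table \
    --table-name ireland-team \
    --billing-mode PAY_PER_REQUEST
```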
+ +It can be done through the AWS console or [the CLI](https://docs.aws.amazon.com/cli/latest/reference/dynamodb/update-table.html). From the Web console or the command line, switch the billing to pay-per-request. Once enabled, the read and write capacities of the table listed in the @@ -1078,7 +1137,7 @@ Metadata Store Diagnostics: The "magic" committer is supported ``` -### Autoscaling S3Guard tables. +### Autoscaling (Provisioned Capacity) S3Guard tables. [DynamoDB Auto Scaling](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/AutoScaling.html) can automatically increase and decrease the allocated capacity. @@ -1093,7 +1152,7 @@ until any extra capacity is allocated. Furthermore, as this retrying will block the threads from performing other operations -including more I/O, the the autoscale may not scale fast enough. -This is why the DynamoDB On-Demand appears to be a better option for +This is why the DynamoDB On-Demand appears is a better option for workloads with Hadoop, Spark, Hive and other applications. If autoscaling is to be used, we recommend experimenting with the option, @@ -1259,18 +1318,18 @@ Error Code: ProvisionedThroughputExceededException; ``` The I/O load of clients of the (shared) DynamoDB table was exceeded. -1. Increase the capacity of the DynamoDB table. -1. Increase the retry count and/or sleep time of S3Guard on throttle events. -1. Enable capacity autoscaling for the table in the AWS console. +1. Switch to On-Demand Dynamo DB tables (AWS console) +1. Increase the capacity of the DynamoDB table (AWS console or `s3guard set-capacity`)/ +1. Increase the retry count and/or sleep time of S3Guard on throttle events (Hadoop configuration). ### Error `Max retries exceeded` The I/O load of clients of the (shared) DynamoDB table was exceeded, and the number of attempts to retry the operation exceeded the configured amount. +1. Switch to On-Demand Dynamo DB tables (AWS console). 1. Increase the capacity of the DynamoDB table. 1. Increase the retry count and/or sleep time of S3Guard on throttle events. -1. Enable capacity autoscaling for the table in the AWS console. ### Error when running `set-capacity`: `org.apache.hadoop.fs.s3a.AWSServiceThrottledException: ProvisionTable` @@ -1286,7 +1345,7 @@ Next decrease can be made at Wednesday, July 25, 2018 9:48:14 PM UTC ``` There's are limit on how often you can change the capacity of an DynamoDB table; -if you call set-capacity too often, it fails. Wait until the after the time indicated +if you call `set-capacity` too often, it fails. Wait until the after the time indicated and try again. ### Error `Invalid region specified` diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java index 9d6e1ce00b78d..81db77c6e1694 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java @@ -197,4 +197,14 @@ public interface S3ATestConstants { Duration TEST_SESSION_TOKEN_DURATION = Duration.ofSeconds( TEST_SESSION_TOKEN_DURATION_SECONDS); + /** + * Test table name to use during DynamoDB integration tests in + * {@code ITestDynamoDBMetadataStore}. + * + * The table will be modified, and deleted in the end of the tests. + * If this value is not set, the integration tests that would be destructive + * won't run. 
+ */ + String S3GUARD_DDB_TEST_TABLE_NAME_KEY = + "fs.s3a.s3guard.ddb.test.table"; } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java index 589628c5c9424..9241686090536 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java @@ -59,7 +59,6 @@ import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_TABLE_NAME_KEY; import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_METASTORE_NULL; import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL; -import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestDynamoTablePrefix; import static org.apache.hadoop.fs.s3a.S3AUtils.clearBucketOption; import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.E_BAD_STATE; import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.SUCCESS; @@ -332,7 +331,14 @@ public void testSetCapacityFailFastOnReadWriteOfZero() throws Exception{ @Test public void testBucketInfoUnguarded() throws Exception { final Configuration conf = getConfiguration(); + URI fsUri = getFileSystem().getUri(); conf.set(S3GUARD_DDB_TABLE_CREATE_KEY, Boolean.FALSE.toString()); + String bucket = fsUri.getHost(); + clearBucketOption(conf, bucket, + S3GUARD_DDB_TABLE_CREATE_KEY); + clearBucketOption(conf, bucket, S3_METADATA_STORE_IMPL); + clearBucketOption(conf, bucket, S3GUARD_DDB_TABLE_NAME_KEY); + conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL); conf.set(S3GUARD_DDB_TABLE_NAME_KEY, "testBucketInfoUnguarded-" + UUID.randomUUID()); @@ -341,7 +347,7 @@ public void testBucketInfoUnguarded() throws Exception { S3GuardTool.BucketInfo infocmd = new S3GuardTool.BucketInfo(conf); String info = exec(infocmd, S3GuardTool.BucketInfo.NAME, "-" + S3GuardTool.BucketInfo.UNGUARDED_FLAG, - getFileSystem().getUri().toString()); + fsUri.toString()); assertTrue("Output should contain information about S3A client " + info, info.contains("S3A Client")); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/DDBCapacities.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/DDBCapacities.java index c6e47c751855f..3f1e99061b84e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/DDBCapacities.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/DDBCapacities.java @@ -21,10 +21,15 @@ import java.util.Map; import java.util.Objects; +import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription; import org.junit.Assert; import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore.READ_CAPACITY; +import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore.WRITE_CAPACITY; +/** + * Tuple of read and write capacity of a DDB table. + */ class DDBCapacities { private final long read, write; @@ -49,12 +54,6 @@ String getWriteStr() { return Long.toString(write); } - void checkEquals(String text, DDBCapacities that) throws Exception { - if (!this.equals(that)) { - throw new Exception(text + " expected = " + this +"; actual = "+ that); - } - } - @Override public boolean equals(Object o) { if (this == o) { @@ -82,7 +81,7 @@ public String toString() { } /** - * Is the the capacity that of a pay-on-demand table? + * Is the the capacity that of an On-Demand table? 
* @return true if the capacities are both 0. */ public boolean isOnDemandTable() { @@ -102,7 +101,19 @@ public static DDBCapacities extractCapacities( read); return new DDBCapacities( Long.parseLong(read), - Long.parseLong(diagnostics.get(DynamoDBMetadataStore.WRITE_CAPACITY))); + Long.parseLong(diagnostics.get(WRITE_CAPACITY))); + } + + /** + * Given a throughput information from table.describe(), build + * a DDBCapacities object. + * @param throughput throughput description. + * @return the capacities + */ + public static DDBCapacities extractCapacities( + ProvisionedThroughputDescription throughput) { + return new DDBCapacities(throughput.getReadCapacityUnits(), + throughput.getWriteCapacityUnits()); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java index 972cbe5f5ef7a..149d1f36065da 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java @@ -33,7 +33,6 @@ import com.amazonaws.services.dynamodbv2.document.PrimaryKey; import com.amazonaws.services.dynamodbv2.document.Table; import com.amazonaws.services.dynamodbv2.model.ListTagsOfResourceRequest; -import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription; import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException; import com.amazonaws.services.dynamodbv2.model.TableDescription; @@ -43,6 +42,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.contract.s3a.S3AContract; import org.apache.hadoop.fs.s3a.Constants; +import org.apache.hadoop.fs.s3a.S3ATestConstants; import org.apache.hadoop.fs.s3a.Tristate; import org.apache.hadoop.io.IOUtils; @@ -64,6 +64,7 @@ import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; +import static org.apache.hadoop.fs.s3a.S3AUtils.clearBucketOption; import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.*; import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore.*; import static org.apache.hadoop.test.LambdaTestUtils.*; @@ -78,7 +79,15 @@ * * According to the base class, every test case will have independent contract * to create a new {@link S3AFileSystem} instance and initializes it. - * A table will be created and shared between the tests, + * A table will be created and shared between the tests; some tests also + * create their own. + * + * Important: Any new test which creates a table must do the following + *

 + *   1. Enable on-demand pricing.
 + *   2. Always destroy the table, even if an assertion fails.
 + *
    + * This is needed to avoid "leaking" DDB tables and running up bills. */ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase { @@ -121,7 +130,7 @@ public void setUp() throws Exception { Assume.assumeTrue("Test DynamoDB table name should be set to run " + "integration tests.", testDynamoDBTableName != null); conf.set(S3GUARD_DDB_TABLE_NAME_KEY, testDynamoDBTableName); - + enableOnDemand(conf); s3AContract = new S3AContract(conf); s3AContract.init(); @@ -141,36 +150,40 @@ public void setUp() throws Exception { } } - @BeforeClass public static void beforeClassSetup() throws IOException { Configuration conf = prepareTestConfiguration(new Configuration()); assumeThatDynamoMetadataStoreImpl(conf); // S3GUARD_DDB_TEST_TABLE_NAME_KEY and S3GUARD_DDB_TABLE_NAME_KEY should // be configured to use this test. - testDynamoDBTableName = conf.get(S3GUARD_DDB_TEST_TABLE_NAME_KEY); + testDynamoDBTableName = conf.get( + S3ATestConstants.S3GUARD_DDB_TEST_TABLE_NAME_KEY); String dynamoDbTableName = conf.getTrimmed(S3GUARD_DDB_TABLE_NAME_KEY); - Assume.assumeTrue("No DynamoDB table name configured", !StringUtils - .isEmpty(dynamoDbTableName)); + Assume.assumeTrue("No DynamoDB table name configured", + !StringUtils.isEmpty(dynamoDbTableName)); // We should assert that the table name is configured, so the test should // fail if it's not configured. - assertTrue("Test DynamoDB table name '" - + S3GUARD_DDB_TEST_TABLE_NAME_KEY + "' should be set to run " - + "integration tests.", testDynamoDBTableName != null); + assertNotNull("Test DynamoDB table name '" + + S3ATestConstants.S3GUARD_DDB_TEST_TABLE_NAME_KEY + "'" + + " should be set to run integration tests.", + testDynamoDBTableName); // We should assert that the test table is not the same as the production // table, as the test table could be modified and destroyed multiple // times during the test. - assertTrue("Test DynamoDB table name: '" - + S3GUARD_DDB_TEST_TABLE_NAME_KEY + "' and production table name: '" - + S3GUARD_DDB_TABLE_NAME_KEY + "' can not be the same.", - !conf.get(S3GUARD_DDB_TABLE_NAME_KEY).equals(testDynamoDBTableName)); + assertNotEquals("Test DynamoDB table name: " + + "'" + S3ATestConstants.S3GUARD_DDB_TEST_TABLE_NAME_KEY + "'" + + " and production table name: " + + "'" + S3GUARD_DDB_TABLE_NAME_KEY + "' can not be the same.", + testDynamoDBTableName, conf.get(S3GUARD_DDB_TABLE_NAME_KEY)); // We can use that table in the test if these assertions are valid conf.set(S3GUARD_DDB_TABLE_NAME_KEY, testDynamoDBTableName); LOG.debug("Creating static ddbms which will be shared between tests."); + enableOnDemand(conf); + ddbmsStatic = new DynamoDBMetadataStore(); ddbmsStatic.initialize(conf); } @@ -198,18 +211,23 @@ private static void assumeThatDynamoMetadataStoreImpl(Configuration conf){ @Override public void tearDown() throws Exception { - LOG.info("Removing data from ddbms table in teardown."); - // The following is a way to be sure the table will be cleared and there - // will be no leftovers after the test. - PathMetadata meta = ddbmsStatic.get(strToPath("/")); - if (meta != null){ - for (DescendantsIterator desc = new DescendantsIterator(ddbmsStatic, meta); - desc.hasNext();) { - ddbmsStatic.forgetMetadata(desc.next().getPath()); + try { + if (ddbmsStatic != null) { + LOG.info("Removing data from ddbms table in teardown."); + // The following is a way to be sure the table will be cleared and there + // will be no leftovers after the test. 
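+        // (The DescendantsIterator walk visits the entries in the store and
+        // forgetMetadata() removes each one, so the shared table itself
+        // survives but is left empty for the next test.)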
+ PathMetadata meta = ddbmsStatic.get(strToPath("/")); + if (meta != null){ + for (DescendantsIterator desc = + new DescendantsIterator(ddbmsStatic, meta); + desc.hasNext();) { + ddbmsStatic.forgetMetadata(desc.next().getPath()); + } + } } + } catch (IOException ignored) { } - - fileSystem.close(); + IOUtils.cleanupWithLogger(LOG, fileSystem); } /** @@ -263,6 +281,29 @@ private S3AFileSystem getFileSystem() { return this.fileSystem; } + /** + * Force the configuration into DDB on demand, so that + * even if a test bucket isn't cleaned up, the cost is $0. + * @param conf configuration to patch. + */ + public static void enableOnDemand(Configuration conf) { + conf.setInt(S3GUARD_DDB_TABLE_CAPACITY_WRITE_KEY, 0); + conf.setInt(S3GUARD_DDB_TABLE_CAPACITY_READ_KEY, 0); + } + + /** + * Get the configuration needed to create a table; extracts + * it from the filesystem then always patches it to be on demand. + * Why the patch? It means even if a cached FS has brought in + * some provisioned values, they get reset. + * @return a new configuration + */ + private Configuration getTableCreationConfig() { + Configuration conf = new Configuration(getFileSystem().getConf()); + enableOnDemand(conf); + return conf; + } + /** * This tests that after initialize() using an S3AFileSystem object, the * instance should have been initialized successfully, and tables are ACTIVE. @@ -272,9 +313,11 @@ public void testInitialize() throws IOException { final S3AFileSystem s3afs = this.fileSystem; final String tableName = getTestTableName("testInitialize"); - final Configuration conf = s3afs.getConf(); + Configuration conf = getFileSystem().getConf(); + enableOnDemand(conf); conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { + DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore(); + try { ddbms.initialize(s3afs); verifyTableInitialized(tableName, ddbms.getDynamoDB()); assertNotNull(ddbms.getTable()); @@ -285,7 +328,9 @@ public void testInitialize() throws IOException { " region as S3 bucket", expectedRegion, ddbms.getRegion()); + } finally { ddbms.destroy(); + ddbms.close(); } } @@ -297,7 +342,7 @@ public void testInitialize() throws IOException { public void testInitializeWithConfiguration() throws IOException { final String tableName = getTestTableName("testInitializeWithConfiguration"); - final Configuration conf = getFileSystem().getConf(); + final Configuration conf = getTableCreationConfig(); conf.unset(S3GUARD_DDB_TABLE_NAME_KEY); String savedRegion = conf.get(S3GUARD_DDB_REGION_KEY, getFileSystem().getBucketLocation()); @@ -316,7 +361,8 @@ public void testInitializeWithConfiguration() throws IOException { } // config region conf.set(S3GUARD_DDB_REGION_KEY, savedRegion); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { + DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore(); + try { ddbms.initialize(conf); verifyTableInitialized(tableName, ddbms.getDynamoDB()); assertNotNull(ddbms.getTable()); @@ -324,7 +370,9 @@ public void testInitializeWithConfiguration() throws IOException { assertEquals("Unexpected key schema found!", keySchema(), ddbms.getTable().describe().getKeySchema()); + } finally { ddbms.destroy(); + ddbms.close(); } } @@ -434,13 +482,14 @@ public void testItemLacksVersion() throws Throwable { @Test public void testTableVersionRequired() throws Exception { String tableName = getTestTableName("testTableVersionRequired"); - Configuration conf = getFileSystem().getConf(); + Configuration conf = 
getTableCreationConfig(); int maxRetries = conf.getInt(S3GUARD_DDB_MAX_RETRIES, S3GUARD_DDB_MAX_RETRIES_DEFAULT); conf.setInt(S3GUARD_DDB_MAX_RETRIES, 3); conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - try(DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { + DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore(); + try { ddbms.initialize(conf); Table table = verifyTableInitialized(tableName, ddbms.getDynamoDB()); table.deleteItem(VERSION_MARKER_PRIMARY_KEY); @@ -450,7 +499,9 @@ public void testTableVersionRequired() throws Exception { () -> ddbms.initTable()); conf.setInt(S3GUARD_DDB_MAX_RETRIES, maxRetries); + } finally { ddbms.destroy(); + ddbms.close(); } } @@ -461,10 +512,11 @@ public void testTableVersionRequired() throws Exception { @Test public void testTableVersionMismatch() throws Exception { String tableName = getTestTableName("testTableVersionMismatch"); - Configuration conf = getFileSystem().getConf(); + Configuration conf = getTableCreationConfig(); conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - try(DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { + DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore(); + try { ddbms.initialize(conf); Table table = verifyTableInitialized(tableName, ddbms.getDynamoDB()); table.deleteItem(VERSION_MARKER_PRIMARY_KEY); @@ -474,7 +526,9 @@ public void testTableVersionMismatch() throws Exception { // create existing table intercept(IOException.class, E_INCOMPATIBLE_VERSION, () -> ddbms.initTable()); + } finally { ddbms.destroy(); + ddbms.close(); } } @@ -491,10 +545,18 @@ public void testFailNonexistentTable() throws IOException { getTestTableName("testFailNonexistentTable"); final S3AFileSystem s3afs = getFileSystem(); final Configuration conf = s3afs.getConf(); + enableOnDemand(conf); conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); + String b = fsUri.getHost(); + clearBucketOption(conf, b, S3GUARD_DDB_TABLE_CREATE_KEY); + clearBucketOption(conf, b, S3_METADATA_STORE_IMPL); + clearBucketOption(conf, b, S3GUARD_DDB_TABLE_NAME_KEY); conf.unset(S3GUARD_DDB_TABLE_CREATE_KEY); try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { ddbms.initialize(s3afs); + // if an exception was not raised, a table was created. + // So destroy it before failing. 
+ ddbms.destroy(); fail("Should have failed as table does not exist and table auto-creation" + " is disabled"); } catch (IOException ignored) { @@ -606,31 +668,36 @@ public void testMovePopulatesAncestors() throws IOException { public void testProvisionTable() throws Exception { final String tableName = getTestTableName("testProvisionTable-" + UUID.randomUUID()); - Configuration conf = getFileSystem().getConf(); + final Configuration conf = getTableCreationConfig(); conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - - try(DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { + conf.setInt(S3GUARD_DDB_TABLE_CAPACITY_WRITE_KEY, 2); + conf.setInt(S3GUARD_DDB_TABLE_CAPACITY_READ_KEY, 2); + DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore(); + try { ddbms.initialize(conf); DynamoDB dynamoDB = ddbms.getDynamoDB(); - final ProvisionedThroughputDescription oldProvision = - dynamoDB.getTable(tableName).describe().getProvisionedThroughput(); - ddbms.provisionTable(oldProvision.getReadCapacityUnits() * 2, - oldProvision.getWriteCapacityUnits() * 2); + final DDBCapacities oldProvision = DDBCapacities.extractCapacities( + dynamoDB.getTable(tableName).describe().getProvisionedThroughput()); + Assume.assumeFalse("Table is on-demand", oldProvision.isOnDemandTable()); + long desiredReadCapacity = oldProvision.getRead() - 1; + long desiredWriteCapacity = oldProvision.getWrite() - 1; + ddbms.provisionTable(desiredReadCapacity, + desiredWriteCapacity); ddbms.initTable(); // we have to wait until the provisioning settings are applied, // so until the table is ACTIVE again and not in UPDATING ddbms.getTable().waitForActive(); - final ProvisionedThroughputDescription newProvision = - dynamoDB.getTable(tableName).describe().getProvisionedThroughput(); - LOG.info("Old provision = {}, new provision = {}", oldProvision, - newProvision); + final DDBCapacities newProvision = DDBCapacities.extractCapacities( + dynamoDB.getTable(tableName).describe().getProvisionedThroughput()); assertEquals("Check newly provisioned table read capacity units.", - oldProvision.getReadCapacityUnits() * 2, - newProvision.getReadCapacityUnits().longValue()); + desiredReadCapacity, + newProvision.getRead()); assertEquals("Check newly provisioned table write capacity units.", - oldProvision.getWriteCapacityUnits() * 2, - newProvision.getWriteCapacityUnits().longValue()); + desiredWriteCapacity, + newProvision.getWrite()); + } finally { ddbms.destroy(); + ddbms.close(); } } @@ -639,9 +706,11 @@ public void testDeleteTable() throws Exception { final String tableName = getTestTableName("testDeleteTable"); Path testPath = new Path(new Path(fsUri), "/" + tableName); final S3AFileSystem s3afs = getFileSystem(); - final Configuration conf = s3afs.getConf(); + final Configuration conf = getTableCreationConfig(); conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { + enableOnDemand(conf); + DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore(); + try { ddbms.initialize(s3afs); // we can list the empty table ddbms.listChildren(testPath); @@ -649,23 +718,22 @@ public void testDeleteTable() throws Exception { ddbms.destroy(); verifyTableNotExist(tableName, dynamoDB); - // delete table once more; be ResourceNotFoundException swallowed silently + // delete table once more; the ResourceNotFoundException swallowed + // silently ddbms.destroy(); verifyTableNotExist(tableName, dynamoDB); - try { - // we can no longer list the destroyed table - ddbms.listChildren(testPath); - 
fail("Should have failed after the table is destroyed!"); - } catch (IOException ignored) { - } + intercept(IOException.class, "", + "Should have failed after the table is destroyed!", + () -> ddbms.listChildren(testPath)); + } finally { ddbms.destroy(); + ddbms.close(); } } @Test public void testTableTagging() throws IOException { - final Configuration conf = getFileSystem().getConf(); - + final Configuration conf = getTableCreationConfig(); // clear all table tagging config before this test conf.getPropsWithPrefix(S3GUARD_DDB_TABLE_TAG).keySet().forEach( propKey -> conf.unset(S3GUARD_DDB_TABLE_TAG + propKey) @@ -683,7 +751,8 @@ public void testTableTagging() throws IOException { conf.set(S3GUARD_DDB_TABLE_TAG + tagEntry.getKey(), tagEntry.getValue()); } - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { + DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore(); + try { ddbms.initialize(conf); assertNotNull(ddbms.getTable()); assertEquals(tableName, ddbms.getTable().getTableName()); @@ -696,6 +765,9 @@ public void testTableTagging() throws IOException { for (Tag tag : tags) { Assert.assertEquals(tagMap.get(tag.getKey()), tag.getValue()); } + } finally { + ddbms.destroy(); + ddbms.close(); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java index 98c1e998ed0aa..45c5e79fad263 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java @@ -44,7 +44,6 @@ import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_REGION_KEY; import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_TABLE_NAME_KEY; import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_TABLE_TAG; -import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestDynamoTablePrefix; import static org.apache.hadoop.fs.s3a.S3AUtils.setBucketOption; import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore.*; import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.*; @@ -178,8 +177,8 @@ public void testDynamoDBInitDestroyCycle() throws Throwable { expectSuccess("Init command did not exit successfully - see output", initCmd, Init.NAME, - "-" + READ_FLAG, "2", - "-" + WRITE_FLAG, "2", + "-" + READ_FLAG, "0", + "-" + WRITE_FLAG, "0", "-" + META_FLAG, "dynamodb://" + testTableName, testS3Url); // Verify it exists @@ -210,39 +209,21 @@ public void testDynamoDBInitDestroyCycle() throws Throwable { testS3Url); assertTrue("No Dynamo diagnostics in output " + info, info.contains(DESCRIPTION)); + assertTrue("No Dynamo diagnostics in output " + info, + info.contains(DESCRIPTION)); // get the current values to set again // play with the set-capacity option + String fsURI = getFileSystem().getUri().toString(); DDBCapacities original = getCapacities(); - String fsURI = getFileSystem().getUri().toString(); - if (!original.isOnDemandTable()) { - // classic provisioned table - assertTrue("Wrong billing mode in " + info, - info.contains(BILLING_MODE_PROVISIONED)); - String capacityOut = exec(newSetCapacity(), - SetCapacity.NAME, - fsURI); - LOG.info("Set Capacity output=\n{}", capacityOut); - capacityOut = exec(newSetCapacity(), - SetCapacity.NAME, - "-" + READ_FLAG, original.getReadStr(), - "-" + WRITE_FLAG, original.getWriteStr(), - fsURI); - LOG.info("Set Capacity output=\n{}", 
capacityOut); - } else { - // on demand table - assertTrue("Wrong billing mode in " + info, - info.contains(BILLING_MODE_PER_REQUEST)); - // on demand tables fail here, so expect that - intercept(IOException.class, E_ON_DEMAND_NO_SET_CAPACITY, - () -> exec(newSetCapacity(), - SetCapacity.NAME, + assertTrue("Wrong billing mode in " + info, + info.contains(BILLING_MODE_PER_REQUEST)); + // per-request tables fail here, so expect that + intercept(IOException.class, E_ON_DEMAND_NO_SET_CAPACITY, + () -> exec(newSetCapacity(), + SetCapacity.NAME, fsURI)); - } - - // that call does not change the values - original.checkEquals("unchanged", getCapacities()); // Destroy MetadataStore Destroy destroyCmd = new Destroy(fs.getConf()); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java index 47544f4eb6204..55f4707fe460f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java @@ -44,7 +44,6 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.HadoopTestBase; -import static org.apache.hadoop.fs.s3a.S3ATestUtils.isMetadataStoreAuthoritative; import static org.apache.hadoop.fs.s3a.S3ATestUtils.metadataStorePersistsAuthoritativeBit; /** From 76b94c274fe9775efcfd51c676d80c88a4f7fdb9 Mon Sep 17 00:00:00 2001 From: Erik Krogen Date: Fri, 7 Jun 2019 14:20:44 -0700 Subject: [PATCH 0149/1308] HADOOP-16345. Fix a potential NPE when instantiating FairCallQueue metrics. Contributed by Erik Krogen. --- .../org/apache/hadoop/ipc/FairCallQueue.java | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java index 380426fe5b07d..b4e953948c657 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java @@ -377,9 +377,21 @@ public void setDelegate(FairCallQueue obj) { this.revisionNumber++; } + /** + * Fetch the current call queue from the weak reference delegate. If there + * is no delegate, or the delegate is empty, this will return null. + */ + private FairCallQueue getCallQueue() { + WeakReference> ref = this.delegate; + if (ref == null) { + return null; + } + return ref.get(); + } + @Override public int[] getQueueSizes() { - FairCallQueue obj = this.delegate.get(); + FairCallQueue obj = getCallQueue(); if (obj == null) { return new int[]{}; } @@ -389,7 +401,7 @@ public int[] getQueueSizes() { @Override public long[] getOverflowedCalls() { - FairCallQueue obj = this.delegate.get(); + FairCallQueue obj = getCallQueue(); if (obj == null) { return new long[]{}; } From 46b23c11b033c76b25897d61de53e9e36bb2b4b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Sat, 8 Jun 2019 05:40:32 +0200 Subject: [PATCH 0150/1308] HDDS-1636. 
Tracing id is not propagated via async datanode grpc call (#895) --- .../hadoop/hdds/scm/XceiverClientGrpc.java | 7 ++- .../scm/client/ContainerOperationClient.java | 14 ++--- .../hdds/scm/storage/BlockInputStream.java | 8 +-- .../hdds/scm/storage/BlockOutputStream.java | 18 ++---- .../hdds/scm/storage/ChunkInputStream.java | 8 +-- .../scm/storage/TestBlockInputStream.java | 7 +-- .../scm/storage/TestChunkInputStream.java | 8 +-- .../scm/storage/ContainerProtocolCalls.java | 63 ++++++------------- .../hadoop/hdds/tracing/StringCodec.java | 2 +- .../client/io/BlockOutputStreamEntry.java | 8 +-- .../ozone/client/io/KeyInputStream.java | 12 ++-- .../hadoop/ozone/client/rpc/RpcClient.java | 11 ++-- .../TestContainerStateMachineIdempotency.java | 11 +--- .../ozone/scm/TestContainerSmallFile.java | 34 +++++----- .../TestGetCommittedBlockLengthAndPutKey.java | 18 ++---- .../ozone/scm/TestXceiverClientManager.java | 9 +-- .../storage/DistributedStorageHandler.java | 2 +- .../hadoop/ozone/om/TestChunkStreams.java | 4 +- 18 files changed, 90 insertions(+), 154 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 13d3eedec340a..8dd3753887f87 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -315,8 +315,13 @@ public XceiverClientReply sendCommandAsync( try (Scope scope = GlobalTracer.get() .buildSpan("XceiverClientGrpc." + request.getCmdType().name()) .startActive(true)) { + + ContainerCommandRequestProto finalPayload = + ContainerCommandRequestProto.newBuilder(request) + .setTraceID(TracingUtil.exportCurrentSpan()) + .build(); XceiverClientReply asyncReply = - sendCommandAsync(request, pipeline.getFirstNode()); + sendCommandAsync(finalPayload, pipeline.getFirstNode()); // TODO : for now make this API sync in nature as async requests are // served out of order over XceiverClientGrpc. This needs to be fixed // if this API is to be used for I/O path. Currently, this is not diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java index b0be34dd9bfff..3077f9fa2e1e4 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java @@ -37,7 +37,6 @@ import java.io.IOException; import java.util.List; -import java.util.UUID; /** * This class provides the client-facing APIs of container operations. @@ -113,8 +112,7 @@ public ContainerWithPipeline createContainer(String owner) */ public void createContainer(XceiverClientSpi client, long containerId) throws IOException { - String traceID = UUID.randomUUID().toString(); - ContainerProtocolCalls.createContainer(client, containerId, traceID, null); + ContainerProtocolCalls.createContainer(client, containerId, null); // Let us log this info after we let SCM know that we have completed the // creation state. 
@@ -257,9 +255,8 @@ public void deleteContainer(long containerId, Pipeline pipeline, XceiverClientSpi client = null; try { client = xceiverClientManager.acquireClient(pipeline); - String traceID = UUID.randomUUID().toString(); ContainerProtocolCalls - .deleteContainer(client, containerId, force, traceID, null); + .deleteContainer(client, containerId, force, null); storageContainerLocationClient .deleteContainer(containerId); if (LOG.isDebugEnabled()) { @@ -307,10 +304,8 @@ public ContainerDataProto readContainer(long containerID, XceiverClientSpi client = null; try { client = xceiverClientManager.acquireClient(pipeline); - String traceID = UUID.randomUUID().toString(); ReadContainerResponseProto response = - ContainerProtocolCalls.readContainer(client, containerID, traceID, - null); + ContainerProtocolCalls.readContainer(client, containerID, null); if (LOG.isDebugEnabled()) { LOG.debug("Read container {}, machines: {} ", containerID, pipeline.getNodes()); @@ -393,7 +388,6 @@ public void closeContainer(long containerId, Pipeline pipeline) */ // Actually close the container on Datanode client = xceiverClientManager.acquireClient(pipeline); - String traceID = UUID.randomUUID().toString(); storageContainerLocationClient.notifyObjectStageChange( ObjectStageChangeRequestProto.Type.container, @@ -401,7 +395,7 @@ public void closeContainer(long containerId, Pipeline pipeline) ObjectStageChangeRequestProto.Op.close, ObjectStageChangeRequestProto.Stage.begin); - ContainerProtocolCalls.closeContainer(client, containerId, traceID, + ContainerProtocolCalls.closeContainer(client, containerId, null); // Notify SCM to close the container storageContainerLocationClient.notifyObjectStageChange( diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index bccbc9bdb96a7..e66db5ffb619c 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -60,7 +60,6 @@ public class BlockInputStream extends InputStream implements Seekable { private Pipeline pipeline; private final Token token; private final boolean verifyChecksum; - private final String traceID; private XceiverClientManager xceiverClientManager; private XceiverClientSpi xceiverClient; private boolean initialized = false; @@ -96,13 +95,12 @@ public class BlockInputStream extends InputStream implements Seekable { public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, Token token, boolean verifyChecksum, - String traceId, XceiverClientManager xceiverClientManager) { + XceiverClientManager xceiverClientManager) { this.blockID = blockId; this.length = blockLen; this.pipeline = pipeline; this.token = token; this.verifyChecksum = verifyChecksum; - this.traceID = traceId; this.xceiverClientManager = xceiverClientManager; } @@ -166,7 +164,7 @@ protected List getChunkInfos() throws IOException { DatanodeBlockID datanodeBlockID = blockID .getDatanodeBlockIDProtobuf(); GetBlockResponseProto response = ContainerProtocolCalls - .getBlock(xceiverClient, datanodeBlockID, traceID); + .getBlock(xceiverClient, datanodeBlockID); chunks = response.getBlockData().getChunksList(); success = true; @@ -185,7 +183,7 @@ protected List getChunkInfos() throws IOException { * Datanode only when a read operation is performed on for that chunk. 
*/ protected synchronized void addStream(ChunkInfo chunkInfo) { - chunkStreams.add(new ChunkInputStream(chunkInfo, blockID, traceID, + chunkStreams.add(new ChunkInputStream(chunkInfo, blockID, xceiverClient, verifyChecksum)); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index 5ca32630c87c4..fc9d4049e967e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -82,7 +82,7 @@ public class BlockOutputStream extends OutputStream { private volatile BlockID blockID; private final String key; - private final String traceID; + private final BlockData.Builder containerBlockData; private XceiverClientManager xceiverClientManager; private XceiverClientSpi xceiverClient; @@ -128,7 +128,6 @@ public class BlockOutputStream extends OutputStream { * @param key chunk key * @param xceiverClientManager client manager that controls client * @param pipeline pipeline where block will be written - * @param traceID container protocol call args * @param chunkSize chunk size * @param bufferPool pool of buffers * @param streamBufferFlushSize flush size @@ -140,13 +139,12 @@ public class BlockOutputStream extends OutputStream { @SuppressWarnings("parameternumber") public BlockOutputStream(BlockID blockID, String key, XceiverClientManager xceiverClientManager, Pipeline pipeline, - String traceID, int chunkSize, long streamBufferFlushSize, + int chunkSize, long streamBufferFlushSize, long streamBufferMaxSize, long watchTimeout, BufferPool bufferPool, ChecksumType checksumType, int bytesPerChecksum) throws IOException { this.blockID = blockID; this.key = key; - this.traceID = traceID; this.chunkSize = chunkSize; KeyValue keyValue = KeyValue.newBuilder().setKey("TYPE").setValue("KEY").build(); @@ -379,13 +377,12 @@ ContainerCommandResponseProto> executePutBlock() List byteBufferList = bufferList; bufferList = null; Preconditions.checkNotNull(byteBufferList); - String requestId = - traceID + ContainerProtos.Type.PutBlock + chunkIndex + blockID; + CompletableFuture flushFuture; try { XceiverClientReply asyncReply = - putBlockAsync(xceiverClient, containerBlockData.build(), requestId); + putBlockAsync(xceiverClient, containerBlockData.build()); CompletableFuture future = asyncReply.getResponse(); flushFuture = future.thenApplyAsync(e -> { @@ -606,13 +603,10 @@ private void writeChunkToContainer(ByteBuffer chunk) throws IOException { .setLen(effectiveChunkSize) .setChecksumData(checksumData.getProtoBufMessage()) .build(); - // generate a unique requestId - String requestId = - traceID + ContainerProtos.Type.WriteChunk + chunkIndex + chunkInfo - .getChunkName(); + try { XceiverClientReply asyncReply = - writeChunkAsync(xceiverClient, chunkInfo, blockID, data, requestId); + writeChunkAsync(xceiverClient, chunkInfo, blockID, data); CompletableFuture future = asyncReply.getResponse(); future.thenApplyAsync(e -> { diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java index 8d30c22540021..f94d2d87340be 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java +++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java @@ -49,7 +49,6 @@ public class ChunkInputStream extends InputStream implements Seekable { private ChunkInfo chunkInfo; private final long length; private final BlockID blockID; - private final String traceID; private XceiverClientSpi xceiverClient; private boolean verifyChecksum; private boolean allocated = false; @@ -76,12 +75,11 @@ public class ChunkInputStream extends InputStream implements Seekable { private static final int EOF = -1; - ChunkInputStream(ChunkInfo chunkInfo, BlockID blockId, - String traceId, XceiverClientSpi xceiverClient, boolean verifyChecksum) { + ChunkInputStream(ChunkInfo chunkInfo, BlockID blockId, + XceiverClientSpi xceiverClient, boolean verifyChecksum) { this.chunkInfo = chunkInfo; this.length = chunkInfo.getLen(); this.blockID = blockId; - this.traceID = traceId; this.xceiverClient = xceiverClient; this.verifyChecksum = verifyChecksum; } @@ -335,7 +333,7 @@ protected ByteString readChunk(ChunkInfo readChunkInfo) throws IOException { validators.add(validator); readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient, - readChunkInfo, blockID, traceID, validators); + readChunkInfo, blockID, validators); } catch (IOException e) { if (e instanceof StorageContainerException) { diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index a1985f05eea44..042bfd941743e 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -63,7 +63,7 @@ public void setup() throws Exception { createChunkList(5); blockStream = new DummyBlockInputStream(blockID, blockSize, null, null, - false, null, null); + false, null); } /** @@ -113,10 +113,9 @@ private class DummyBlockInputStream extends BlockInputStream { Pipeline pipeline, Token token, boolean verifyChecksum, - String traceId, XceiverClientManager xceiverClientManager) { super(blockId, blockLen, pipeline, token, verifyChecksum, - traceId, xceiverClientManager); + xceiverClientManager); } @Override @@ -128,7 +127,7 @@ protected List getChunkInfos() { protected void addStream(ChunkInfo chunkInfo) { TestChunkInputStream testChunkInputStream = new TestChunkInputStream(); getChunkStreams().add(testChunkInputStream.new DummyChunkInputStream( - chunkInfo, null, null, null, false, + chunkInfo, null, null, false, chunkDataMap.get(chunkInfo.getChunkName()).clone())); } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java index b113bc7f68580..a5fe26b5619ab 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java @@ -66,7 +66,7 @@ public void setup() throws Exception { chunkData, 0, CHUNK_SIZE).getProtoBufMessage()) .build(); - chunkStream = new DummyChunkInputStream(chunkInfo, null, null, null, true); + chunkStream = new DummyChunkInputStream(chunkInfo, null, null, true); } static byte[] generateRandomData(int length) { @@ -85,19 +85,17 @@ public class DummyChunkInputStream extends ChunkInputStream { DummyChunkInputStream(ChunkInfo chunkInfo, BlockID 
blockId, - String traceId, XceiverClientSpi xceiverClient, boolean verifyChecksum) { - super(chunkInfo, blockId, traceId, xceiverClient, verifyChecksum); + super(chunkInfo, blockId, xceiverClient, verifyChecksum); } public DummyChunkInputStream(ChunkInfo chunkInfo, BlockID blockId, - String traceId, XceiverClientSpi xceiverClient, boolean verifyChecksum, byte[] data) { - super(chunkInfo, blockId, traceId, xceiverClient, verifyChecksum); + super(chunkInfo, blockId, xceiverClient, verifyChecksum); chunkData = data; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java index 08f5d87f5ec13..c29f3959183e3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java @@ -92,12 +92,11 @@ private ContainerProtocolCalls() { * * @param xceiverClient client to perform call * @param datanodeBlockID blockID to identify container - * @param traceID container protocol call args * @return container protocol get block response * @throws IOException if there is an I/O error while performing the call */ public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, - DatanodeBlockID datanodeBlockID, String traceID) throws IOException { + DatanodeBlockID datanodeBlockID) throws IOException { GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto .newBuilder() .setBlockID(datanodeBlockID); @@ -107,7 +106,6 @@ public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, .newBuilder() .setCmdType(Type.GetBlock) .setContainerID(datanodeBlockID.getContainerID()) - .setTraceID(traceID) .setDatanodeUuid(id) .setGetBlock(readBlockRequest); String encodedToken = getEncodedBlockToken(getService(datanodeBlockID)); @@ -126,13 +124,12 @@ public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, * * @param xceiverClient client to perform call * @param blockID blockId for the Block - * @param traceID container protocol call args * @return container protocol getLastCommittedBlockLength response * @throws IOException if there is an I/O error while performing the call */ public static ContainerProtos.GetCommittedBlockLengthResponseProto getCommittedBlockLength( - XceiverClientSpi xceiverClient, BlockID blockID, String traceID) + XceiverClientSpi xceiverClient, BlockID blockID) throws IOException { ContainerProtos.GetCommittedBlockLengthRequestProto.Builder getBlockLengthRequestBuilder = @@ -143,7 +140,6 @@ public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, ContainerCommandRequestProto.newBuilder() .setCmdType(Type.GetCommittedBlockLength) .setContainerID(blockID.getContainerID()) - .setTraceID(traceID) .setDatanodeUuid(id) .setGetCommittedBlockLength(getBlockLengthRequestBuilder); String encodedToken = getEncodedBlockToken(new Text(blockID. 
@@ -162,20 +158,19 @@ public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, * * @param xceiverClient client to perform call * @param containerBlockData block data to identify container - * @param traceID container protocol call args * @return putBlockResponse * @throws IOException if there is an I/O error while performing the call */ public static ContainerProtos.PutBlockResponseProto putBlock( - XceiverClientSpi xceiverClient, BlockData containerBlockData, - String traceID) throws IOException { + XceiverClientSpi xceiverClient, BlockData containerBlockData) + throws IOException { PutBlockRequestProto.Builder createBlockRequest = PutBlockRequestProto.newBuilder().setBlockData(containerBlockData); String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder().setCmdType(Type.PutBlock) .setContainerID(containerBlockData.getBlockID().getContainerID()) - .setTraceID(traceID).setDatanodeUuid(id) + .setDatanodeUuid(id) .setPutBlock(createBlockRequest); String encodedToken = getEncodedBlockToken(getService(containerBlockData.getBlockID())); @@ -193,15 +188,13 @@ public static ContainerProtos.PutBlockResponseProto putBlock( * * @param xceiverClient client to perform call * @param containerBlockData block data to identify container - * @param traceID container protocol call args * @return putBlockResponse * @throws IOException if there is an error while performing the call * @throws InterruptedException * @throws ExecutionException */ public static XceiverClientReply putBlockAsync( - XceiverClientSpi xceiverClient, BlockData containerBlockData, - String traceID) + XceiverClientSpi xceiverClient, BlockData containerBlockData) throws IOException, InterruptedException, ExecutionException { PutBlockRequestProto.Builder createBlockRequest = PutBlockRequestProto.newBuilder().setBlockData(containerBlockData); @@ -209,7 +202,7 @@ public static XceiverClientReply putBlockAsync( ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder().setCmdType(Type.PutBlock) .setContainerID(containerBlockData.getBlockID().getContainerID()) - .setTraceID(traceID).setDatanodeUuid(id) + .setDatanodeUuid(id) .setPutBlock(createBlockRequest); String encodedToken = getEncodedBlockToken(getService(containerBlockData.getBlockID())); @@ -226,14 +219,13 @@ public static XceiverClientReply putBlockAsync( * @param xceiverClient client to perform call * @param chunk information about chunk to read * @param blockID ID of the block - * @param traceID container protocol call args * @param validators functions to validate the response * @return container protocol read chunk response * @throws IOException if there is an I/O error while performing the call */ public static ContainerProtos.ReadChunkResponseProto readChunk( XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID, - String traceID, List validators) throws IOException { + List validators) throws IOException { ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto.newBuilder() .setBlockID(blockID.getDatanodeBlockIDProtobuf()) @@ -241,7 +233,7 @@ public static ContainerProtos.ReadChunkResponseProto readChunk( String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder().setCmdType(Type.ReadChunk) - .setContainerID(blockID.getContainerID()).setTraceID(traceID) + .setContainerID(blockID.getContainerID()) 
.setDatanodeUuid(id).setReadChunk(readChunkRequest); String encodedToken = getEncodedBlockToken(new Text(blockID. getContainerBlockID().toString())); @@ -261,11 +253,10 @@ public static ContainerProtos.ReadChunkResponseProto readChunk( * @param chunk information about chunk to write * @param blockID ID of the block * @param data the data of the chunk to write - * @param traceID container protocol call args * @throws IOException if there is an error while performing the call */ public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk, - BlockID blockID, ByteString data, String traceID) + BlockID blockID, ByteString data) throws IOException { WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto .newBuilder() @@ -277,7 +268,6 @@ public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk, .newBuilder() .setCmdType(Type.WriteChunk) .setContainerID(blockID.getContainerID()) - .setTraceID(traceID) .setDatanodeUuid(id) .setWriteChunk(writeChunkRequest); String encodedToken = getEncodedBlockToken(new Text(blockID. @@ -296,12 +286,11 @@ public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk, * @param chunk information about chunk to write * @param blockID ID of the block * @param data the data of the chunk to write - * @param traceID container protocol call args * @throws IOException if there is an I/O error while performing the call */ public static XceiverClientReply writeChunkAsync( XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID, - ByteString data, String traceID) + ByteString data) throws IOException, ExecutionException, InterruptedException { WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto.newBuilder() @@ -310,7 +299,7 @@ public static XceiverClientReply writeChunkAsync( String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder().setCmdType(Type.WriteChunk) - .setContainerID(blockID.getContainerID()).setTraceID(traceID) + .setContainerID(blockID.getContainerID()) .setDatanodeUuid(id).setWriteChunk(writeChunkRequest); String encodedToken = getEncodedBlockToken(new Text(blockID. getContainerBlockID().toString())); @@ -330,13 +319,12 @@ public static XceiverClientReply writeChunkAsync( * @param client - client that communicates with the container. * @param blockID - ID of the block * @param data - Data to be written into the container. - * @param traceID - Trace ID for logging purpose. * @return container protocol writeSmallFile response * @throws IOException */ public static PutSmallFileResponseProto writeSmallFile( - XceiverClientSpi client, BlockID blockID, byte[] data, - String traceID) throws IOException { + XceiverClientSpi client, BlockID blockID, byte[] data) + throws IOException { BlockData containerBlockData = BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf()) @@ -369,7 +357,6 @@ public static PutSmallFileResponseProto writeSmallFile( ContainerCommandRequestProto.newBuilder() .setCmdType(Type.PutSmallFile) .setContainerID(blockID.getContainerID()) - .setTraceID(traceID) .setDatanodeUuid(id) .setPutSmallFile(putSmallFileRequest); String encodedToken = getEncodedBlockToken(new Text(blockID. @@ -387,12 +374,11 @@ public static PutSmallFileResponseProto writeSmallFile( * createContainer call that creates a container on the datanode. 
* @param client - client * @param containerID - ID of container - * @param traceID - traceID * @param encodedToken - encodedToken if security is enabled * @throws IOException */ public static void createContainer(XceiverClientSpi client, long containerID, - String traceID, String encodedToken) throws IOException { + String encodedToken) throws IOException { ContainerProtos.CreateContainerRequestProto.Builder createRequest = ContainerProtos.CreateContainerRequestProto .newBuilder(); @@ -409,7 +395,6 @@ public static void createContainer(XceiverClientSpi client, long containerID, request.setContainerID(containerID); request.setCreateContainer(createRequest.build()); request.setDatanodeUuid(id); - request.setTraceID(traceID); client.sendCommand(request.build(), getValidatorList()); } @@ -418,12 +403,11 @@ public static void createContainer(XceiverClientSpi client, long containerID, * * @param client * @param force whether or not to forcibly delete the container. - * @param traceID * @param encodedToken - encodedToken if security is enabled * @throws IOException */ public static void deleteContainer(XceiverClientSpi client, long containerID, - boolean force, String traceID, String encodedToken) throws IOException { + boolean force, String encodedToken) throws IOException { ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest = ContainerProtos.DeleteContainerRequestProto.newBuilder(); deleteRequest.setForceDelete(force); @@ -434,7 +418,6 @@ public static void deleteContainer(XceiverClientSpi client, long containerID, request.setCmdType(ContainerProtos.Type.DeleteContainer); request.setContainerID(containerID); request.setDeleteContainer(deleteRequest); - request.setTraceID(traceID); request.setDatanodeUuid(id); if (encodedToken != null) { request.setEncodedToken(encodedToken); @@ -447,12 +430,11 @@ public static void deleteContainer(XceiverClientSpi client, long containerID, * * @param client * @param containerID - * @param traceID * @param encodedToken - encodedToken if security is enabled * @throws IOException */ public static void closeContainer(XceiverClientSpi client, - long containerID, String traceID, String encodedToken) + long containerID, String encodedToken) throws IOException { String id = client.getPipeline().getFirstNode().getUuidString(); @@ -461,7 +443,6 @@ public static void closeContainer(XceiverClientSpi client, request.setCmdType(Type.CloseContainer); request.setContainerID(containerID); request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance()); - request.setTraceID(traceID); request.setDatanodeUuid(id); if(encodedToken != null) { request.setEncodedToken(encodedToken); @@ -473,13 +454,12 @@ public static void closeContainer(XceiverClientSpi client, * readContainer call that gets meta data from an existing container. 
* * @param client - client - * @param traceID - trace ID * @param encodedToken - encodedToken if security is enabled * @throws IOException */ public static ReadContainerResponseProto readContainer( - XceiverClientSpi client, long containerID, - String traceID, String encodedToken) throws IOException { + XceiverClientSpi client, long containerID, String encodedToken) + throws IOException { String id = client.getPipeline().getFirstNode().getUuidString(); ContainerCommandRequestProto.Builder request = @@ -488,7 +468,6 @@ public static ReadContainerResponseProto readContainer( request.setContainerID(containerID); request.setReadContainer(ReadContainerRequestProto.getDefaultInstance()); request.setDatanodeUuid(id); - request.setTraceID(traceID); if(encodedToken != null) { request.setEncodedToken(encodedToken); } @@ -503,12 +482,11 @@ public static ReadContainerResponseProto readContainer( * * @param client * @param blockID - ID of the block - * @param traceID - trace ID * @return GetSmallFileResponseProto * @throws IOException */ public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, - BlockID blockID, String traceID) throws IOException { + BlockID blockID) throws IOException { GetBlockRequestProto.Builder getBlock = GetBlockRequestProto .newBuilder() .setBlockID(blockID.getDatanodeBlockIDProtobuf()); @@ -522,7 +500,6 @@ public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, .newBuilder() .setCmdType(Type.GetSmallFile) .setContainerID(blockID.getContainerID()) - .setTraceID(traceID) .setDatanodeUuid(id) .setGetSmallFile(getSmallFileRequest); String encodedToken = getEncodedBlockToken(new Text(blockID. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java index 03365cf54ba28..41ba537c85cc9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java @@ -45,7 +45,7 @@ public JaegerSpanContext extract(StringBuilder s) { if (value != null && !value.equals("")) { String[] parts = value.split(":"); if (parts.length != 4) { - LOG.trace("MalformedTracerStateString: {}", value); + LOG.debug("MalformedTracerStateString: {}", value); throw new MalformedTracerStateStringException(value); } else { String traceId = parts[0]; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java index e11eab90e48c2..2888149870695 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java @@ -47,7 +47,6 @@ public final class BlockOutputStreamEntry extends OutputStream { private final Pipeline pipeline; private final ChecksumType checksumType; private final int bytesPerChecksum; - private final String requestId; private final int chunkSize; // total number of bytes that should be written to this stream private final long length; @@ -73,7 +72,6 @@ private BlockOutputStreamEntry(BlockID blockID, String key, this.key = key; this.xceiverClientManager = xceiverClientManager; this.pipeline = pipeline; - this.requestId = requestId; this.chunkSize = chunkSize; this.token = token; this.length = length; @@ -111,7 +109,7 @@ private void checkStream() 
throws IOException { } this.outputStream = new BlockOutputStream(blockID, key, xceiverClientManager, - pipeline, requestId, chunkSize, streamBufferFlushSize, + pipeline, chunkSize, streamBufferFlushSize, streamBufferMaxSize, watchTimeout, bufferPool, checksumType, bytesPerChecksum); } @@ -324,10 +322,6 @@ public Pipeline getPipeline() { return pipeline; } - public String getRequestId() { - return requestId; - } - public int getChunkSize() { return chunkSize; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java index 41ac60f0bd855..fa1672a1fa7d0 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java @@ -76,21 +76,21 @@ public KeyInputStream() { * For each block in keyInfo, add a BlockInputStream to blockStreams. */ public static LengthInputStream getFromOmKeyInfo(OmKeyInfo keyInfo, - XceiverClientManager xceiverClientManager, String requestId, + XceiverClientManager xceiverClientManager, boolean verifyChecksum) { List keyLocationInfos = keyInfo .getLatestVersionLocations().getBlocksLatestVersionOnly(); KeyInputStream keyInputStream = new KeyInputStream(); keyInputStream.initialize(keyInfo.getKeyName(), keyLocationInfos, - xceiverClientManager, requestId, verifyChecksum); + xceiverClientManager, verifyChecksum); return new LengthInputStream(keyInputStream, keyInputStream.length); } private synchronized void initialize(String keyName, List blockInfos, - XceiverClientManager xceiverClientManager, String requestId, + XceiverClientManager xceiverClientManager, boolean verifyChecksum) { this.key = keyName; this.blockOffsets = new long[blockInfos.size()]; @@ -100,7 +100,7 @@ private synchronized void initialize(String keyName, LOG.debug("Adding stream for accessing {}. The stream will be " + "initialized later.", omKeyLocationInfo); - addStream(omKeyLocationInfo, xceiverClientManager, requestId, + addStream(omKeyLocationInfo, xceiverClientManager, verifyChecksum); this.blockOffsets[i] = keyLength; @@ -116,11 +116,11 @@ private synchronized void initialize(String keyName, * the block for the first time. 
*/ private synchronized void addStream(OmKeyLocationInfo blockInfo, - XceiverClientManager xceiverClientMngr, String clientRequestId, + XceiverClientManager xceiverClientMngr, boolean verifyChecksum) { blockStreams.add(new BlockInputStream(blockInfo.getBlockID(), blockInfo.getLength(), blockInfo.getPipeline(), blockInfo.getToken(), - verifyChecksum, clientRequestId, xceiverClientMngr)); + verifyChecksum, xceiverClientMngr)); } @VisibleForTesting diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 5f2df7dd91720..ffb8bce52a544 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -654,7 +654,6 @@ public OzoneInputStream getKey( throws IOException { HddsClientUtils.verifyResourceName(volumeName, bucketName); Preconditions.checkNotNull(keyName); - String requestId = UUID.randomUUID().toString(); OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -662,7 +661,7 @@ public OzoneInputStream getKey( .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); - return createInputStream(keyInfo, requestId); + return createInputStream(keyInfo); } @Override @@ -984,7 +983,7 @@ public OzoneInputStream readFile(String volumeName, String bucketName, .setKeyName(keyName) .build(); OmKeyInfo keyInfo = ozoneManagerClient.lookupFile(keyArgs); - return createInputStream(keyInfo, UUID.randomUUID().toString()); + return createInputStream(keyInfo); } @Override @@ -1069,10 +1068,10 @@ public List getAcl(OzoneObj obj) throws IOException { return ozoneManagerClient.getAcl(obj); } - private OzoneInputStream createInputStream(OmKeyInfo keyInfo, - String requestId) throws IOException { + private OzoneInputStream createInputStream(OmKeyInfo keyInfo) + throws IOException { LengthInputStream lengthInputStream = KeyInputStream - .getFromOmKeyInfo(keyInfo, xceiverClientManager, requestId, + .getFromOmKeyInfo(keyInfo, xceiverClientManager, verifyChecksum); FileEncryptionInfo feInfo = keyInfo.getFileEncryptionInfo(); if (feInfo != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java index 41d3198765d99..2d2d028884a36 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java @@ -44,7 +44,6 @@ import org.junit.Test; import java.io.IOException; -import java.util.UUID; /** * Tests the idempotent operations in ContainerStateMachine. 
@@ -80,7 +79,6 @@ public static void shutdown() { @Test public void testContainerStateMachineIdempotency() throws Exception { - String traceID = UUID.randomUUID().toString(); ContainerWithPipeline container = storageContainerLocationClient .allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, containerOwner); @@ -89,8 +87,7 @@ public void testContainerStateMachineIdempotency() throws Exception { XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline); try { //create the container - ContainerProtocolCalls.createContainer(client, containerID, traceID, - null); + ContainerProtocolCalls.createContainer(client, containerID, null); // call create Container again BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); byte[] data = @@ -112,10 +109,8 @@ public void testContainerStateMachineIdempotency() throws Exception { client.sendCommand(putKeyRequest); // close container call - ContainerProtocolCalls.closeContainer(client, containerID, traceID, - null); - ContainerProtocolCalls.closeContainer(client, containerID, traceID, - null); + ContainerProtocolCalls.closeContainer(client, containerID, null); + ContainerProtocolCalls.closeContainer(client, containerID, null); } catch (IOException ioe) { Assert.fail("Container operation failed" + ioe); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java index 08fa4e54cbcb6..4c62c70db7f06 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java @@ -41,8 +41,6 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import java.util.UUID; - /** * Test Container calls. 
*/ @@ -80,7 +78,6 @@ public static void shutdown() throws InterruptedException { @Test public void testAllocateWrite() throws Exception { - String traceID = UUID.randomUUID().toString(); ContainerWithPipeline container = storageContainerLocationClient.allocateContainer( xceiverClientManager.getType(), @@ -88,14 +85,14 @@ public void testAllocateWrite() throws Exception { XceiverClientSpi client = xceiverClientManager .acquireClient(container.getPipeline()); ContainerProtocolCalls.createContainer(client, - container.getContainerInfo().getContainerID(), traceID, null); + container.getContainerInfo().getContainerID(), null); BlockID blockID = ContainerTestHelper.getTestBlockID( container.getContainerInfo().getContainerID()); ContainerProtocolCalls.writeSmallFile(client, blockID, - "data123".getBytes(), traceID); + "data123".getBytes()); ContainerProtos.GetSmallFileResponseProto response = - ContainerProtocolCalls.readSmallFile(client, blockID, traceID); + ContainerProtocolCalls.readSmallFile(client, blockID); String readData = response.getData().getData().toStringUtf8(); Assert.assertEquals("data123", readData); xceiverClientManager.releaseClient(client, false); @@ -103,7 +100,6 @@ public void testAllocateWrite() throws Exception { @Test public void testInvalidBlockRead() throws Exception { - String traceID = UUID.randomUUID().toString(); ContainerWithPipeline container = storageContainerLocationClient.allocateContainer( xceiverClientManager.getType(), @@ -111,7 +107,7 @@ public void testInvalidBlockRead() throws Exception { XceiverClientSpi client = xceiverClientManager .acquireClient(container.getPipeline()); ContainerProtocolCalls.createContainer(client, - container.getContainerInfo().getContainerID(), traceID, null); + container.getContainerInfo().getContainerID(), null); thrown.expect(StorageContainerException.class); thrown.expectMessage("Unable to find the block"); @@ -120,13 +116,12 @@ public void testInvalidBlockRead() throws Exception { container.getContainerInfo().getContainerID()); // Try to read a Key Container Name ContainerProtos.GetSmallFileResponseProto response = - ContainerProtocolCalls.readSmallFile(client, blockID, traceID); + ContainerProtocolCalls.readSmallFile(client, blockID); xceiverClientManager.releaseClient(client, false); } @Test public void testInvalidContainerRead() throws Exception { - String traceID = UUID.randomUUID().toString(); long nonExistContainerID = 8888L; ContainerWithPipeline container = storageContainerLocationClient.allocateContainer( @@ -135,11 +130,11 @@ public void testInvalidContainerRead() throws Exception { XceiverClientSpi client = xceiverClientManager .acquireClient(container.getPipeline()); ContainerProtocolCalls.createContainer(client, - container.getContainerInfo().getContainerID(), traceID, null); + container.getContainerInfo().getContainerID(), null); BlockID blockID = ContainerTestHelper.getTestBlockID( container.getContainerInfo().getContainerID()); ContainerProtocolCalls.writeSmallFile(client, blockID, - "data123".getBytes(), traceID); + "data123".getBytes()); thrown.expect(StorageContainerException.class); thrown.expectMessage("ContainerID 8888 does not exist"); @@ -148,13 +143,12 @@ public void testInvalidContainerRead() throws Exception { ContainerProtos.GetSmallFileResponseProto response = ContainerProtocolCalls.readSmallFile(client, ContainerTestHelper.getTestBlockID( - nonExistContainerID), traceID); + nonExistContainerID)); xceiverClientManager.releaseClient(client, false); } @Test public void testReadWriteWithBCSId() throws 
Exception { - String traceID = UUID.randomUUID().toString(); ContainerWithPipeline container = storageContainerLocationClient.allocateContainer( HddsProtos.ReplicationType.RATIS, @@ -162,20 +156,20 @@ public void testReadWriteWithBCSId() throws Exception { XceiverClientSpi client = xceiverClientManager .acquireClient(container.getPipeline()); ContainerProtocolCalls.createContainer(client, - container.getContainerInfo().getContainerID(), traceID, null); + container.getContainerInfo().getContainerID(), null); BlockID blockID1 = ContainerTestHelper.getTestBlockID( container.getContainerInfo().getContainerID()); ContainerProtos.PutSmallFileResponseProto responseProto = ContainerProtocolCalls - .writeSmallFile(client, blockID1, "data123".getBytes(), traceID); + .writeSmallFile(client, blockID1, "data123".getBytes()); long bcsId = responseProto.getCommittedBlockLength().getBlockID() .getBlockCommitSequenceId(); try { blockID1.setBlockCommitSequenceId(bcsId + 1); //read a file with higher bcsId than the container bcsId ContainerProtocolCalls - .readSmallFile(client, blockID1, traceID); + .readSmallFile(client, blockID1); Assert.fail("Expected exception not thrown"); } catch (StorageContainerException sce) { Assert @@ -186,12 +180,12 @@ public void testReadWriteWithBCSId() throws Exception { BlockID blockID2 = ContainerTestHelper .getTestBlockID(container.getContainerInfo().getContainerID()); ContainerProtocolCalls - .writeSmallFile(client, blockID2, "data123".getBytes(), traceID); + .writeSmallFile(client, blockID2, "data123".getBytes()); try { blockID1.setBlockCommitSequenceId(bcsId + 1); //read a file with higher bcsId than the committed bcsId for the block - ContainerProtocolCalls.readSmallFile(client, blockID1, traceID); + ContainerProtocolCalls.readSmallFile(client, blockID1); Assert.fail("Expected exception not thrown"); } catch (StorageContainerException sce) { Assert @@ -199,7 +193,7 @@ public void testReadWriteWithBCSId() throws Exception { } blockID1.setBlockCommitSequenceId(bcsId); ContainerProtos.GetSmallFileResponseProto response = - ContainerProtocolCalls.readSmallFile(client, blockID1, traceID); + ContainerProtocolCalls.readSmallFile(client, blockID1); String readData = response.getData().getData().toStringUtf8(); Assert.assertEquals("data123", readData); xceiverClientManager.releaseClient(client, false); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java index acef63c47b01b..8e4645f01af86 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java @@ -46,9 +46,6 @@ import org.junit.BeforeClass; import org.junit.Test; - -import java.util.UUID; - /** * Test Container calls. 
*/ @@ -85,7 +82,6 @@ public static void shutdown() throws InterruptedException { @Test public void tesGetCommittedBlockLength() throws Exception { ContainerProtos.GetCommittedBlockLengthResponseProto response; - String traceID = UUID.randomUUID().toString(); ContainerWithPipeline container = storageContainerLocationClient .allocateContainer(xceiverClientManager.getType(), HddsProtos.ReplicationFactor.ONE, containerOwner); @@ -93,7 +89,7 @@ public void tesGetCommittedBlockLength() throws Exception { Pipeline pipeline = container.getPipeline(); XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline); //create the container - ContainerProtocolCalls.createContainer(client, containerID, traceID, null); + ContainerProtocolCalls.createContainer(client, containerID, null); BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); byte[] data = @@ -109,7 +105,7 @@ public void tesGetCommittedBlockLength() throws Exception { .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk()); client.sendCommand(putKeyRequest); response = ContainerProtocolCalls - .getCommittedBlockLength(client, blockID, traceID); + .getCommittedBlockLength(client, blockID); // make sure the block ids in the request and response are same. Assert.assertTrue( BlockID.getFromProtobuf(response.getBlockID()).equals(blockID)); @@ -119,22 +115,21 @@ public void tesGetCommittedBlockLength() throws Exception { @Test public void testGetCommittedBlockLengthForInvalidBlock() throws Exception { - String traceID = UUID.randomUUID().toString(); ContainerWithPipeline container = storageContainerLocationClient .allocateContainer(xceiverClientManager.getType(), HddsProtos.ReplicationFactor.ONE, containerOwner); long containerID = container.getContainerInfo().getContainerID(); XceiverClientSpi client = xceiverClientManager .acquireClient(container.getPipeline()); - ContainerProtocolCalls.createContainer(client, containerID, traceID, null); + ContainerProtocolCalls.createContainer(client, containerID, null); BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); // move the container to closed state - ContainerProtocolCalls.closeContainer(client, containerID, traceID, null); + ContainerProtocolCalls.closeContainer(client, containerID, null); try { // There is no block written inside the container. The request should // fail. 
- ContainerProtocolCalls.getCommittedBlockLength(client, blockID, traceID); + ContainerProtocolCalls.getCommittedBlockLength(client, blockID); Assert.fail("Expected exception not thrown"); } catch (StorageContainerException sce) { Assert.assertTrue(sce.getMessage().contains("Unable to find the block")); @@ -145,7 +140,6 @@ public void testGetCommittedBlockLengthForInvalidBlock() throws Exception { @Test public void tesPutKeyResposne() throws Exception { ContainerProtos.PutBlockResponseProto response; - String traceID = UUID.randomUUID().toString(); ContainerWithPipeline container = storageContainerLocationClient .allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, containerOwner); @@ -153,7 +147,7 @@ public void tesPutKeyResposne() throws Exception { Pipeline pipeline = container.getPipeline(); XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline); //create the container - ContainerProtocolCalls.createContainer(client, containerID, traceID, null); + ContainerProtocolCalls.createContainer(client, containerID, null); BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); byte[] data = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java index 82ae349bb156c..9d4ed68082311 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.scm; import com.google.common.cache.Cache; -import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -144,9 +143,8 @@ public void testFreeByReference() throws IOException { + container1.getContainerInfo().getReplicationType()); Assert.assertEquals(null, nonExistent1); // However container call should succeed because of refcount on the client. 
- String traceID1 = "trace" + RandomStringUtils.randomNumeric(4); ContainerProtocolCalls.createContainer(client1, - container1.getContainerInfo().getContainerID(), traceID1, null); + container1.getContainerInfo().getContainerID(), null); // After releasing the client, this connection should be closed // and any container operations should fail @@ -155,7 +153,7 @@ public void testFreeByReference() throws IOException { String expectedMessage = "This channel is not connected."; try { ContainerProtocolCalls.createContainer(client1, - container1.getContainerInfo().getContainerID(), traceID1, null); + container1.getContainerInfo().getContainerID(), null); Assert.fail("Create container should throw exception on closed" + "client"); } catch (Exception e) { @@ -202,11 +200,10 @@ public void testFreeByEviction() throws IOException { Assert.assertEquals(null, nonExistent); // Any container operation should now fail - String traceID2 = "trace" + RandomStringUtils.randomNumeric(4); String expectedMessage = "This channel is not connected."; try { ContainerProtocolCalls.createContainer(client1, - container1.getContainerInfo().getContainerID(), traceID2, null); + container1.getContainerInfo().getContainerID(), null); Assert.fail("Create container should throw exception on closed" + "client"); } catch (Exception e) { diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java index 6876166f8b0c0..7041a84c8caba 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java @@ -503,7 +503,7 @@ public LengthInputStream newKeyReader(KeyArgs args) throws IOException, .build(); OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); return KeyInputStream.getFromOmKeyInfo( - keyInfo, xceiverClientManager, args.getRequestID(), verifyChecksum); + keyInfo, xceiverClientManager, verifyChecksum); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java index 80717dde86fda..78e1c4456ee93 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java @@ -47,7 +47,7 @@ public void testReadGroupInputStream() throws Exception { for (int i = 0; i < 5; i++) { int tempOffset = offset; BlockInputStream in = - new BlockInputStream(null, 100, null, null, true, null, null) { + new BlockInputStream(null, 100, null, null, true, null) { private long pos = 0; private ByteArrayInputStream in = new ByteArrayInputStream(buf, tempOffset, 100); @@ -103,7 +103,7 @@ public void testErrorReadGroupInputStream() throws Exception { for (int i = 0; i < 5; i++) { int tempOffset = offset; BlockInputStream in = - new BlockInputStream(null, 100, null, null, true, null, null) { + new BlockInputStream(null, 100, null, null, true, null) { private long pos = 0; private ByteArrayInputStream in = new ByteArrayInputStream(buf, tempOffset, 100); From 9deac3b6bf46ff8875cdf2dfa6f7064f9379bccd Mon Sep 17 00:00:00 2001 From: Ajay Yadav <7813154+ajayydv@users.noreply.github.com> Date: Fri, 7 Jun 2019 21:47:14 -0700 
Subject: [PATCH 0151/1308] HDDS-1657. Fix parallelStream usage in volume and key native acl. Contributed by Ajay Kumar. (#926) --- .../OzoneManagerProtocolClientSideTranslatorPB.java | 2 +- .../ozone/client/rpc/TestOzoneRpcClientAbstract.java | 8 ++++++-- .../ozone/protocolPB/OzoneManagerRequestHandler.java | 3 +-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index c93ed3cabac83..149f5582682f1 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -1377,7 +1377,7 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { SetAclRequest.Builder builder = SetAclRequest.newBuilder() .setObj(OzoneObj.toProtobuf(obj)); - acls.parallelStream().forEach(a -> builder.addAcl(OzoneAcl.toProtobuf(a))); + acls.forEach(a -> builder.addAcl(OzoneAcl.toProtobuf(a))); OMRequest omRequest = createOMRequest(Type.SetAcl) .setSetAclRequest(builder.build()) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 2a03107254be1..b0f7888c3be56 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -2291,9 +2291,13 @@ private void validateOzoneAcl(OzoneObj ozObj) throws IOException { expectedAcls.forEach(a -> assertTrue(finalNewAcls.contains(a))); // Reset acl's. 
- store.setAcl(ozObj, new ArrayList<>()); + OzoneAcl ua = new OzoneAcl(ACLIdentityType.USER, "userx", ACLType.READ_ACL); + OzoneAcl ug = new OzoneAcl(ACLIdentityType.GROUP, "userx", ACLType.ALL); + store.setAcl(ozObj, Arrays.asList(ua, ug)); newAcls = store.getAcl(ozObj); - assertTrue(newAcls.size() == 0); + assertTrue(newAcls.size() == 2); + assertTrue(newAcls.contains(ua)); + assertTrue(newAcls.contains(ug)); } private void writeKey(String key1, OzoneBucket bucket) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 568262f55548b..6ea1a2b6d7cb8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -411,8 +411,7 @@ private RemoveAclResponse removeAcl(RemoveAclRequest req) private SetAclResponse setAcl(SetAclRequest req) throws IOException { List ozoneAcl = new ArrayList<>(); - req.getAclList().parallelStream().forEach(a -> - ozoneAcl.add(OzoneAcl.fromProtobuf(a))); + req.getAclList().forEach(a -> ozoneAcl.add(OzoneAcl.fromProtobuf(a))); boolean response = impl.setAcl(OzoneObjInfo.fromProtobuf(req.getObj()), ozoneAcl); return SetAclResponse.newBuilder().setResponse(response).build(); From fcfe7a3cc0568aa71568b83ce35d8c5b75e4ddaf Mon Sep 17 00:00:00 2001 From: Shweta Yakkali Date: Sat, 8 Jun 2019 22:12:29 -0700 Subject: [PATCH 0152/1308] HDDS-1655. Redundant toString() call for metaDataPath in KeyValueContainerCheck (#932) --- .../ozone/container/keyvalue/KeyValueContainerCheck.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index 373408bc6cc9c..ebbd4e01d0b4b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -279,9 +279,8 @@ private void iterateBlockDB(ReferenceCountedDB db) } private void loadContainerData() throws IOException { - File containerFile = KeyValueContainer - .getContainerFile(metadataPath.toString(), containerID); + .getContainerFile(metadataPath, containerID); onDiskContainerData = (KeyValueContainerData) ContainerDataYaml .readContainerFile(containerFile); From d6d95d2686bfc1d1e5511f60f169195734e2998e Mon Sep 17 00:00:00 2001 From: Shweta Yakkali Date: Mon, 10 Jun 2019 10:22:16 -0700 Subject: [PATCH 0153/1308] HDFS-14494. Move Server logging of StatedId inside receiveRequestState(). Contributed by Shweta Yakkali. 
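After this change the state-ID comparison and its trace message live in the same method, so the message is emitted by the FSNamesystem logger rather than by org.apache.hadoop.ipc.Server. A minimal, illustrative sketch of how to surface it, assuming the log4j 1.x setup Hadoop ships with:

    // Illustration only: enable TRACE on the logger that now emits the state IDs
    org.apache.log4j.Logger
        .getLogger(org.apache.hadoop.hdfs.server.namenode.FSNamesystem.class)
        .setLevel(org.apache.log4j.Level.TRACE);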
Signed-off-by: Wei-Chiu Chuang --- .../src/main/java/org/apache/hadoop/ipc/Server.java | 2 -- .../hadoop/hdfs/server/namenode/GlobalStateIdContext.java | 6 ++++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 6fc907fb2e258..9018bed80e2f1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -2676,8 +2676,6 @@ private void processRpcRequest(RpcRequestHeaderProto header, stateId = alignmentContext.receiveRequestState( header, getMaxIdleTime()); call.setClientStateId(stateId); - LOG.trace("Client State ID= {} and Server State ID= {}", - call.getClientStateId(), alignmentContext.getLastSeenStateId()); } } catch (IOException ioe) { throw new RpcServerException("Processing RPC request caught ", ioe); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GlobalStateIdContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GlobalStateIdContext.java index 9f3a135336b9b..738d0a3ac942d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GlobalStateIdContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GlobalStateIdContext.java @@ -124,9 +124,11 @@ public void updateRequestState(RpcRequestHeaderProto.Builder header) { @Override public long receiveRequestState(RpcRequestHeaderProto header, long clientWaitTime) throws RetriableException { - long serverStateId = - namesystem.getFSImage().getCorrectLastAppliedOrWrittenTxId(); + long serverStateId = getLastSeenStateId(); long clientStateId = header.getStateId(); + FSNamesystem.LOG.trace("Client State ID= {} and Server State ID= {}", + clientStateId, serverStateId); + if (clientStateId > serverStateId && HAServiceState.ACTIVE.equals(namesystem.getState())) { FSNamesystem.LOG.warn("The client stateId: {} is greater than " From e94e6435842c5b9dc0f5fe681e0829d33dd5b24e Mon Sep 17 00:00:00 2001 From: Adam Antal Date: Mon, 10 Jun 2019 12:17:16 -0700 Subject: [PATCH 0154/1308] YARN-9471. Cleanup in TestLogAggregationIndexFileController. Contributed by Adam Antal. 
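The bulk of this cleanup swaps Assert.assertTrue(a.equals(b)) and assertTrue(x == n) style checks for assertEquals, which reports both the expected and the actual value when a test fails; a minimal before/after illustration, mirroring the hunks below:

    // before: a failure only says the condition was false
    Assert.assertTrue(log.getContainerId().equals(containerId.toString()));
    Assert.assertTrue(log.getContainerLogMeta().size() == 3);

    // after: a failure prints expected vs. actual
    assertEquals(containerId.toString(), log.getContainerId());
    assertEquals(3, log.getContainerLogMeta().size());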
Signed-off-by: Wei-Chiu Chuang --- ...tLogAggregationIndexedFileController.java} | 134 +++++++++--------- 1 file changed, 68 insertions(+), 66 deletions(-) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/{TestLogAggregationIndexFileController.java => TestLogAggregationIndexedFileController.java} (82%) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexedFileController.java similarity index 82% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexedFileController.java index 64e0b20957cd9..e63e469d982f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexedFileController.java @@ -18,11 +18,6 @@ package org.apache.hadoop.yarn.logaggregation.filecontroller.ifile; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertNotNull; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileWriter; @@ -39,6 +34,7 @@ import java.util.Set; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; @@ -64,15 +60,23 @@ import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.ControlledClock; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + /** - * Function test for {@link LogAggregationIndexFileController}. + * Function test for {@link LogAggregationIndexedFileController}. 
* */ -public class TestLogAggregationIndexFileController { +public class TestLogAggregationIndexedFileController + extends Configured { private final String rootLocalLogDir = "target/LocalLogs"; private final Path rootLocalLogDirPath = new Path(rootLocalLogDir); @@ -82,37 +86,37 @@ public class TestLogAggregationIndexFileController { private static final UserGroupInformation USER_UGI = UserGroupInformation .createRemoteUser("testUser"); private FileSystem fs; - private Configuration conf; private ApplicationId appId; private ContainerId containerId; private NodeId nodeId; private ByteArrayOutputStream sysOutStream; - private PrintStream sysOut; - private ByteArrayOutputStream sysErrStream; - private PrintStream sysErr; + private Configuration getTestConf() { + Configuration conf = new Configuration(); + conf.set("yarn.log-aggregation.Indexed.remote-app-log-dir", + remoteLogDir); + conf.set("yarn.log-aggregation.Indexed.remote-app-log-dir-suffix", + "logs"); + conf.set(YarnConfiguration.NM_LOG_AGG_COMPRESSION_TYPE, "gz"); + return conf; + } @Before public void setUp() throws IOException { + setConf(getTestConf()); appId = ApplicationId.newInstance(123456, 1); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance( appId, 1); containerId = ContainerId.newContainerId(attemptId, 1); nodeId = NodeId.newInstance("localhost", 9999); - conf = new Configuration(); - conf.set("yarn.log-aggregation.Indexed.remote-app-log-dir", - remoteLogDir); - conf.set("yarn.log-aggregation.Indexed.remote-app-log-dir-suffix", - "logs"); - conf.set(YarnConfiguration.NM_LOG_AGG_COMPRESSION_TYPE, "gz"); - fs = FileSystem.get(conf); + fs = FileSystem.get(getConf()); sysOutStream = new ByteArrayOutputStream(); - sysOut = new PrintStream(sysOutStream); + PrintStream sysOut = new PrintStream(sysOutStream); System.setOut(sysOut); - sysErrStream = new ByteArrayOutputStream(); - sysErr = new PrintStream(sysErrStream); + ByteArrayOutputStream sysErrStream = new ByteArrayOutputStream(); + PrintStream sysErr = new PrintStream(sysErrStream); System.setErr(sysErr); } @@ -173,7 +177,7 @@ public boolean isRollover(final FileContext fc, } }; - fileFormat.initialize(conf, "Indexed"); + fileFormat.initialize(getConf(), "Indexed"); Map appAcls = new HashMap<>(); Path appDir = fileFormat.getRemoteAppLogDir(appId, @@ -203,28 +207,28 @@ public boolean isRollover(final FileContext fc, logRequest.setBytes(Long.MAX_VALUE); List meta = fileFormat.readAggregatedLogsMeta( logRequest); - Assert.assertTrue(meta.size() == 1); + assertEquals(1, meta.size()); List fileNames = new ArrayList<>(); for (ContainerLogMeta log : meta) { - Assert.assertTrue(log.getContainerId().equals(containerId.toString())); - Assert.assertTrue(log.getNodeId().equals(nodeId.toString())); - Assert.assertTrue(log.getContainerLogMeta().size() == 3); + assertEquals(containerId.toString(), log.getContainerId()); + assertEquals(nodeId.toString(), log.getNodeId()); + assertEquals(3, log.getContainerLogMeta().size()); for (ContainerLogFileInfo file : log.getContainerLogMeta()) { fileNames.add(file.getFileName()); } } fileNames.removeAll(logTypes); - Assert.assertTrue(fileNames.isEmpty()); + assertTrue(fileNames.isEmpty()); boolean foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out); - Assert.assertTrue(foundLogs); + assertTrue(foundLogs); for (String logType : logTypes) { - Assert.assertTrue(sysOutStream.toString().contains(logMessage( + assertTrue(sysOutStream.toString().contains(logMessage( containerId, logType))); } sysOutStream.reset(); - 
Configuration factoryConf = new Configuration(conf); + Configuration factoryConf = new Configuration(getConf()); factoryConf.set("yarn.log-aggregation.file-formats", "Indexed"); factoryConf.set("yarn.log-aggregation.file-controller.Indexed.class", "org.apache.hadoop.yarn.logaggregation.filecontroller.ifile" @@ -233,12 +237,12 @@ public boolean isRollover(final FileContext fc, new LogAggregationFileControllerFactory(factoryConf); LogAggregationFileController fileController = factory .getFileControllerForRead(appId, USER_UGI.getShortUserName()); - Assert.assertTrue(fileController instanceof + assertTrue(fileController instanceof LogAggregationIndexedFileController); foundLogs = fileController.readAggregatedLogs(logRequest, System.out); - Assert.assertTrue(foundLogs); + assertTrue(foundLogs); for (String logType : logTypes) { - Assert.assertTrue(sysOutStream.toString().contains(logMessage( + assertTrue(sysOutStream.toString().contains(logMessage( containerId, logType))); } sysOutStream.reset(); @@ -261,12 +265,12 @@ public boolean isRollover(final FileContext fc, } meta = fileFormat.readAggregatedLogsMeta( logRequest); - Assert.assertTrue(meta.size() == 0); + assertTrue(meta.isEmpty()); foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out); - Assert.assertFalse(foundLogs); + assertFalse(foundLogs); sysOutStream.reset(); fs.delete(checksumFile, false); - Assert.assertFalse(fs.exists(checksumFile)); + assertFalse(fs.exists(checksumFile)); List newLogTypes = new ArrayList<>(logTypes); files.clear(); @@ -291,24 +295,24 @@ public boolean isRollover(final FileContext fc, logRequest); assertThat(meta.size()).isEqualTo(1); for (ContainerLogMeta log : meta) { - Assert.assertTrue(log.getContainerId().equals(containerId.toString())); - Assert.assertTrue(log.getNodeId().equals(nodeId.toString())); - Assert.assertTrue(log.getContainerLogMeta().size() == 3); + assertEquals(containerId.toString(), log.getContainerId()); + assertEquals(nodeId.toString(), log.getNodeId()); + assertEquals(3, log.getContainerLogMeta().size()); for (ContainerLogFileInfo file : log.getContainerLogMeta()) { fileNames.add(file.getFileName()); } } fileNames.removeAll(logTypes); - Assert.assertTrue(fileNames.isEmpty()); + assertTrue(fileNames.isEmpty()); foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out); - Assert.assertTrue(foundLogs); + assertTrue(foundLogs); for (String logType : logTypes) { - Assert.assertTrue(sysOutStream.toString().contains(logMessage( + assertTrue(sysOutStream.toString().contains(logMessage( containerId, logType))); } - Assert.assertFalse(sysOutStream.toString().contains(logMessage( + assertFalse(sysOutStream.toString().contains(logMessage( containerId, "test1"))); - Assert.assertFalse(sysOutStream.toString().contains(logMessage( + assertFalse(sysOutStream.toString().contains(logMessage( containerId, "test2"))); sysOutStream.reset(); @@ -322,18 +326,18 @@ public boolean isRollover(final FileContext fc, logRequest); assertThat(meta.size()).isEqualTo(2); for (ContainerLogMeta log : meta) { - Assert.assertTrue(log.getContainerId().equals(containerId.toString())); - Assert.assertTrue(log.getNodeId().equals(nodeId.toString())); + assertEquals(containerId.toString(), log.getContainerId()); + assertEquals(nodeId.toString(), log.getNodeId()); for (ContainerLogFileInfo file : log.getContainerLogMeta()) { fileNames.add(file.getFileName()); } } fileNames.removeAll(newLogTypes); - Assert.assertTrue(fileNames.isEmpty()); + assertTrue(fileNames.isEmpty()); foundLogs = 
fileFormat.readAggregatedLogs(logRequest, System.out); - Assert.assertTrue(foundLogs); + assertTrue(foundLogs); for (String logType : newLogTypes) { - Assert.assertTrue(sysOutStream.toString().contains(logMessage( + assertTrue(sysOutStream.toString().contains(logMessage( containerId, logType))); } sysOutStream.reset(); @@ -345,23 +349,23 @@ public boolean isRollover(final FileContext fc, fileFormat.postWrite(context); fileFormat.closeWriter(); FileStatus[] status = fs.listStatus(logPath.getParent()); - Assert.assertTrue(status.length == 2); + assertEquals(2, status.length); meta = fileFormat.readAggregatedLogsMeta( logRequest); assertThat(meta.size()).isEqualTo(3); for (ContainerLogMeta log : meta) { - Assert.assertTrue(log.getContainerId().equals(containerId.toString())); - Assert.assertTrue(log.getNodeId().equals(nodeId.toString())); + assertEquals(containerId.toString(), log.getContainerId()); + assertEquals(nodeId.toString(), log.getNodeId()); for (ContainerLogFileInfo file : log.getContainerLogMeta()) { fileNames.add(file.getFileName()); } } fileNames.removeAll(newLogTypes); - Assert.assertTrue(fileNames.isEmpty()); + assertTrue(fileNames.isEmpty()); foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out); - Assert.assertTrue(foundLogs); + assertTrue(foundLogs); for (String logType : newLogTypes) { - Assert.assertTrue(sysOutStream.toString().contains(logMessage( + assertTrue(sysOutStream.toString().contains(logMessage( containerId, logType))); } sysOutStream.reset(); @@ -390,7 +394,7 @@ public void testFetchApplictionLogsHar() throws Exception { assertTrue(fs.exists(harPath)); LogAggregationIndexedFileController fileFormat = new LogAggregationIndexedFileController(); - fileFormat.initialize(conf, "Indexed"); + fileFormat.initialize(getConf(), "Indexed"); ContainerLogsRequest logRequest = new ContainerLogsRequest(); logRequest.setAppId(appId); logRequest.setNodeId(nodeId.toString()); @@ -399,21 +403,21 @@ public void testFetchApplictionLogsHar() throws Exception { logRequest.setBytes(Long.MAX_VALUE); List meta = fileFormat.readAggregatedLogsMeta( logRequest); - Assert.assertEquals(meta.size(), 3); + assertEquals(3, meta.size()); List fileNames = new ArrayList<>(); for (ContainerLogMeta log : meta) { - Assert.assertTrue(log.getContainerId().equals(containerId.toString())); - Assert.assertTrue(log.getNodeId().equals(nodeId.toString())); + assertEquals(containerId.toString(), log.getContainerId()); + assertEquals(nodeId.toString(), log.getNodeId()); for (ContainerLogFileInfo file : log.getContainerLogMeta()) { fileNames.add(file.getFileName()); } } fileNames.removeAll(newLogTypes); - Assert.assertTrue(fileNames.isEmpty()); + assertTrue(fileNames.isEmpty()); boolean foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out); - Assert.assertTrue(foundLogs); + assertTrue(foundLogs); for (String logType : newLogTypes) { - Assert.assertTrue(sysOutStream.toString().contains(logMessage( + assertTrue(sysOutStream.toString().contains(logMessage( containerId, logType))); } sysOutStream.reset(); @@ -438,8 +442,6 @@ private File createAndWriteLocalLogFile(ContainerId containerId, } private String logMessage(ContainerId containerId, String logType) { - StringBuilder sb = new StringBuilder(); - sb.append("Hello " + containerId + " in " + logType + "!"); - return sb.toString(); + return "Hello " + containerId + " in " + logType + "!"; } } From 7217494f40dd99068a3f3b155261b1dac6c67828 Mon Sep 17 00:00:00 2001 From: Hanisha Koneru Date: Mon, 10 Jun 2019 13:43:56 -0700 Subject: [PATCH 
0155/1308] HDFS-10659. Namenode crashes after Journalnode re-installation in an HA cluster due to missing paxos directory. Contributed by star, Hanisha Koneru. Signed-off-by: Wei-Chiu Chuang --- .../hdfs/qjournal/server/JNStorage.java | 30 +++++++++++-------- .../hadoop/hdfs/qjournal/server/Journal.java | 2 +- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java index 612fd3d19f51d..305f1e87ef9d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java @@ -82,7 +82,8 @@ FileJournalManager getJournalManager() { } @Override - public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException { + public boolean isPreUpgradableLayout(StorageDirectory sd) + throws IOException { return false; } @@ -90,7 +91,8 @@ public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException { * Find an edits file spanning the given transaction ID range. * If no such file exists, an exception is thrown. */ - File findFinalizedEditsFile(long startTxId, long endTxId) throws IOException { + File findFinalizedEditsFile(long startTxId, long endTxId) + throws IOException { File ret = new File(sd.getCurrentDir(), NNStorage.getFinalizedEditsFileName(startTxId, endTxId)); if (!ret.exists()) { @@ -152,11 +154,18 @@ File getFinalizedEditsFile(long startTxId, long endTxId) { * paxos-like recovery process for the given log segment. */ File getPaxosFile(long segmentTxId) { - return new File(getPaxosDir(), String.valueOf(segmentTxId)); + return new File(getOrCreatePaxosDir(), String.valueOf(segmentTxId)); } - File getPaxosDir() { - return new File(sd.getCurrentDir(), "paxos"); + File getOrCreatePaxosDir() { + File paxosDir = new File(sd.getCurrentDir(), "paxos"); + if(!paxosDir.exists()) { + LOG.info("Creating paxos dir: {}", paxosDir.toPath()); + if(!paxosDir.mkdir()) { + LOG.error("Could not create paxos dir: {}", paxosDir.toPath()); + } + } + return paxosDir; } File getRoot() { @@ -170,7 +179,8 @@ File getRoot() { void purgeDataOlderThan(long minTxIdToKeep) throws IOException { purgeMatching(sd.getCurrentDir(), CURRENT_DIR_PURGE_REGEXES, minTxIdToKeep); - purgeMatching(getPaxosDir(), PAXOS_DIR_PURGE_REGEXES, minTxIdToKeep); + purgeMatching(getOrCreatePaxosDir(), + PAXOS_DIR_PURGE_REGEXES, minTxIdToKeep); } /** @@ -221,16 +231,10 @@ void format(NamespaceInfo nsInfo, boolean force) throws IOException { unlockAll(); sd.clearDirectory(); writeProperties(sd); - createPaxosDir(); + getOrCreatePaxosDir(); analyzeStorage(); } - void createPaxosDir() throws IOException { - if (!getPaxosDir().mkdirs()) { - throw new IOException("Could not create paxos dir: " + getPaxosDir()); - } - } - void analyzeStorage() throws IOException { this.state = sd.analyzeStorage(StartupOption.REGULAR, this); if (state == StorageState.NORMAL) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index aa0099efb8e53..17c09fee84eab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -1119,7 +1119,7 @@ public synchronized void doUpgrade(StorageInfo sInfo) throws IOException { + ".\n new LV = " + storage.getLayoutVersion() + "; new CTime = " + storage.getCTime()); storage.getJournalManager().doUpgrade(storage); - storage.createPaxosDir(); + storage.getOrCreatePaxosDir(); // Copy over the contents of the epoch data files to the new dir. File currentDir = storage.getSingularStorageDir().getCurrentDir(); From 0d160a0ba829ad3c31f11ea0b6ff4a9d7d94327d Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 10 Jun 2019 14:12:47 -0700 Subject: [PATCH 0156/1308] HDFS-10210. Remove the defunct startKdc profile from hdfs. Contributed by Wei-Chiu Chuang, Akira Ajisaka. Signed-off-by: Wei-Chiu Chuang Co-authored-by: Akira Ajisaka --- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 82 ------------------------- 1 file changed, 82 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 63cebe4a6494a..ac01ca72a4486 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -31,7 +31,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdfs - ../../hadoop-common-project/hadoop-common/src/test/resources/kdc true @@ -223,8 +222,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-surefire-plugin - ${startKdc} - ${kdc.resource.dir} ${runningWithNative} @@ -427,85 +424,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - - startKdc - - - startKdc - true - - - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - enforce-os - - enforce - - - - - - mac - unix - - - true - - - - - - org.apache.maven.plugins - maven-antrun-plugin - - - kdc - compile - - run - - - - - - - - - - - - - - - - - - - - - killKdc - test - - run - - - - - - - - - - - - - parallel-tests From 9191e08f0ad4ebc2a3b776c4cc71d0fc5c053beb Mon Sep 17 00:00:00 2001 From: Suma Shivaprasad Date: Mon, 10 Jun 2019 14:33:24 -0700 Subject: [PATCH 0157/1308] YARN-9569. Auto-created leaf queues do not honor cluster-wide min/max memory/vcores. Contributed by Craig Condit. 
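The fix has AbstractManagedParentQueue copy every property under the resource-types prefix into the template configuration it builds for auto-created leaf queues, so cluster-wide allocation limits now reach those queues. A minimal sketch of the settings involved, assuming YarnConfiguration.RESOURCE_TYPES resolves to the "yarn.resource-types" prefix (values match the test changes below):

    Configuration schedConf = new Configuration();
    // cluster-wide container limits that auto-created leaf queues should honor
    schedConf.setInt("yarn.resource-types.vcores.minimum-allocation", 1);
    schedConf.setInt("yarn.resource-types.vcores.maximum-allocation", 8);
    schedConf.setInt("yarn.resource-types.memory-mb.minimum-allocation", 1024);
    schedConf.setInt("yarn.resource-types.memory-mb.maximum-allocation", 16384);
    CapacitySchedulerConfiguration csConf =
        new CapacitySchedulerConfiguration(schedConf);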
--- .../capacity/AbstractManagedParentQueue.java | 8 ++++++ ...CapacitySchedulerAutoCreatedQueueBase.java | 27 +++++++++++++++++-- ...estCapacitySchedulerAutoQueueCreation.java | 11 ++++---- 3 files changed, 38 insertions(+), 8 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java index 3e90863e5df2e..7626c66d555dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java @@ -19,6 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common @@ -200,6 +201,13 @@ protected CapacitySchedulerConfiguration initializeLeafQueueConfigs(String CapacitySchedulerConfiguration leafQueueConfigs = new CapacitySchedulerConfiguration(new Configuration(false), false); + String prefix = YarnConfiguration.RESOURCE_TYPES + "."; + Map rtProps = csContext + .getConfiguration().getPropsWithPrefix(prefix); + for (Map.Entry entry : rtProps.entrySet()) { + leafQueueConfigs.set(prefix + entry.getKey(), entry.getValue()); + } + SortedMap sortedConfigs = sortCSConfigurations(); SortedMap templateConfigs = getConfigurationsWithPrefix (sortedConfigs, configPrefix); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java index c64582ef21fe6..d0cacde21e54f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java @@ -22,6 +22,7 @@ import org.apache.commons.lang3.RandomUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.security.ShellBasedUnixGroupsMapping; @@ -189,7 +190,7 @@ void spyOnNextEvent(Event expectedEvent, long timeout) @Before public void setUp() throws Exception { - CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); + CapacitySchedulerConfiguration conf = 
setupSchedulerConfiguration(); setupQueueConfiguration(conf); conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class); @@ -494,8 +495,22 @@ protected List setupQueueMapping( return queueMappings; } + protected CapacitySchedulerConfiguration setupSchedulerConfiguration() { + Configuration schedConf = new Configuration(); + schedConf.setInt(YarnConfiguration.RESOURCE_TYPES + + ".vcores.minimum-allocation", 1); + schedConf.setInt(YarnConfiguration.RESOURCE_TYPES + + ".vcores.maximum-allocation", 8); + schedConf.setInt(YarnConfiguration.RESOURCE_TYPES + + ".memory-mb.minimum-allocation", 1024); + schedConf.setInt(YarnConfiguration.RESOURCE_TYPES + + ".memory-mb.maximum-allocation", 16384); + + return new CapacitySchedulerConfiguration(schedConf); + } + protected MockRM setupSchedulerInstance() throws Exception { - CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); + CapacitySchedulerConfiguration conf = setupSchedulerConfiguration(); setupQueueConfiguration(conf); conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class); @@ -579,6 +594,14 @@ protected void validateUserAndAppLimits( autoCreatedLeafQueue.getMaxApplicationsPerUser()); } + protected void validateContainerLimits( + AutoCreatedLeafQueue autoCreatedLeafQueue) { + assertEquals(8, + autoCreatedLeafQueue.getMaximumAllocation().getVirtualCores()); + assertEquals(16384, + autoCreatedLeafQueue.getMaximumAllocation().getMemorySize()); + } + protected void validateInitialQueueEntitlement(CSQueue parentQueue, String leafQueueName, Map expectedTotalChildQueueAbsCapacityByLabel, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java index f859ca7328570..5f32e0ddff4cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java @@ -132,6 +132,7 @@ public void testAutoCreateLeafQueueCreation() throws Exception { expectedChildQueueAbsCapacity, accessibleNodeLabelsOnC); validateUserAndAppLimits(autoCreatedLeafQueue, 1000, 1000); + validateContainerLimits(autoCreatedLeafQueue); assertTrue(autoCreatedLeafQueue .getOrderingPolicy() instanceof FairOrderingPolicy); @@ -256,8 +257,7 @@ public void testConvertAutoCreateDisabledOnManagedParentQueueFails() throws Exception { CapacityScheduler newCS = new CapacityScheduler(); try { - CapacitySchedulerConfiguration newConf = - new CapacitySchedulerConfiguration(); + CapacitySchedulerConfiguration newConf = setupSchedulerConfiguration(); setupQueueConfiguration(newConf); newConf.setAutoCreateChildQueueEnabled(C, false); @@ -285,8 +285,7 @@ public void testConvertLeafQueueToParentQueueWithAutoCreate() throws Exception { CapacityScheduler newCS = new CapacityScheduler(); try { - CapacitySchedulerConfiguration newConf = - new CapacitySchedulerConfiguration(); + 
CapacitySchedulerConfiguration newConf = setupSchedulerConfiguration(); setupQueueConfiguration(newConf); newConf.setAutoCreatedLeafQueueConfigCapacity(A1, A1_CAPACITY / 10); newConf.setAutoCreateChildQueueEnabled(A1, true); @@ -315,8 +314,7 @@ public void testConvertFailsFromParentQueueToManagedParentQueue() throws Exception { CapacityScheduler newCS = new CapacityScheduler(); try { - CapacitySchedulerConfiguration newConf = - new CapacitySchedulerConfiguration(); + CapacitySchedulerConfiguration newConf = setupSchedulerConfiguration(); setupQueueConfiguration(newConf); newConf.setAutoCreatedLeafQueueConfigCapacity(A, A_CAPACITY / 10); newConf.setAutoCreateChildQueueEnabled(A, true); @@ -773,6 +771,7 @@ public void testReinitializeQueuesWithAutoCreatedLeafQueues() validateCapacities(user3Queue, 0.3f, 0.09f, 0.4f,0.2f); validateUserAndAppLimits(user3Queue, 900, 900); + validateContainerLimits(user3Queue); GuaranteedOrZeroCapacityOverTimePolicy autoCreatedQueueManagementPolicy = (GuaranteedOrZeroCapacityOverTimePolicy) ((ManagedParentQueue) From bd46bdf9f9244f3f3474d316255ac98717ed5719 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Mon, 10 Jun 2019 17:20:50 -0700 Subject: [PATCH 0158/1308] HDFS-14553. Make queue size of BlockReportProcessingThread configurable. Contributed by He Xiaoqiao. Signed-off-by: Wei-Chiu Chuang --- .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 ++++ .../hdfs/server/blockmanagement/BlockManager.java | 14 +++++++++----- .../src/main/resources/hdfs-default.xml | 8 ++++++++ 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index e9d0eec8d2d98..7bb30a6494292 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -269,6 +269,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { = "dfs.namenode.storageinfo.defragment.ratio"; public static final double DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_RATIO_DEFAULT = 0.75; + public static final String DFS_NAMENODE_BLOCKREPORT_QUEUE_SIZE_KEY + = "dfs.namenode.blockreport.queue.size"; + public static final int DFS_NAMENODE_BLOCKREPORT_QUEUE_SIZE_DEFAULT + = 1024; public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = "dfs.web.authentication.filter"; /* Phrased as below to avoid javac inlining as a constant, to match the behavior when this was AuthFilter.class.getName(). Note that if you change the import for AuthFilter, you diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 9cfa18098e1b4..bc2141d986aef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -317,8 +317,7 @@ public long getTotalECBlockGroups() { new Daemon(new StorageInfoDefragmenter()); /** Block report thread for handling async reports. 
*/ - private final BlockReportProcessingThread blockReportThread = - new BlockReportProcessingThread(); + private final BlockReportProcessingThread blockReportThread; /** * Store blocks {@literal ->} datanodedescriptor(s) map of corrupt replicas. @@ -574,6 +573,11 @@ public BlockManager(final Namesystem namesystem, boolean haEnabled, bmSafeMode = new BlockManagerSafeMode(this, namesystem, haEnabled, conf); + int queueSize = conf.getInt( + DFSConfigKeys.DFS_NAMENODE_BLOCKREPORT_QUEUE_SIZE_KEY, + DFSConfigKeys.DFS_NAMENODE_BLOCKREPORT_QUEUE_SIZE_DEFAULT); + blockReportThread = new BlockReportProcessingThread(queueSize); + LOG.info("defaultReplication = {}", defaultReplication); LOG.info("maxReplication = {}", maxReplication); LOG.info("minReplication = {}", minReplication); @@ -4966,11 +4970,11 @@ private class BlockReportProcessingThread extends Thread { private static final long MAX_LOCK_HOLD_MS = 4; private long lastFull = 0; - private final BlockingQueue queue = - new ArrayBlockingQueue(1024); + private final BlockingQueue queue; - BlockReportProcessingThread() { + BlockReportProcessingThread(int size) { super("Block report processor"); + queue = new ArrayBlockingQueue<>(size); setDaemon(true); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 8c575af90c08d..76c0660ebde51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -5337,4 +5337,12 @@ inter-DN QOP.
    + + + dfs.namenode.blockreport.queue.size + 1024 + + The queue size of BlockReportProcessingThread in BlockManager. + + From 101d5b5f865f94e4772051ea8ce4ee0f92ddedca Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Mon, 10 Jun 2019 17:55:16 -0700 Subject: [PATCH 0159/1308] HDFS-14234. Limit WebHDFS to specifc user, host, directory triples. Contributed by Clay B. --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 14 +- .../HostRestrictingAuthorizationFilter.java | 443 ++++++++++++++++++ .../datanode/web/DatanodeHttpServer.java | 246 +++++----- ...RestrictingAuthorizationFilterHandler.java | 240 ++++++++++ .../web/RestCsrfPreventionFilterHandler.java | 64 ++- .../server/datanode/web/package-info.java | 21 + .../src/main/resources/hdfs-default.xml | 7 + .../hadoop-hdfs/src/site/markdown/WebHDFS.md | 10 + ...estHostRestrictingAuthorizationFilter.java | 275 +++++++++++ ...RestrictingAuthorizationFilterHandler.java | 178 +++++++ ...stWebHdfsWithRestCsrfPreventionFilter.java | 20 +- 11 files changed, 1388 insertions(+), 130 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HostRestrictingAuthorizationFilter.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/HostRestrictingAuthorizationFilterHandler.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/package-info.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestHostRestrictingAuthorizationFilterHandler.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 7bb30a6494292..f4a8def17713a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -154,7 +154,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { 600000; public static final String DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT = - "dfs.namenode.path.based.cache.block.map.allocation.percent"; + "dfs.namenode.path.based.cache.block.map.allocation.percent"; public static final float DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT = 0.25f; public static final int DFS_NAMENODE_HTTP_PORT_DEFAULT = @@ -185,7 +185,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { // DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT to populate // needed replication queues before exiting safe mode public static final String DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY = - "dfs.namenode.replqueue.threshold-pct"; + "dfs.namenode.replqueue.threshold-pct"; public static final String DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY = "dfs.namenode.safemode.min.datanodes"; public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0; public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = @@ -786,6 +786,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT = 10; public static final String DFS_HTTP_POLICY_KEY = "dfs.http.policy"; public static final String DFS_HTTP_POLICY_DEFAULT = 
HttpConfig.Policy.HTTP_ONLY.name(); + public static final String DFS_DATANODE_HTTPSERVER_FILTER_HANDLERS = "dfs.datanode.httpserver.filter.handlers"; + public static final String DFS_DATANODE_HTTPSERVER_FILTER_HANDLERS_DEFAULT = "org.apache.hadoop.hdfs.server.datanode.web.RestCsrfPreventionFilterHandler"; public static final String DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY = "dfs.default.chunk.view.size"; public static final int DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT = 32*1024; public static final String DFS_DATANODE_HTTPS_ADDRESS_KEY = "dfs.datanode.https.address"; @@ -1192,7 +1194,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { // Slow io warning log threshold settings for dfsclient and datanode. public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY = - "dfs.datanode.slow.io.warning.threshold.ms"; + "dfs.datanode.slow.io.warning.threshold.ms"; public static final long DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 300; // Number of parallel threads to load multiple datanode volumes @@ -1224,15 +1226,15 @@ public class DFSConfigKeys extends CommonConfigurationKeys { // comma separated list of nntop reporting periods in minutes public static final String NNTOP_WINDOWS_MINUTES_KEY = "dfs.namenode.top.windows.minutes"; - public static final String[] NNTOP_WINDOWS_MINUTES_DEFAULT = {"1","5","25"}; + public static final String[] NNTOP_WINDOWS_MINUTES_DEFAULT = {"1", "5", "25"}; public static final String DFS_PIPELINE_ECN_ENABLED = "dfs.pipeline.ecn"; public static final boolean DFS_PIPELINE_ECN_ENABLED_DEFAULT = false; // Key Provider Cache Expiry public static final String DFS_DATANODE_BLOCK_PINNING_ENABLED = - "dfs.datanode.block-pinning.enabled"; + "dfs.datanode.block-pinning.enabled"; public static final boolean DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT = - false; + false; public static final String DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_KEY = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HostRestrictingAuthorizationFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HostRestrictingAuthorizationFilter.java new file mode 100644 index 0000000000000..1a51b46e585ea --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HostRestrictingAuthorizationFilter.java @@ -0,0 +1,443 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.common; + +import org.apache.commons.io.FilenameUtils; +import org.apache.commons.net.util.SubnetUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.security.token.Token; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.BiFunction; +import java.util.function.Predicate; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * An HTTP filter that can filter requests based on Hosts. + */ +public class HostRestrictingAuthorizationFilter implements Filter { + public static final String HDFS_CONFIG_PREFIX = "dfs.web.authentication."; + public static final String RESTRICTION_CONFIG = "host.allow.rules"; + // A Java Predicate for query string parameters on which to filter requests + public static final Predicate RESTRICTED_OPERATIONS = + qStr -> (qStr.trim().equalsIgnoreCase("op=OPEN") || + qStr.trim().equalsIgnoreCase("op=GETDELEGATIONTOKEN")); + private final Map> rulemap = + new ConcurrentHashMap<>(); + private static final Logger LOG = + LoggerFactory.getLogger(HostRestrictingAuthorizationFilter.class); + + /* + * Constructs a mapping of configuration properties to be used for filter + * initialization. The mapping includes all properties that start with the + * specified configuration prefix. Property names in the mapping are trimmed + * to remove the configuration prefix. + * + * @param conf configuration to read + * @param confPrefix configuration prefix + * @return mapping of configuration properties to be used for filter + * initialization + */ + public static Map getFilterParams(Configuration conf, + String confPrefix) { + return conf.getPropsWithPrefix(confPrefix); + } + + /* + * Check all rules for this user to see if one matches for this host/path pair + * + * @param: user - user to check rules for + * @param: host - IP address (e.g. "192.168.0.1") + * @param: path - file path with no scheme (e.g. /path/foo) + * @returns: true if a rule matches this user, host, path tuple false if an + * error occurs or no match + */ + private boolean matchRule(String user, String remoteIp, String path) { + // allow lookups for blank in the rules for user and path + user = (user != null ? user : ""); + path = (path != null ? path : ""); + + LOG.trace("Got user: {}, remoteIp: {}, path: {}", user, remoteIp, path); + + // isInRange fails for null/blank IPs, require an IP to approve + if (remoteIp == null) { + LOG.trace("Returned false due to null rempteIp"); + return false; + } + + List userRules = ((userRules = rulemap.get(user)) != null) ? + userRules : new ArrayList(); + List anyRules = ((anyRules = rulemap.get("*")) != null) ? 
+ anyRules : new ArrayList(); + + List rules = Stream.of(userRules, anyRules) + .flatMap(l -> l.stream()).collect(Collectors.toList()); + + for (Rule rule : rules) { + SubnetUtils.SubnetInfo subnet = rule.getSubnet(); + String rulePath = rule.getPath(); + LOG.trace("Evaluating rule, subnet: {}, path: {}", + subnet != null ? subnet.getCidrSignature() : "*", rulePath); + try { + if ((subnet == null || subnet.isInRange(remoteIp)) + && FilenameUtils.directoryContains(rulePath, path)) { + LOG.debug("Found matching rule, subnet: {}, path: {}; returned true", + rule.getSubnet() != null ? subnet.getCidrSignature() : null, + rulePath); + return true; + } + } catch (IOException e) { + LOG.warn("Got IOException {}; returned false", e); + return false; + } + } + + LOG.trace("Found no rules for user"); + return false; + } + + @Override + public void destroy() { + } + + @Override + public void init(FilterConfig config) throws ServletException { + // Process dropbox rules + String dropboxRules = config.getInitParameter(RESTRICTION_CONFIG); + loadRuleMap(dropboxRules); + } + + /* + * Initializes the rule map state for the filter + * + * @param ruleString - a string of newline delineated, comma separated + * three field records + * @throws IllegalArgumentException - when a rule can not be properly parsed + * Postconditions: + *
+ * <ul>
+ * <li>The {@rulemap} hash will be populated with all parsed rules.</li>
+ * </ul>
    + */ + private void loadRuleMap(String ruleString) throws IllegalArgumentException { + if (ruleString == null || ruleString.equals("")) { + LOG.debug("Got no rules - will disallow anyone access"); + } else { + // value: user1,network/bits1,path_glob1|user2,network/bits2,path_glob2... + Pattern comma_split = Pattern.compile(","); + Pattern rule_split = Pattern.compile("\\||\n"); + // split all rule lines + Map> splits = rule_split.splitAsStream(ruleString) + .map(x -> comma_split.split(x, 3)) + .collect(Collectors.groupingBy(x -> x.length)); + // verify all rules have three parts + if (!splits.keySet().equals(Collections.singleton(3))) { + // instead of re-joining parts, re-materialize lines which do not split + // correctly for the exception + String bad_lines = rule_split.splitAsStream(ruleString) + .filter(x -> comma_split.split(x, 3).length != 3) + .collect(Collectors.joining("\n")); + throw new IllegalArgumentException("Bad rule definition: " + bad_lines); + } + // create a list of Rules + int user = 0; + int cidr = 1; + int path = 2; + BiFunction, CopyOnWriteArrayList, + CopyOnWriteArrayList> arrayListMerge = (v1, v2) -> { + v1.addAll(v2); + return v1; + }; + for (String[] split : splits.get(3)) { + LOG.debug("Loaded rule: user: {}, network/bits: {} path: {}", + split[user], split[cidr], split[path]); + Rule rule = (split[cidr].trim().equals("*") ? new Rule(null, + split[path]) : new Rule(new SubnetUtils(split[cidr]).getInfo(), + split[path])); + // Rule map is {"user": [rule1, rule2, ...]}, update the user's array + CopyOnWriteArrayList arrayListRule = + new CopyOnWriteArrayList() { + { + add(rule); + } + }; + rulemap.merge(split[user], arrayListRule, arrayListMerge); + } + } + } + + /* + * doFilter() is a shim to create an HttpInteraction object and pass that to + * the actual processing logic + */ + @Override + public void doFilter(ServletRequest request, ServletResponse response, + FilterChain filterChain) + throws IOException, ServletException { + final HttpServletRequest httpRequest = (HttpServletRequest) request; + HttpServletResponse httpResponse = (HttpServletResponse) response; + + handleInteraction(new ServletFilterHttpInteraction(httpRequest, + httpResponse, filterChain)); + } + + /* + * The actual processing logic of the Filter + * Uses our {@HttpInteraction} shim which can be called from a variety of + * incoming request sources + * @param interaction - An HttpInteraction object from any of our callers + */ + public void handleInteraction(HttpInteraction interaction) + throws IOException, ServletException { + final String address = interaction.getRemoteAddr(); + final String query = interaction.getQueryString(); + final String path = + interaction.getRequestURI() + .substring(WebHdfsFileSystem.PATH_PREFIX.length()); + String user = interaction.getRemoteUser(); + + LOG.trace("Got request user: {}, remoteIp: {}, query: {}, path: {}", + user, address, query, path); + boolean authenticatedQuery = + Arrays.stream(Optional.ofNullable(query).orElse("") + .trim() + .split("&")) + .anyMatch(RESTRICTED_OPERATIONS); + if (!interaction.isCommitted() && authenticatedQuery) { + // loop over all query parts + String[] queryParts = query.split("&"); + + if (user == null) { + LOG.trace("Looking for delegation token to identify user"); + for (String part : queryParts) { + if (part.trim().startsWith("delegation=")) { + Token t = new Token(); + t.decodeFromUrlString(part.split("=", 2)[1]); + ByteArrayInputStream buf = + new ByteArrayInputStream(t.getIdentifier()); + 
DelegationTokenIdentifier identifier = + new DelegationTokenIdentifier(); + identifier.readFields(new DataInputStream(buf)); + user = identifier.getUser().getUserName(); + LOG.trace("Updated request user: {}, remoteIp: {}, query: {}, " + + "path: {}", user, address, query, path); + } + } + } + + if (authenticatedQuery && !(matchRule("*", address, + path) || matchRule(user, address, path))) { + LOG.trace("Rejecting interaction; no rule found"); + interaction.sendError(HttpServletResponse.SC_FORBIDDEN, + "WebHDFS is configured write-only for " + user + "@" + address + + " for file: " + path); + return; + } + } + + LOG.trace("Proceeding with interaction"); + interaction.proceed(); + } + + /* + * Defines the minimal API requirements for the filter to execute its + * filtering logic. This interface exists to facilitate integration in + * components that do not run within a servlet container and therefore cannot + * rely on a servlet container to dispatch to the {@link #doFilter} method. + * Applications that do run inside a servlet container will not need to write + * code that uses this interface. Instead, they can use typical servlet + * container configuration mechanisms to insert the filter. + */ + public interface HttpInteraction { + + /* + * Returns if the request has been committed. + * + * @return boolean + */ + boolean isCommitted(); + + /* + * Returns the value of the requesting client address. + * + * @return the remote address + */ + String getRemoteAddr(); + + /* + * Returns the user ID making the request. + * + * @return the user + */ + String getRemoteUser(); + + /* + * Returns the value of the request URI. + * + * @return the request URI + */ + String getRequestURI(); + + /* + * Returns the value of the query string. + * + * @return an optional contianing the URL query string + */ + String getQueryString(); + + /* + * Returns the method. + * + * @return method + */ + String getMethod(); + + /* + * Called by the filter after it decides that the request may proceed. + * + * @throws IOException if there is an I/O error + * @throws ServletException if the implementation relies on the servlet API + * and a servlet API call has failed + */ + void proceed() throws IOException, ServletException; + + /* + * Called by the filter after it decides that the request is an + * unauthorized request and therefore must be rejected. + * + * @param code status code to send + * @param message response message + * @throws IOException if there is an I/O error + */ + void sendError(int code, String message) throws IOException; + } + + private static class Rule { + private final SubnetUtils.SubnetInfo subnet; + private final String path; + + /* + * A class for holding dropbox filter rules + * + * @param subnet - the IPv4 subnet for which this rule is valid (pass + * null for any network location) + * @param path - the HDFS path for which this rule is valid + */ + Rule(SubnetUtils.SubnetInfo subnet, String path) { + this.subnet = subnet; + this.path = path; + } + + public SubnetUtils.SubnetInfo getSubnet() { + return (subnet); + } + + public String getPath() { + return (path); + } + } + + /* + * {@link HttpInteraction} implementation for use in the servlet filter. + */ + private static final class ServletFilterHttpInteraction + implements HttpInteraction { + + private final FilterChain chain; + private final HttpServletRequest httpRequest; + private final HttpServletResponse httpResponse; + + /* + * Creates a new ServletFilterHttpInteraction. 
+ * + * @param httpRequest request to process + * @param httpResponse response to process + * @param chain filter chain to forward to if HTTP interaction is allowed + */ + public ServletFilterHttpInteraction(HttpServletRequest httpRequest, + HttpServletResponse httpResponse, FilterChain chain) { + this.httpRequest = httpRequest; + this.httpResponse = httpResponse; + this.chain = chain; + } + + @Override + public boolean isCommitted() { + return (httpResponse.isCommitted()); + } + + @Override + public String getRemoteAddr() { + return (httpRequest.getRemoteAddr()); + } + + @Override + public String getRemoteUser() { + return (httpRequest.getRemoteUser()); + } + + @Override + public String getRequestURI() { + return (httpRequest.getRequestURI()); + } + + @Override + public String getQueryString() { + return (httpRequest.getQueryString()); + } + + @Override + public String getMethod() { + return httpRequest.getMethod(); + } + + @Override + public void proceed() throws IOException, ServletException { + chain.doFilter(httpRequest, httpResponse); + } + + @Override + public void sendError(int code, String message) throws IOException { + httpResponse.sendError(code, message); + } + + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java index 1c7850608aee1..86672b403c908 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,18 +17,10 @@ */ package org.apache.hadoop.hdfs.server.datanode.web; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_REST_CSRF_ENABLED_DEFAULT; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_REST_CSRF_ENABLED_KEY; - -import java.util.Enumeration; -import java.util.Map; -import javax.servlet.FilterConfig; -import javax.servlet.ServletContext; -import javax.servlet.ServletException; - import io.netty.bootstrap.ChannelFactory; import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.ChannelPipeline; @@ -40,9 +32,6 @@ import io.netty.handler.codec.http.HttpResponseEncoder; import io.netty.handler.ssl.SslHandler; import io.netty.handler.stream.ChunkedWriteHandler; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -55,11 +44,17 @@ import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.authorize.AccessControlList; -import org.apache.hadoop.security.http.RestCsrfPreventionFilter; import org.apache.hadoop.security.ssl.SSLFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import javax.servlet.FilterConfig; +import javax.servlet.ServletContext; import java.io.Closeable; import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import java.net.BindException; import java.net.InetSocketAddress; import java.net.SocketAddress; @@ -67,6 +62,9 @@ import java.net.URI; import java.nio.channels.ServerSocketChannel; import java.security.GeneralSecurityException; +import java.util.Enumeration; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT; @@ -74,7 +72,19 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT; +/** + * Data node HTTP Server Class. 
+ */ public class DatanodeHttpServer implements Closeable { + static final Logger LOG = LoggerFactory.getLogger(DatanodeHttpServer.class); + private static final ConcurrentHashMap, Object> HANDLER_STATE + = new ConcurrentHashMap, Object>() {}; + // HttpServer threads are only used for the web UI and basic servlets, so + // set them to the minimum possible + private static final int HTTP_SELECTOR_THREADS = 1; + private static final int HTTP_ACCEPTOR_THREADS = 1; + private static final int HTTP_MAX_THREADS = + HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 1; private final HttpServer2 infoServer; private final EventLoopGroup bossGroup; private final EventLoopGroup workerGroup; @@ -84,23 +94,13 @@ public class DatanodeHttpServer implements Closeable { private final ServerBootstrap httpsServer; private final Configuration conf; private final Configuration confForCreate; - private final RestCsrfPreventionFilter restCsrfPreventionFilter; private InetSocketAddress httpAddress; private InetSocketAddress httpsAddress; - static final Logger LOG = LoggerFactory.getLogger(DatanodeHttpServer.class); - - // HttpServer threads are only used for the web UI and basic servlets, so - // set them to the minimum possible - private static final int HTTP_SELECTOR_THREADS = 1; - private static final int HTTP_ACCEPTOR_THREADS = 1; - private static final int HTTP_MAX_THREADS = - HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 1; public DatanodeHttpServer(final Configuration conf, - final DataNode datanode, - final ServerSocketChannel externalHttpChannel) - throws IOException { - this.restCsrfPreventionFilter = createRestCsrfPreventionFilter(conf); + final DataNode datanode, + final ServerSocketChannel externalHttpChannel) + throws IOException { this.conf = conf; Configuration confForInfoServer = new Configuration(conf); @@ -136,7 +136,7 @@ public DatanodeHttpServer(final Configuration conf, this.infoServer.setAttribute("datanode", datanode); this.infoServer.setAttribute(JspHelper.CURRENT_CONF, conf); this.infoServer.addServlet(null, "/blockScannerReport", - BlockScanner.Servlet.class); + BlockScanner.Servlet.class); DataNodeUGIProvider.init(conf); this.infoServer.start(); final InetSocketAddress jettyAddr = infoServer.getConnectorAddress(0); @@ -148,24 +148,26 @@ public DatanodeHttpServer(final Configuration conf, this.workerGroup = new NioEventLoopGroup(); this.externalHttpChannel = externalHttpChannel; HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf); + final ChannelHandler[] handlers = getFilterHandlers(conf); if (policy.isHttpEnabled()) { this.httpServer = new ServerBootstrap().group(bossGroup, workerGroup) - .childHandler(new ChannelInitializer() { - @Override - protected void initChannel(SocketChannel ch) throws Exception { - ChannelPipeline p = ch.pipeline(); - p.addLast(new HttpRequestDecoder(), - new HttpResponseEncoder()); - if (restCsrfPreventionFilter != null) { - p.addLast(new RestCsrfPreventionFilterHandler( - restCsrfPreventionFilter)); - } - p.addLast( - new ChunkedWriteHandler(), - new URLDispatcher(jettyAddr, conf, confForCreate)); - } - }); + .childHandler(new ChannelInitializer() { + @Override + protected void initChannel(SocketChannel ch) throws Exception { + ChannelPipeline p = ch.pipeline(); + p.addLast(new HttpRequestDecoder(), + new HttpResponseEncoder()); + if (handlers != null) { + for (ChannelHandler c : handlers) { + p.addLast(c); + } + } + p.addLast( + new ChunkedWriteHandler(), + new URLDispatcher(jettyAddr, conf, confForCreate)); + } + }); this.httpServer.childOption( 
ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, @@ -188,7 +190,9 @@ public NioServerSocketChannel newChannel() { // The channel has been bounded externally via JSVC, // thus bind() becomes a no-op. @Override - protected void doBind(SocketAddress localAddress) throws Exception {} + protected void doBind(SocketAddress localAddress) + throws Exception { + } }; } }); @@ -205,30 +209,92 @@ protected void doBind(SocketAddress localAddress) throws Exception {} throw new IOException(e); } this.httpsServer = new ServerBootstrap().group(bossGroup, workerGroup) - .channel(NioServerSocketChannel.class) - .childHandler(new ChannelInitializer() { - @Override - protected void initChannel(SocketChannel ch) throws Exception { - ChannelPipeline p = ch.pipeline(); - p.addLast( - new SslHandler(sslFactory.createSSLEngine()), - new HttpRequestDecoder(), - new HttpResponseEncoder()); - if (restCsrfPreventionFilter != null) { - p.addLast(new RestCsrfPreventionFilterHandler( - restCsrfPreventionFilter)); + .channel(NioServerSocketChannel.class) + .childHandler(new ChannelInitializer() { + @Override + protected void initChannel(SocketChannel ch) throws Exception { + ChannelPipeline p = ch.pipeline(); + p.addLast( + new SslHandler(sslFactory.createSSLEngine()), + new HttpRequestDecoder(), + new HttpResponseEncoder()); + if (handlers != null) { + for (ChannelHandler c : handlers) { + p.addLast(c); + } + } + p.addLast( + new ChunkedWriteHandler(), + new URLDispatcher(jettyAddr, conf, confForCreate)); } - p.addLast( - new ChunkedWriteHandler(), - new URLDispatcher(jettyAddr, conf, confForCreate)); - } - }); + }); } else { this.httpsServer = null; this.sslFactory = null; } } + private static String getHostnameForSpnegoPrincipal(Configuration conf) { + String addr = conf.getTrimmed(DFS_DATANODE_HTTP_ADDRESS_KEY, null); + if (addr == null) { + addr = conf.getTrimmed(DFS_DATANODE_HTTPS_ADDRESS_KEY, + DFS_DATANODE_HTTPS_ADDRESS_DEFAULT); + } + InetSocketAddress inetSocker = NetUtils.createSocketAddr(addr); + return inetSocker.getHostString(); + } + + /* Get an array of ChannelHandlers specified in the conf + * @param conf configuration to read and pass + * @return array of ChannelHandlers ready to be used + * @throws NoSuchMethodException if the handler does not implement a method + * initializeState(conf) + * @throws InvocationTargetException if the handler's initalizeState method + * raises an exception + */ + private ChannelHandler[] getFilterHandlers(Configuration configuration) { + if (configuration == null) { + return null; + } + // If the hdfs-site.xml has the proper configs for filter classes, use them. + Class[] classes = + configuration.getClasses( + DFSConfigKeys.DFS_DATANODE_HTTPSERVER_FILTER_HANDLERS); + + // else use the hard coded class from the default configuration. + if (classes == null) { + classes = + configuration.getClasses( + DFSConfigKeys.DFS_DATANODE_HTTPSERVER_FILTER_HANDLERS_DEFAULT); + } + + // if we are not able to find any handlers, let us fail since running + // with Csrf will is a security hole. Let us abort the startup. 
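The reflection contract this loop relies on is worth spelling out: each class named in dfs.datanode.httpserver.filter.handlers is expected to expose a static initializeState(Configuration) method plus a constructor whose single argument has that method's return type. A minimal conforming handler might look like the following sketch (ExampleFilterHandler and ExampleState are hypothetical names, and imports are assumed to match those of the surrounding file; this is an illustration, not part of the patch):

    @Sharable
    final class ExampleFilterHandler
        extends SimpleChannelInboundHandler<HttpRequest> {
      private final ExampleState state;

      // Looked up reflectively and invoked once with the datanode conf.
      static ExampleState initializeState(Configuration conf) {
        return new ExampleState(conf);
      }

      // Invoked reflectively with the value produced by initializeState().
      ExampleFilterHandler(ExampleState state) {
        this.state = state;
      }

      @Override
      protected void channelRead0(ChannelHandlerContext ctx, HttpRequest req) {
        // A real handler would filter here; this sketch simply passes the
        // request on to the next handler in the pipeline.
        ReferenceCountUtil.retain(req);
        ctx.fireChannelRead(req);
      }
    }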
+ if(classes == null) { + return null; + } + + ChannelHandler[] handlers = new ChannelHandler[classes.length]; + for (int i = 0; i < classes.length; i++) { + LOG.debug("Loading filter handler {}", classes[i].getName()); + try { + Method initializeState = classes[i].getDeclaredMethod("initializeState", + Configuration.class); + Constructor constructor = + classes[i].getDeclaredConstructor(initializeState.getReturnType()); + handlers[i] = (ChannelHandler) constructor.newInstance( + HANDLER_STATE.getOrDefault(classes[i], + initializeState.invoke(null, configuration))); + } catch (NoSuchMethodException | InvocationTargetException + | IllegalAccessException | InstantiationException + | IllegalArgumentException e) { + LOG.error("Failed to initialize handler {}", classes[i].toString()); + throw new RuntimeException(e); + } + } + return (handlers); + } + public InetSocketAddress getHttpAddress() { return httpAddress; } @@ -294,55 +360,21 @@ public void close() throws IOException { } } - private static String getHostnameForSpnegoPrincipal(Configuration conf) { - String addr = conf.getTrimmed(DFS_DATANODE_HTTP_ADDRESS_KEY, null); - if (addr == null) { - addr = conf.getTrimmed(DFS_DATANODE_HTTPS_ADDRESS_KEY, - DFS_DATANODE_HTTPS_ADDRESS_DEFAULT); - } - InetSocketAddress inetSocker = NetUtils.createSocketAddr(addr); - return inetSocker.getHostString(); - } - - /** - * Creates the {@link RestCsrfPreventionFilter} for the DataNode. Since the - * DataNode HTTP server is not implemented in terms of the servlet API, it - * takes some extra effort to obtain an instance of the filter. This method - * takes care of configuration and implementing just enough of the servlet API - * and related interfaces so that the DataNode can get a fully initialized - * instance of the filter. - * - * @param conf configuration to read - * @return initialized filter, or null if CSRF protection not enabled - */ - private static RestCsrfPreventionFilter createRestCsrfPreventionFilter( - Configuration conf) { - if (!conf.getBoolean(DFS_WEBHDFS_REST_CSRF_ENABLED_KEY, - DFS_WEBHDFS_REST_CSRF_ENABLED_DEFAULT)) { - return null; - } - String restCsrfClassName = RestCsrfPreventionFilter.class.getName(); - Map restCsrfParams = RestCsrfPreventionFilter - .getFilterParams(conf, "dfs.webhdfs.rest-csrf."); - RestCsrfPreventionFilter filter = new RestCsrfPreventionFilter(); - try { - filter.init(new MapBasedFilterConfig(restCsrfClassName, restCsrfParams)); - } catch (ServletException e) { - throw new IllegalStateException( - "Failed to initialize RestCsrfPreventionFilter.", e); - } - return filter; - } - /** - * A minimal {@link FilterConfig} implementation backed by a {@link Map}. + * Since the DataNode HTTP server is not implemented in terms of the + * servlet API, it + * takes some extra effort to obtain an instance of the filter. This + * method provides + * a minimal {@link FilterConfig} implementation backed by a {@link Map}. + * Call this from + * your filter handler to initialize a servlet filter. */ - private static final class MapBasedFilterConfig implements FilterConfig { + public static final class MapBasedFilterConfig implements FilterConfig { private final String filterName; private final Map parameters; - /** + /* * Creates a new MapBasedFilterConfig. * * @param filterName filter name @@ -374,10 +406,10 @@ public ServletContext getServletContext() { throw this.notImplemented(); } - /** + /* * Creates an exception indicating that an interface method is not - * implemented. 
These should never be seen in practice, because it is only - * used for methods that are not called by {@link RestCsrfPreventionFilter}. + * implemented. If you are building a handler it is possible you will + * need to make this interface more extensive. * * @return exception indicating method not implemented */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/HostRestrictingAuthorizationFilterHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/HostRestrictingAuthorizationFilterHandler.java new file mode 100644 index 0000000000000..584e12bdc9b02 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/HostRestrictingAuthorizationFilterHandler.java @@ -0,0 +1,240 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode.web; + +import com.google.common.collect.ImmutableMap; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandler.Sharable; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.handler.codec.http.DefaultHttpResponse; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.QueryStringDecoder; +import io.netty.util.ReferenceCountUtil; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.common.HostRestrictingAuthorizationFilter; +import org.apache.hadoop.hdfs.server.common.HostRestrictingAuthorizationFilter.HttpInteraction; +import org.apache.hadoop.hdfs.web.resources.UserParam; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.List; +import java.util.Map; + +import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION; +import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE; +import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; + +/* + * Netty handler that integrates with the {@link + * HostRestrictingAuthorizationFilter}. If + * the filter determines that the request is allowed, then this handler forwards + * the request to the next handler in the Netty pipeline. Otherwise, this + * handler drops the request and sends an HTTP 403 response. 
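+ * To place this handler on the WebHDFS I/O path, a deployment would
+ * typically set dfs.datanode.httpserver.filter.handlers to
+ * org.apache.hadoop.hdfs.server.datanode.web.HostRestrictingAuthorizationFilterHandler
+ * and supply rules via dfs.web.authentication.host.allow.rules, for
+ * instance the purely illustrative value *,10.0.0.0/8,/public/* to let any
+ * user read under /public from the 10.0.0.0/8 network.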
+ */ +@InterfaceAudience.Private +@Sharable +final class HostRestrictingAuthorizationFilterHandler + extends SimpleChannelInboundHandler { + + private static final Logger LOG = + LoggerFactory.getLogger(HostRestrictingAuthorizationFilterHandler.class); + private final + HostRestrictingAuthorizationFilter hostRestrictingAuthorizationFilter; + + /* + * Creates a new HostRestrictingAuthorizationFilterHandler. There will be + * a new instance created for each new Netty channel/pipeline serving a new + * request. + * + * To prevent the cost of repeated initialization of the filter, this + * constructor requires the caller to pass in a pre-built, fully initialized + * filter instance. The filter is stateless after initialization, so it can + * be shared across multiple Netty channels/pipelines. + * + * @param hostRestrictingAuthorizationFilter initialized filter + */ + public HostRestrictingAuthorizationFilterHandler( + HostRestrictingAuthorizationFilter hostRestrictingAuthorizationFilter) { + this.hostRestrictingAuthorizationFilter = + hostRestrictingAuthorizationFilter; + } + + /* + * Creates a new HostRestrictingAuthorizationFilterHandler. There will be + * a new instance created for each new Netty channel/pipeline serving a new + * request. + * To prevent the cost of repeated initialization of the filter, this + * constructor requires the caller to pass in a pre-built, fully initialized + * filter instance. The filter is stateless after initialization, so it can + * be shared across multiple Netty channels/pipelines. + */ + public HostRestrictingAuthorizationFilterHandler() { + Configuration conf = new Configuration(); + this.hostRestrictingAuthorizationFilter = initializeState(conf); + } + + /* + * Creates a {@link HostRestrictingAuthorizationFilter} for the + * {@DatanodeHttpServer}. + * This method takes care of configuration and implementing just enough of the + * servlet API and related interfaces so that the DataNode can get a fully + * initialized + * instance of the filter. + * + * @param conf configuration to read + * @return initialized filter, or null if CSRF protection not enabled + * @throws IllegalStateException if filter fails initialization + */ + public static HostRestrictingAuthorizationFilter + initializeState(Configuration conf) { + String confName = HostRestrictingAuthorizationFilter.HDFS_CONFIG_PREFIX + + HostRestrictingAuthorizationFilter.RESTRICTION_CONFIG; + String confValue = conf.get(confName); + // simply pass a blank value if we do not have one set + confValue = (confValue == null ? "" : confValue); + + Map confMap = + ImmutableMap.of(HostRestrictingAuthorizationFilter.RESTRICTION_CONFIG + , confValue); + FilterConfig fc = + new DatanodeHttpServer.MapBasedFilterConfig( + HostRestrictingAuthorizationFilter.class.getName(), confMap); + HostRestrictingAuthorizationFilter hostRestrictingAuthorizationFilter = + new HostRestrictingAuthorizationFilter(); + try { + hostRestrictingAuthorizationFilter.init(fc); + } catch (ServletException e) { + throw new IllegalStateException( + "Failed to initialize HostRestrictingAuthorizationFilter.", e); + } + return hostRestrictingAuthorizationFilter; + } + + /* + * Finish handling this pipeline by writing a response with the + * "Connection: close" header, flushing, and scheduling a close of the + * connection. 
+ * + * @param ctx context to receive the response + * @param resp response to send + */ + private static void sendResponseAndClose(ChannelHandlerContext ctx, + DefaultHttpResponse resp) { + resp.headers().set(CONNECTION, CLOSE); + ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE); + } + + @Override + protected void channelRead0(final ChannelHandlerContext ctx, + final HttpRequest req) throws Exception { + hostRestrictingAuthorizationFilter + .handleInteraction(new NettyHttpInteraction(ctx, req)); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + LOG.error("Exception in " + this.getClass().getSimpleName(), cause); + sendResponseAndClose(ctx, + new DefaultHttpResponse(HTTP_1_1, INTERNAL_SERVER_ERROR)); + } + + /* + * {@link HttpInteraction} implementation for use in a Netty pipeline. + */ + private static final class NettyHttpInteraction implements HttpInteraction { + + private final ChannelHandlerContext ctx; + private final HttpRequest req; + private boolean committed; + + /* + * Creates a new NettyHttpInteraction. + * + * @param ctx context to receive the response + * @param req request to process + */ + public NettyHttpInteraction(ChannelHandlerContext ctx, HttpRequest req) { + this.committed = false; + this.ctx = ctx; + this.req = req; + } + + @Override + public boolean isCommitted() { + return committed; + } + + @Override + public String getRemoteAddr() { + return ((InetSocketAddress) ctx.channel().remoteAddress()). + getAddress().getHostAddress(); + } + + @Override + public String getQueryString() { + try { + return (new URI(req.getUri()).getQuery()); + } catch (URISyntaxException e) { + return null; + } + } + + @Override + public String getRequestURI() { + String uri = req.getUri(); + // Netty's getUri includes the query string, while Servlet's does not + return (uri.substring(0, uri.indexOf("?") >= 0 ? uri.indexOf("?") : + uri.length())); + } + + @Override + public String getRemoteUser() { + QueryStringDecoder queryString = new QueryStringDecoder(req.getUri()); + List p = queryString.parameters().get(UserParam.NAME); + String user = (p == null ? 
null : p.get(0)); + return (new UserParam(user).getValue()); + } + + @Override + public String getMethod() { + return req.getMethod().name(); + } + + @Override + public void proceed() { + ReferenceCountUtil.retain(req); + ctx.fireChannelRead(req); + } + + @Override + public void sendError(int code, String message) { + HttpResponseStatus status = new HttpResponseStatus(code, message); + sendResponseAndClose(ctx, new DefaultHttpResponse(HTTP_1_1, status)); + this.committed = true; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java index be29eaf58a538..a2c2d370439ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java @@ -21,8 +21,21 @@ import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE; import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_REST_CSRF_ENABLED_DEFAULT; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_REST_CSRF_ENABLED_KEY; + +import java.util.Map; + +import javax.servlet.ServletException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.http.RestCsrfPreventionFilter; +import org.apache.hadoop.security.http.RestCsrfPreventionFilter.HttpInteraction; +import org.slf4j.Logger; import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandler.Sharable; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.DefaultHttpResponse; @@ -30,11 +43,6 @@ import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.util.ReferenceCountUtil; -import org.slf4j.Logger; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.security.http.RestCsrfPreventionFilter; -import org.apache.hadoop.security.http.RestCsrfPreventionFilter.HttpInteraction; /** * Netty handler that integrates with the {@link RestCsrfPreventionFilter}. If @@ -43,6 +51,7 @@ * handler drops the request and immediately sends an HTTP 400 response. 
*/ @InterfaceAudience.Private +@Sharable final class RestCsrfPreventionFilterHandler extends SimpleChannelInboundHandler { @@ -60,16 +69,24 @@ final class RestCsrfPreventionFilterHandler * * @param restCsrfPreventionFilter initialized filter */ - public RestCsrfPreventionFilterHandler( + RestCsrfPreventionFilterHandler( RestCsrfPreventionFilter restCsrfPreventionFilter) { + if(restCsrfPreventionFilter == null) { + LOG.warn("Got null for restCsrfPreventionFilter - will not do any filtering."); + } this.restCsrfPreventionFilter = restCsrfPreventionFilter; } @Override protected void channelRead0(final ChannelHandlerContext ctx, final HttpRequest req) throws Exception { - restCsrfPreventionFilter.handleHttpInteraction(new NettyHttpInteraction( - ctx, req)); + if(restCsrfPreventionFilter != null) { + restCsrfPreventionFilter.handleHttpInteraction(new NettyHttpInteraction( + ctx, req)); + } else { + // we do not have a valid filter simply pass requests + new NettyHttpInteraction(ctx, req).proceed(); + } } @Override @@ -107,7 +124,7 @@ private static final class NettyHttpInteraction implements HttpInteraction { * @param ctx context to receive the response * @param req request to process */ - public NettyHttpInteraction(ChannelHandlerContext ctx, HttpRequest req) { + NettyHttpInteraction(ChannelHandlerContext ctx, HttpRequest req) { this.ctx = ctx; this.req = req; } @@ -134,4 +151,33 @@ public void sendError(int code, String message) { sendResponseAndClose(ctx, new DefaultHttpResponse(HTTP_1_1, status)); } } + + /** + * Creates a {@link RestCsrfPreventionFilter} for the {@DatanodeHttpServer}. + * This method takes care of configuration and implementing just enough of the + * servlet API and related interfaces so that the DataNode can get a fully + * initialized instance of the filter. + * + * @param conf configuration to read + * @return initialized filter, or null if CSRF protection not enabled + */ + public static RestCsrfPreventionFilter initializeState( + Configuration conf) { + if (!conf.getBoolean(DFS_WEBHDFS_REST_CSRF_ENABLED_KEY, + DFS_WEBHDFS_REST_CSRF_ENABLED_DEFAULT)) { + return null; + } + String restCsrfClassName = RestCsrfPreventionFilter.class.getName(); + Map restCsrfParams = RestCsrfPreventionFilter + .getFilterParams(conf, "dfs.webhdfs.rest-csrf."); + RestCsrfPreventionFilter filter = new RestCsrfPreventionFilter(); + try { + filter.init(new DatanodeHttpServer + .MapBasedFilterConfig(restCsrfClassName, restCsrfParams)); + } catch (ServletException e) { + throw new IllegalStateException( + "Failed to initialize RestCsrfPreventionFilter.", e); + } + return(filter); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/package-info.java new file mode 100644 index 0000000000000..666a90d029e09 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Data node HTTP classes. + */ +package org.apache.hadoop.hdfs.server.datanode.web; \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 76c0660ebde51..0f29aa081b190 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -319,6 +319,13 @@ + + dfs.datanode.httpserver.filter.handlers + org.apache.hadoop.hdfs.server.datanode.web.RestCsrfPreventionFilterHandler + Comma separated list of Netty servlet-style filter handlers to inject into the Datanode WebHDFS I/O path + + + dfs.default.chunk.view.size 32768 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md index b8f05904cc38c..6a7d345cad97f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md @@ -210,6 +210,7 @@ The following properties control CSRF prevention. | `dfs.webhdfs.rest-csrf.custom-header` | The name of a custom header that HTTP requests must send when protection against cross-site request forgery (CSRF) is enabled for WebHDFS by setting dfs.webhdfs.rest-csrf.enabled to true. The WebHDFS client also uses this property to determine whether or not it needs to send the custom CSRF prevention header in its HTTP requests. | `X-XSRF-HEADER` | | `dfs.webhdfs.rest-csrf.methods-to-ignore` | A comma-separated list of HTTP methods that do not require HTTP requests to include a custom header when protection against cross-site request forgery (CSRF) is enabled for WebHDFS by setting dfs.webhdfs.rest-csrf.enabled to true. The WebHDFS client also uses this property to determine whether or not it needs to send the custom CSRF prevention header in its HTTP requests. | `GET,OPTIONS,HEAD,TRACE` | | `dfs.webhdfs.rest-csrf.browser-useragents-regex` | A comma-separated list of regular expressions used to match against an HTTP request's User-Agent header when protection against cross-site request forgery (CSRF) is enabled for WebHDFS by setting dfs.webhdfs.reset-csrf.enabled to true. If the incoming User-Agent matches any of these regular expressions, then the request is considered to be sent by a browser, and therefore CSRF prevention is enforced. If the request's User-Agent does not match any of these regular expressions, then the request is considered to be sent by something other than a browser, such as scripted automation. In this case, CSRF is not a potential attack vector, so the prevention is not enforced. This helps achieve backwards-compatibility with existing automation that has not been updated to send the CSRF prevention header. 
| `^Mozilla.*,^Opera.*` | +| `dfs.datanode.httpserver.filter.handlers` | Comma separated list of Netty servlet-style filter handlers to inject into the Datanode WebHDFS I/O path | `org.apache.hadoop.hdfs.server.datanode.web.RestCsrfPreventionFilterHandler` | The following is an example `curl` call that uses the `-H` option to include the custom header in the request. @@ -233,6 +234,15 @@ The following properties control WebHDFS retry and failover policy. | `dfs.http.client.failover.sleep.base.millis` | Specify the base amount of time in milliseconds upon which the exponentially increased sleep time between retries or failovers is calculated for WebHDFS client. | `500` | | `dfs.http.client.failover.sleep.max.millis` | Specify the upper bound of sleep time in milliseconds between retries or failovers for WebHDFS client. | `15000` | +WebHDFS Request Filtering +------------------------------------- +One may control directionality of data in the WebHDFS protocol allowing only writing data from insecure networks. To enable, one must ensure `dfs.datanode.httpserver.filter.handlers` includes `org.apache.hadoop.hdfs.server.datanode.web.HostRestrictingAuthorizationFilterHandler`. Configuration of the `HostRestrictingAuthorizationFilter` is controlled via the following properties. + +| Property | Description | Default Value | +|:---- |:---- |:---- +| `dfs.datanode.httpserver.filter.handlers` | Comma separated list of Netty servlet-style filter handlers to inject into the Datanode WebHDFS I/O path | `org.apache.hadoop.hdfs.server.datanode.web.RestCsrfPreventionFilterHandler` | +| `dfs.web.authentication.host.allow.rules` | Rules allowing users to read files in the format of _user_,_network/bits_,_path glob_ newline or `|`-separated. Use `*` for a wildcard of all _users_ or _network/bits_. | nothing - defaults to no one may read via WebHDFS | + File and Directory Operations ----------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java new file mode 100644 index 0000000000000..bd78a50da9700 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java @@ -0,0 +1,275 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.common; + +import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.junit.Test; +import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.util.Collections; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Map; + +/** + * Test Host Restriction Filter. + */ +public class TestHostRestrictingAuthorizationFilter { + private Logger log = + LoggerFactory.getLogger(TestHostRestrictingAuthorizationFilter.class); + + /* + * Test running in unrestricted mode + */ + @Test + public void testAcceptAll() throws Exception { + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + Mockito.when(request.getRemoteAddr()).thenReturn(null); + Mockito.when(request.getMethod()).thenReturn("GET"); + Mockito.when(request.getRequestURI()) + .thenReturn(new StringBuffer(WebHdfsFileSystem.PATH_PREFIX + "/user" + + "/ubuntu/foo").toString()); + Mockito.when(request.getQueryString()).thenReturn("op=OPEN"); + Mockito.when(request.getRemoteAddr()).thenReturn("192.168.1.2"); + + HttpServletResponse response = Mockito.mock(HttpServletResponse.class); + + FilterChain chain = new FilterChain() { + @Override + public void doFilter(ServletRequest servletRequest, + ServletResponse servletResponse) + throws IOException, ServletException { + } + }; + + Filter filter = new HostRestrictingAuthorizationFilter(); + + HashMap configs = new HashMap() { + }; + String allowRule = "*,*,/"; + log.trace("Passing configs:\n{}", allowRule); + configs.put("host.allow.rules", allowRule); + configs.put(AuthenticationFilter.AUTH_TYPE, "simple"); + FilterConfig fc = new DummyFilterConfig(configs); + + filter.init(fc); + filter.doFilter(request, response, chain); + Mockito.verify(response, Mockito.times(0)).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN), + Mockito.anyString()); + filter.destroy(); + } + + /* + * Test accepting a GET request for the file checksum when prohibited from + * doing + * a GET open call + */ + @Test + public void testAcceptGETFILECHECKSUM() throws Exception { + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + Mockito.when(request.getRemoteAddr()).thenReturn(null); + Mockito.when(request.getMethod()).thenReturn("GET"); + Mockito.when(request.getRequestURI()) + .thenReturn(new StringBuffer(WebHdfsFileSystem.PATH_PREFIX + "/user" + + "/ubuntu/").toString()); + Mockito.when(request.getQueryString()).thenReturn("op=GETFILECHECKSUM "); + Mockito.when(request.getRemoteAddr()).thenReturn("192.168.1.2"); + + HttpServletResponse response = Mockito.mock(HttpServletResponse.class); + + FilterChain chain = new FilterChain() { + @Override + public void doFilter(ServletRequest servletRequest, + ServletResponse servletResponse) + throws IOException, ServletException { + } + }; + + Filter filter = new HostRestrictingAuthorizationFilter(); + + HashMap configs = new HashMap() { + }; + configs.put(AuthenticationFilter.AUTH_TYPE, "simple"); + FilterConfig fc = new DummyFilterConfig(configs); + + filter.init(fc); + 
filter.doFilter(request, response, chain); + Mockito.verify(response, Mockito.times(0)).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN), + Mockito.anyString()); + filter.destroy(); + } + + /* + * Test accepting a GET request for reading a file via an open call + */ + @Test + public void testRuleAllowedGet() throws Exception { + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + Mockito.when(request.getRemoteAddr()).thenReturn(null); + Mockito.when(request.getMethod()).thenReturn("GET"); + String queryString = "op=OPEN"; + Mockito.when(request.getRequestURI()) + .thenReturn(new StringBuffer(WebHdfsFileSystem.PATH_PREFIX + "/user" + + "/ubuntu/foo?" + queryString).toString()); + Mockito.when(request.getQueryString()).thenReturn(queryString); + Mockito.when(request.getRemoteAddr()).thenReturn("192.168.1.2"); + + HttpServletResponse response = Mockito.mock(HttpServletResponse.class); + + FilterChain chain = new FilterChain() { + @Override + public void doFilter(ServletRequest servletRequest, + ServletResponse servletResponse) + throws IOException, ServletException { + } + }; + + Filter filter = new HostRestrictingAuthorizationFilter(); + + HashMap configs = new HashMap() { + }; + String allowRule = "ubuntu,127.0.0.1/32,/localbits/*|*,192.168.0.1/22," + + "/user/ubuntu/*"; + log.trace("Passing configs:\n{}", allowRule); + configs.put("host.allow.rules", allowRule); + configs.put(AuthenticationFilter.AUTH_TYPE, "simple"); + FilterConfig fc = new DummyFilterConfig(configs); + + filter.init(fc); + filter.doFilter(request, response, chain); + filter.destroy(); + } + + /* + * Test by default we deny an open call GET request + */ + @Test + public void testRejectsGETs() throws Exception { + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + Mockito.when(request.getRemoteAddr()).thenReturn(null); + Mockito.when(request.getMethod()).thenReturn("GET"); + String queryString = "bar=foo&delegationToken=dt&op=OPEN"; + Mockito.when(request.getRequestURI()) + .thenReturn(new StringBuffer(WebHdfsFileSystem.PATH_PREFIX + "/user" + + "/ubuntu/?" 
+ queryString).toString()); + Mockito.when(request.getQueryString()).thenReturn(queryString); + Mockito.when(request.getRemoteAddr()).thenReturn("192.168.1.2"); + + HttpServletResponse response = Mockito.mock(HttpServletResponse.class); + + FilterChain chain = new FilterChain() { + @Override + public void doFilter(ServletRequest servletRequest, + ServletResponse servletResponse) + throws IOException, ServletException { + } + }; + + Filter filter = new HostRestrictingAuthorizationFilter(); + + HashMap configs = new HashMap() { + }; + configs.put(AuthenticationFilter.AUTH_TYPE, "simple"); + FilterConfig fc = new DummyFilterConfig(configs); + + filter.init(fc); + filter.doFilter(request, response, chain); + filter.destroy(); + } + + /* + * Test acceptable behavior to malformed requests + * Case: no operation (op parameter) specified + */ + @Test + public void testUnexpectedInputMissingOpParameter() throws Exception { + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + Mockito.when(request.getRemoteAddr()).thenReturn(null); + Mockito.when(request.getMethod()).thenReturn("GET"); + Mockito.when(request.getRequestURI()) + .thenReturn(new StringBuffer(WebHdfsFileSystem.PATH_PREFIX + + "/IAmARandomRequest/").toString()); + Mockito.when(request.getQueryString()).thenReturn(null); + Mockito.when(request.getRemoteAddr()).thenReturn("192.168.1.2"); + + HttpServletResponse response = Mockito.mock(HttpServletResponse.class); + + FilterChain chain = new FilterChain() { + @Override + public void doFilter(ServletRequest servletRequest, + ServletResponse servletResponse) + throws IOException, ServletException { + } + }; + + Filter filter = new HostRestrictingAuthorizationFilter(); + + HashMap configs = new HashMap() { + }; + configs.put(AuthenticationFilter.AUTH_TYPE, "simple"); + FilterConfig fc = new DummyFilterConfig(configs); + + filter.init(fc); + filter.doFilter(request, response, chain); + log.error("XXX {}", response.getStatus()); + filter.destroy(); + } + + private static class DummyFilterConfig implements FilterConfig { + final Map map; + + DummyFilterConfig(Map map) { + this.map = map; + } + + @Override + public String getFilterName() { + return "dummy"; + } + + @Override + public String getInitParameter(String arg0) { + return map.get(arg0); + } + + @Override + public Enumeration getInitParameterNames() { + return Collections.enumeration(map.keySet()); + } + + @Override + public ServletContext getServletContext() { + ServletContext context = Mockito.mock(ServletContext.class); + Mockito.when(context.getAttribute(AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE)).thenReturn(null); + return context; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestHostRestrictingAuthorizationFilterHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestHostRestrictingAuthorizationFilterHandler.java new file mode 100644 index 0000000000000..031ac0aa29d35 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestHostRestrictingAuthorizationFilterHandler.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode.web; + +import io.netty.channel.ChannelHandler; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpResponse; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.common.HostRestrictingAuthorizationFilter; +import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.junit.Test; + +import java.net.InetSocketAddress; +import java.net.SocketAddress; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public class TestHostRestrictingAuthorizationFilterHandler { + + final static String CONFNAME = + HostRestrictingAuthorizationFilter.HDFS_CONFIG_PREFIX + + HostRestrictingAuthorizationFilter.RESTRICTION_CONFIG; + + /* + * Test running in with no ACL rules (restrict all) + */ + @Test + public void testRejectAll() throws Exception { + EmbeddedChannel channel = new CustomEmbeddedChannel("127.0.0.1", 1006, + new HostRestrictingAuthorizationFilterHandler()); + FullHttpRequest httpRequest = + new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, + WebHdfsFileSystem.PATH_PREFIX + "/user/myName/fooFile?op=OPEN"); + // we will send back an error so ensure our write returns false + assertFalse("Should get error back from handler for rejected request", + channel.writeInbound(httpRequest)); + DefaultHttpResponse channelResponse = + (DefaultHttpResponse) channel.outboundMessages().poll(); + assertNotNull("Expected response to exist.", channelResponse); + assertEquals(HttpResponseStatus.FORBIDDEN, channelResponse.getStatus()); + assertFalse(channel.isOpen()); + } + + /* + * Test accepting multiple allowed GET requests to ensure channel can be + * reused + */ + @Test + public void testMultipleAcceptedGETsOneChannel() throws Exception { + Configuration conf = new Configuration(); + conf.set(CONFNAME, "*,*,/allowed"); + HostRestrictingAuthorizationFilter filter = + HostRestrictingAuthorizationFilterHandler.initializeState(conf); + EmbeddedChannel channel = new CustomEmbeddedChannel("127.0.0.1", 1006, + new HostRestrictingAuthorizationFilterHandler(filter)); + FullHttpRequest allowedHttpRequest = + new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, + WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_one?op=OPEN"); + FullHttpRequest allowedHttpRequest2 = + new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, + WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_two?op=OPEN"); + FullHttpRequest allowedHttpRequest3 = + new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, + 
HttpMethod.GET, + WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_three?op=OPEN"); + assertTrue("Should successfully accept request", + channel.writeInbound(allowedHttpRequest)); + assertTrue("Should successfully accept request, second time", + channel.writeInbound(allowedHttpRequest2)); + assertTrue("Should successfully accept request, third time", + channel.writeInbound(allowedHttpRequest3)); + } + + /* + * Test accepting multiple allowed GET requests in different channels to a + * single filter instance + */ + @Test + public void testMultipleChannels() throws Exception { + Configuration conf = new Configuration(); + conf.set(CONFNAME, "*,*,/allowed"); + HostRestrictingAuthorizationFilter filter = + HostRestrictingAuthorizationFilterHandler.initializeState(conf); + EmbeddedChannel channel1 = new CustomEmbeddedChannel("127.0.0.1", 1006, + new HostRestrictingAuthorizationFilterHandler(filter)); + EmbeddedChannel channel2 = new CustomEmbeddedChannel("127.0.0.2", 1006, + new HostRestrictingAuthorizationFilterHandler(filter)); + EmbeddedChannel channel3 = new CustomEmbeddedChannel("127.0.0.3", 1006, + new HostRestrictingAuthorizationFilterHandler(filter)); + FullHttpRequest allowedHttpRequest = + new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, + WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_one?op=OPEN"); + FullHttpRequest allowedHttpRequest2 = + new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, + WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_two?op=OPEN"); + FullHttpRequest allowedHttpRequest3 = + new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, + WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_three?op=OPEN"); + assertTrue("Should successfully accept request", + channel1.writeInbound(allowedHttpRequest)); + assertTrue("Should successfully accept request, second time", + channel2.writeInbound(allowedHttpRequest2)); + + // verify closing one channel does not affect remaining channels + channel1.close(); + assertTrue("Should successfully accept request, third time", + channel3.writeInbound(allowedHttpRequest3)); + } + + /* + * Test accepting a GET request for the file checksum + */ + @Test + public void testAcceptGETFILECHECKSUM() throws Exception { + EmbeddedChannel channel = new CustomEmbeddedChannel("127.0.0.1", 1006, + new HostRestrictingAuthorizationFilterHandler()); + FullHttpRequest httpRequest = + new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, + WebHdfsFileSystem.PATH_PREFIX + "/user/myName/fooFile?op" + + "=GETFILECHECKSUM"); + assertTrue("Should successfully accept request", + channel.writeInbound(httpRequest)); + } + + /* + * Custom channel implementation which allows for mocking a client's remote + * address + */ + protected static class CustomEmbeddedChannel extends EmbeddedChannel { + + private InetSocketAddress socketAddress; + + /* + * A normal @{EmbeddedChannel} constructor which takes the remote client + * host and port to mock + */ + public CustomEmbeddedChannel(String host, int port, + final ChannelHandler... 
handlers) { + super(handlers); + socketAddress = new InetSocketAddress(host, port); + } + + @Override + protected SocketAddress remoteAddress0() { + return this.socketAddress; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithRestCsrfPreventionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithRestCsrfPreventionFilter.java index d5f4a0552022a..a1c27f52dc96f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithRestCsrfPreventionFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithRestCsrfPreventionFilter.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_REST_CSRF_ENABLED_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPSERVER_FILTER_HANDLERS; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -76,14 +77,14 @@ public TestWebHdfsWithRestCsrfPreventionFilter(boolean nnRestCsrf, @Parameters public static Iterable data() { return Arrays.asList(new Object[][] { - { false, false, false }, - { true, true, true }, - { true, true, false }, - { true, false, true }, - { true, false, false }, - { false, true, true }, - { false, true, false }, - { false, false, true }}); + {false, false, false}, + {true, true, true}, + {true, true, false}, + {true, false, true}, + {true, false, false}, + {false, true, true}, + {false, true, false}, + {false, false, true}}); } @Before @@ -97,6 +98,9 @@ public void before() throws Exception { Configuration dnConf = new Configuration(nnConf); dnConf.setBoolean(DFS_WEBHDFS_REST_CSRF_ENABLED_KEY, dnRestCsrf); + // By default the datanode loads the CSRF filter handler + dnConf.set(DFS_DATANODE_HTTPSERVER_FILTER_HANDLERS, + "org.apache.hadoop.hdfs.server.datanode.web.RestCsrfPreventionFilterHandler"); cluster.startDataNodes(dnConf, 1, true, null, null, null, null, false); cluster.waitActive(); From b417a4c854e8905ecd61e98d9bb655640a893102 Mon Sep 17 00:00:00 2001 From: Wanqiang Ji Date: Mon, 10 Jun 2019 16:27:32 +0800 Subject: [PATCH 0160/1308] MAPREDUCE-7214. 
Remove unused pieces related to `mapreduce.job.userlog.retain.hours` Signed-off-by: Akira Ajisaka --- .../hadoop-common/src/site/markdown/DeprecatedProperties.md | 1 - .../src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java | 2 -- .../main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java | 2 -- .../src/test/resources/job_1329348432655_0001_conf.xml | 1 - hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json | 2 -- 5 files changed, 8 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md index abeacafa27826..d693774c715fd 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md @@ -222,7 +222,6 @@ The following table lists the configuration property names that are deprecated i | mapreduce.reduce.class | mapreduce.job.reduce.class | | mapred.used.genericoptionsparser | mapreduce.client.genericoptionsparser.used | | mapred.userlog.limit.kb | mapreduce.task.userlog.limit.kb | -| mapred.userlog.retain.hours | mapreduce.job.userlog.retain.hours | | mapred.working.dir | mapreduce.job.working.dir | | mapred.work.output.dir | mapreduce.task.output.dir | | min.num.spills.for.combine | mapreduce.map.combine.minspills | diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java index fb346983c77ab..8fb6c9341aaf5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java @@ -287,8 +287,6 @@ public interface MRJobConfig { @Deprecated public static final String CACHE_SYMLINK = "mapreduce.job.cache.symlink.create"; - public static final String USER_LOG_RETAIN_HOURS = "mapreduce.job.userlog.retain.hours"; - public static final String MAPREDUCE_JOB_USER_CLASSPATH_FIRST = "mapreduce.job.user.classpath.first"; public static final String MAPREDUCE_JOB_CLASSLOADER = "mapreduce.job.classloader"; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java index dcd3b08ab0031..cf120342dd1b9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java @@ -208,8 +208,6 @@ private static void addDeprecatedKeys() { MRJobConfig.TASK_OUTPUT_DIR), new DeprecationDelta("mapred.userlog.limit.kb", MRJobConfig.TASK_USERLOG_LIMIT), - new DeprecationDelta("mapred.userlog.retain.hours", - MRJobConfig.USER_LOG_RETAIN_HOURS), new DeprecationDelta("mapred.task.profile.params", MRJobConfig.TASK_PROFILE_PARAMS), new DeprecationDelta("io.sort.spill.percent", diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml index 45930f05f8bed..9659e45ec6f55 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml @@ -229,7 +229,6 @@ mapreduce.job.ubertask.maxmaps9 yarn.nodemanager.heartbeat.interval-ms1000 dfs.namenode.secondary.http-address0.0.0.0:9868 -mapreduce.job.userlog.retain.hours24 mapreduce.task.timeout600000 mapreduce.jobhistory.loadedjobs.cache.size1 mapreduce.framework.nameyarn diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json index 7a52559f59463..8c2e43e1e53ba 100644 --- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json +++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json @@ -4809,7 +4809,6 @@ "mapreduce.job.ubertask.maxmaps" : "9", "yarn.scheduler.maximum-allocation-mb" : "8192", "yarn.nodemanager.heartbeat.interval-ms" : "1000", - "mapreduce.job.userlog.retain.hours" : "24", "dfs.namenode.secondary.http-address" : "0.0.0.0:9868", "mapreduce.task.timeout" : "600000", "mapreduce.framework.name" : "yarn", @@ -9877,7 +9876,6 @@ "mapreduce.job.ubertask.maxmaps" : "9", "yarn.scheduler.maximum-allocation-mb" : "8192", "yarn.nodemanager.heartbeat.interval-ms" : "1000", - "mapreduce.job.userlog.retain.hours" : "24", "dfs.namenode.secondary.http-address" : "0.0.0.0:9868", "mapreduce.task.timeout" : "600000", "mapreduce.framework.name" : "yarn", From b05747901fee6c81cb215732d500b8c9fc1a35eb Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Tue, 11 Jun 2019 12:00:12 +0200 Subject: [PATCH 0161/1308] HDDS-1662. Missing test resources of integrataion-test project in target directory after compile Closes #933 --- hadoop-ozone/integration-test/pom.xml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index ba53e3fb43954..eb1a7fa9cf928 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -132,6 +132,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/../../hadoop-hdds/common/src/main/resources + + ${basedir}/src/test/resources + From f7df55f4a89ed2d75d874b32209647ef4f448875 Mon Sep 17 00:00:00 2001 From: bibinchundatt Date: Tue, 11 Jun 2019 22:29:00 +0530 Subject: [PATCH 0162/1308] YARN-9602. Use logger format in Container Executor. Contributed by Abhishek Modi. 
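This change rewrites log statements from string concatenation to SLF4J parameterized logging, so the message text is only built when the log level is actually enabled. A minimal sketch of the pattern follows; the class name and values are illustrative only and are not code from this patch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggerFormatExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggerFormatExample.class);

      public static void main(String[] args) {
        String containerId = "container_01";  // illustrative value
        int exitCode = 143;                   // illustrative value

        // Before: the message string is concatenated even when WARN is disabled.
        LOG.warn("Exit code from container " + containerId + " is : " + exitCode);

        // After: the {} placeholders are substituted only if WARN is enabled;
        // an exception can still be passed as a trailing argument and its
        // stack trace is preserved.
        LOG.warn("Exit code from container {} is : {}", containerId, exitCode);
      }
    }
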
--- .../server/nodemanager/ContainerExecutor.java | 14 ++--- .../nodemanager/DefaultContainerExecutor.java | 62 +++++++++---------- .../nodemanager/LinuxContainerExecutor.java | 54 ++++++++-------- 3 files changed, 65 insertions(+), 65 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index 55836c575c9d4..9506509acf4fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -290,7 +290,7 @@ public int reacquireContainer(ContainerReacquisitionContext ctx) Path pidPath = getPidFilePath(containerId); if (pidPath == null) { - LOG.warn(containerId + " is not active, returning terminated error"); + LOG.warn("{} is not active, returning terminated error", containerId); return ExitCode.TERMINATED.getExitCode(); } @@ -301,7 +301,7 @@ public int reacquireContainer(ContainerReacquisitionContext ctx) throw new IOException("Unable to determine pid for " + containerId); } - LOG.info("Reacquiring " + containerId + " with pid " + pid); + LOG.info("Reacquiring {} with pid {}", containerId, pid); ContainerLivenessContext livenessContext = new ContainerLivenessContext .Builder() @@ -322,7 +322,7 @@ public int reacquireContainer(ContainerReacquisitionContext ctx) while (!file.exists() && msecLeft >= 0) { if (!isContainerActive(containerId)) { - LOG.info(containerId + " was deactivated"); + LOG.info("{} was deactivated", containerId); return ExitCode.TERMINATED.getExitCode(); } @@ -754,7 +754,7 @@ public static String[] getLocalIpAndHost(Container container) { ipAndHost[0] = address.getHostAddress(); ipAndHost[1] = address.getHostName(); } catch (UnknownHostException e) { - LOG.error("Unable to get Local hostname and ip for " + container + LOG.error("Unable to get Local hostname and ip for {}", container .getContainerId(), e); } return ipAndHost; @@ -782,7 +782,7 @@ public void deactivateContainer(ContainerId containerId) { * the Container */ public void pauseContainer(Container container) { - LOG.warn(container.getContainerId() + " doesn't support pausing."); + LOG.warn("{} doesn't support pausing.", container.getContainerId()); throw new UnsupportedOperationException(); } @@ -793,7 +793,7 @@ public void pauseContainer(Container container) { * the Container */ public void resumeContainer(Container container) { - LOG.warn(container.getContainerId() + " doesn't support resume."); + LOG.warn("{} doesn't support resume.", container.getContainerId()); throw new UnsupportedOperationException(); } @@ -835,7 +835,7 @@ public String getProcessId(ContainerId containerID) { try { pid = ProcessIdFileReader.getProcessId(pidFile); } catch (IOException e) { - LOG.error("Got exception reading pid from pid-file " + pidFile, e); + LOG.error("Got exception reading pid from pid-file {}", pidFile, e); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java index f1515289cc83a..c5fc481661bd9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java @@ -167,16 +167,15 @@ public void startLocalizer(LocalizerStartContext ctx) String tokenFn = String.format(TOKEN_FILE_NAME_FMT, locId); Path tokenDst = new Path(appStorageDir, tokenFn); copyFile(nmPrivateContainerTokensPath, tokenDst, user); - LOG.info("Copying from " + nmPrivateContainerTokensPath - + " to " + tokenDst); + LOG.info("Copying from {} to {}", nmPrivateContainerTokensPath, tokenDst); FileContext localizerFc = FileContext.getFileContext(lfs.getDefaultFileSystem(), getConf()); localizerFc.setUMask(lfs.getUMask()); localizerFc.setWorkingDirectory(appStorageDir); - LOG.info("Localizer CWD set to " + appStorageDir + " = " - + localizerFc.getWorkingDirectory()); + LOG.info("Localizer CWD set to {} = {}", appStorageDir, + localizerFc.getWorkingDirectory()); ContainerLocalizer localizer = createContainerLocalizer(user, appId, locId, tokenFn, localDirs, @@ -292,8 +291,8 @@ public int launchContainer(ContainerStartContext ctx) if (pidFile != null) { sb.writeLocalWrapperScript(launchDst, pidFile); } else { - LOG.info("Container " + containerIdStr - + " pid file not set. Returning terminated error"); + LOG.info("Container {} pid file not set. Returning terminated error", + containerIdStr); return ExitCode.TERMINATED.getExitCode(); } @@ -312,8 +311,8 @@ public int launchContainer(ContainerStartContext ctx) if (isContainerActive(containerId)) { shExec.execute(); } else { - LOG.info("Container " + containerIdStr + - " was marked as inactive. Returning terminated error"); + LOG.info("Container {} was marked as inactive. " + + "Returning terminated error", containerIdStr); return ExitCode.TERMINATED.getExitCode(); } } catch (IOException e) { @@ -321,14 +320,14 @@ public int launchContainer(ContainerStartContext ctx) return -1; } int exitCode = shExec.getExitCode(); - LOG.warn("Exit code from container " + containerId + " is : " + exitCode); + LOG.warn("Exit code from container {} is : {}", containerId, exitCode); // 143 (SIGTERM) and 137 (SIGKILL) exit codes means the container was // terminated/killed forcefully. 
In all other cases, log the // container-executor's output if (exitCode != ExitCode.FORCE_KILLED.getExitCode() && exitCode != ExitCode.TERMINATED.getExitCode()) { - LOG.warn("Exception from container-launch with container ID: " - + containerId + " and exit code: " + exitCode , e); + LOG.warn("Exception from container-launch with container ID: {}" + + " and exit code: {}", containerId, exitCode, e); StringBuilder builder = new StringBuilder(); builder.append("Exception from container-launch.\n") @@ -386,13 +385,13 @@ protected CommandExecutor buildCommandExecutor(String wrapperScriptPath, String[] command = getRunCommand(wrapperScriptPath, containerIdStr, user, pidFile, this.getConf(), resource); - LOG.info("launchContainer: " + Arrays.toString(command)); - return new ShellCommandExecutor( - command, - workDir, - environment, - 0L, - false); + LOG.info("launchContainer: {}", Arrays.toString(command)); + return new ShellCommandExecutor( + command, + workDir, + environment, + 0L, + false); } /** @@ -648,19 +647,19 @@ public void deleteAsUser(DeletionAsUserContext ctx) List baseDirs = ctx.getBasedirs(); if (baseDirs == null || baseDirs.size() == 0) { - LOG.info("Deleting absolute path : " + subDir); + LOG.info("Deleting absolute path : {}", subDir); if (!lfs.delete(subDir, true)) { //Maybe retry - LOG.warn("delete returned false for path: [" + subDir + "]"); + LOG.warn("delete returned false for path: [{}]", subDir); } return; } for (Path baseDir : baseDirs) { Path del = subDir == null ? baseDir : new Path(baseDir, subDir); - LOG.info("Deleting path : " + del); + LOG.info("Deleting path : {}", del); try { if (!lfs.delete(del, true)) { - LOG.warn("delete returned false for path: [" + del + "]"); + LOG.warn("delete returned false for path: [{}]", del); } } catch (FileNotFoundException e) { continue; @@ -743,7 +742,7 @@ protected Path getWorkingDir(List localDirs, String user, try { space = getDiskFreeSpace(curBase); } catch (IOException e) { - LOG.warn("Unable to get Free Space for " + curBase.toString(), e); + LOG.warn("Unable to get Free Space for {}", curBase, e); } availableOnDisk[i++] = space; totalAvailable += space; @@ -823,7 +822,7 @@ void createUserLocalDirs(List localDirs, String user) createDir(getUserCacheDir(new Path(localDir), user), userperms, true, user); } catch (IOException e) { - LOG.warn("Unable to create the user directory : " + localDir, e); + LOG.warn("Unable to create the user directory : {}", localDir, e); continue; } userDirStatus = true; @@ -850,7 +849,7 @@ void createUserLocalDirs(List localDirs, String user) */ void createUserCacheDirs(List localDirs, String user) throws IOException { - LOG.info("Initializing user " + user); + LOG.info("Initializing user {}", user); boolean appcacheDirStatus = false; boolean distributedCacheDirStatus = false; @@ -865,7 +864,7 @@ void createUserCacheDirs(List localDirs, String user) createDir(appDir, appCachePerms, true, user); appcacheDirStatus = true; } catch (IOException e) { - LOG.warn("Unable to create app cache directory : " + appDir, e); + LOG.warn("Unable to create app cache directory : {}", appDir, e); } // create $local.dir/usercache/$user/filecache final Path distDir = getFileCacheDir(localDirPath, user); @@ -873,7 +872,7 @@ void createUserCacheDirs(List localDirs, String user) createDir(distDir, fileperms, true, user); distributedCacheDirStatus = true; } catch (IOException e) { - LOG.warn("Unable to create file cache directory : " + distDir, e); + LOG.warn("Unable to create file cache directory : {}", distDir, e); } } if 
(!appcacheDirStatus) { @@ -911,7 +910,8 @@ void createAppDirs(List localDirs, String user, String appId) createDir(fullAppDir, appperms, true, user); initAppDirStatus = true; } catch (IOException e) { - LOG.warn("Unable to create app directory " + fullAppDir.toString(), e); + LOG.warn("Unable to create app directory {}", + fullAppDir, e); } } if (!initAppDirStatus) { @@ -942,7 +942,7 @@ void createAppLogDirs(String appId, List logDirs, String user) try { createDir(appLogDir, appLogDirPerms, true, user); } catch (IOException e) { - LOG.warn("Unable to create the app-log directory : " + appLogDir, e); + LOG.warn("Unable to create the app-log directory : {}", appLogDir, e); continue; } appLogDirStatus = true; @@ -976,8 +976,8 @@ void createContainerLogDirs(String appId, String containerId, try { createDir(containerLogDir, containerLogDirPerms, true, user); } catch (IOException e) { - LOG.warn("Unable to create the container-log directory : " - + appLogDir, e); + LOG.warn("Unable to create the container-log directory : {}", + appLogDir, e); continue; } containerLogDirStatus = true; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java index 039a51072be71..137421a3e459e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java @@ -210,8 +210,8 @@ public void setConf(Configuration conf) { YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS, YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LIMIT_USERS); if (!containerLimitUsers) { - LOG.warn(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS + - ": impersonation without authentication enabled"); + LOG.warn("{}: impersonation without authentication enabled", + YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS); } } @@ -304,8 +304,8 @@ public void init(Context context) throws IOException { false); } catch (PrivilegedOperationException e) { int exitCode = e.getExitCode(); - LOG.warn("Exit code from container executor initialization is : " - + exitCode, e); + LOG.warn("Exit code from container executor initialization is : {}", + exitCode, e); throw new IOException("Linux container executor not configured properly" + " (error=" + exitCode + ")", e); @@ -406,8 +406,8 @@ public void startLocalizer(LocalizerStartContext ctx) } catch (PrivilegedOperationException e) { int exitCode = e.getExitCode(); - LOG.warn("Exit code from container " + locId + " startLocalizer is : " - + exitCode, e); + LOG.warn("Exit code from container {} startLocalizer is : {}", + locId, exitCode, e); throw new IOException("Application " + appId + " initialization failed" + " (exitCode=" + exitCode + ") with output: " + e.getOutput(), e); @@ -530,8 +530,8 @@ private int handleLaunchForLaunchType(ContainerStartContext ctx, numaArgs = op.getArguments(); break; default: - LOG.warn("PrivilegedOperation type unsupported in launch: " - + op.getOperationType()); + LOG.warn("PrivilegedOperation type unsupported in launch: {}", + op.getOperationType()); } } @@ -585,14 +585,14 @@ private int 
handleLaunchForLaunchType(ContainerStartContext ctx, private int handleExitCode(ContainerExecutionException e, Container container, ContainerId containerId) throws ConfigurationException { int exitCode = e.getExitCode(); - LOG.warn("Exit code from container " + containerId + " is : " + exitCode); + LOG.warn("Exit code from container {} is : {}", containerId, exitCode); // 143 (SIGTERM) and 137 (SIGKILL) exit codes means the container was // terminated/killed forcefully. In all other cases, log the // output if (exitCode != ContainerExecutor.ExitCode.FORCE_KILLED.getExitCode() && exitCode != ContainerExecutor.ExitCode.TERMINATED.getExitCode()) { - LOG.warn("Exception from container-launch with container ID: " - + containerId + " and exit code: " + exitCode, e); + LOG.warn("Exception from container-launch with container ID: {} " + + "and exit code: {}", containerId, exitCode, e); StringBuilder builder = new StringBuilder(); builder.append("Exception from container-launch.\n") @@ -703,7 +703,7 @@ public int reacquireContainer(ContainerReacquisitionContext ctx) resourceHandlerChain.reacquireContainer(containerId); } catch (ResourceHandlerException e) { LOG.warn("ResourceHandlerChain.reacquireContainer failed for " + - "containerId: " + containerId + " Exception: " + e); + "containerId: {} Exception: ", containerId, e); } } @@ -741,8 +741,8 @@ public boolean signalContainer(ContainerSignalContext ctx) .getValue()) { return false; } - LOG.warn("Error in signalling container " + pid + " with " + signal - + "; exit = " + retCode, e); + LOG.warn("Error in signalling container {} with {}; exit = {}", + pid, signal, retCode, e); logOutput(e.getOutput()); throw new IOException("Problem signalling container " + pid + " with " + signal + "; output: " + e.getOutput() + " and exitCode: " @@ -775,8 +775,8 @@ public boolean reapContainer(ContainerReapContext ctx) throws IOException { if (retCode != 0) { return false; } - LOG.warn("Error in reaping container " - + container.getContainerId().toString() + " exit = " + retCode, e); + LOG.warn("Error in reaping container {} exit = {}", + container.getContainerId(), retCode, e); logOutput(e.getOutput()); throw new IOException("Error in reaping container " + container.getContainerId().toString() + " exit = " + retCode, e); @@ -804,8 +804,8 @@ public IOStreamPair execContainer(ContainerExecContext ctx) if (retCode != 0) { return new IOStreamPair(null, null); } - LOG.warn("Error in executing container interactive shell" - + ctx + " exit = " + retCode, e); + LOG.warn("Error in executing container interactive shell {} exit = {}", + ctx, retCode, e); logOutput(e.getOutput()); throw new ContainerExecutionException( "Error in executing container interactive shel" + ctx.getContainer() @@ -837,12 +837,12 @@ public void deleteAsUser(DeletionAsUserContext ctx) { List pathsToDelete = new ArrayList(); if (baseDirs == null || baseDirs.size() == 0) { - LOG.info("Deleting absolute path : " + dir); + LOG.info("Deleting absolute path : {}", dir); pathsToDelete.add(dirString); } else { for (Path baseDir : baseDirs) { Path del = dir == null ? 
baseDir : new Path(baseDir, dir); - LOG.info("Deleting path : " + del); + LOG.info("Deleting path : {}", del); pathsToDelete.add(del.toString()); deleteAsUserOp.appendArgs(baseDir.toUri().getPath()); } @@ -857,8 +857,8 @@ public void deleteAsUser(DeletionAsUserContext ctx) { false); } catch (PrivilegedOperationException e) { int exitCode = e.getExitCode(); - LOG.error("DeleteAsUser for " + StringUtils.join(" ", pathsToDelete) - + " returned with exit code: " + exitCode, e); + LOG.error("DeleteAsUser for {} returned with exit code: {}", + StringUtils.join(" ", pathsToDelete), exitCode, e); } } @@ -894,8 +894,8 @@ protected File[] readDirAsUser(String user, Path dir) { } } } catch (PrivilegedOperationException e) { - LOG.error("ListAsUser for " + dir + " returned with exit code: " - + e.getExitCode(), e); + LOG.error("ListAsUser for {} returned with exit code: {}", + dir, e.getExitCode(), e); } return files.toArray(new File[files.size()]); @@ -971,14 +971,14 @@ public void removeDockerContainer(String containerId) { if (DockerCommandExecutor.isRemovable( DockerCommandExecutor.getContainerStatus(containerId, privOpExecutor, nmContext))) { - LOG.info("Removing Docker container : " + containerId); + LOG.info("Removing Docker container : {}", containerId); DockerRmCommand dockerRmCommand = new DockerRmCommand(containerId, ResourceHandlerModule.getCgroupsRelativeRoot()); DockerCommandExecutor.executeDockerCommand(dockerRmCommand, containerId, null, privOpExecutor, false, nmContext); } } catch (ContainerExecutionException e) { - LOG.warn("Unable to remove docker container: " + containerId); + LOG.warn("Unable to remove docker container: {}", containerId); } } @@ -1005,7 +1005,7 @@ public synchronized void updateYarnSysFS(Context ctx, String user, List localDirs = dirsHandler.getLocalDirs(); if (file.exists()) { if (!file.delete()) { - LOG.warn("Unable to delete " + sysFSPath.toString()); + LOG.warn("Unable to delete {}", sysFSPath); } } if (file.createNewFile()) { From 6d80b9bc3ff3ba8073e3faf64551b9109d2aa2ad Mon Sep 17 00:00:00 2001 From: bibinchundatt Date: Tue, 11 Jun 2019 22:49:21 +0530 Subject: [PATCH 0163/1308] YARN-9594. Fix missing break statement in ContainerScheduler#handle. Contributed by lujie. 
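Without the break, the handled case in ContainerScheduler#handle fell through into the default branch, so a valid event was also logged as an unknown event. A minimal sketch of that fall-through behaviour follows; the enum and handler below are hypothetical stand-ins, not the ContainerScheduler code:

    public class SwitchFallThroughExample {
      enum EventType { HANDLED_EVENT, OTHER_EVENT }

      static void handle(EventType type) {
        switch (type) {
        case HANDLED_EVENT:
          System.out.println("handled " + type);
          break;  // without this break, execution continues into default
        default:
          System.out.println("Unknown event arrived: " + type);
        }
      }

      public static void main(String[] args) {
        // Prints only the "handled" line; removing the break above would also
        // print the "Unknown event" line for the same call.
        handle(EventType.HANDLED_EVENT);
      }
    }
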
--- .../containermanager/scheduler/ContainerScheduler.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java index 854ad532bcd3c..3a6d29635b07e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java @@ -190,6 +190,7 @@ public void handle(ContainerSchedulerEvent event) { startPendingContainers(maxOppQueueLength <= 0); metrics.setQueuedContainers(queuedOpportunisticContainers.size(), queuedGuaranteedContainers.size()); + break; default: LOG.error("Unknown event arrived at ContainerScheduler: " + event.toString()); From f918e3fe6245702a38995df767da06bc8f143377 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Tue, 11 Jun 2019 19:30:22 +0200 Subject: [PATCH 0164/1308] HDDS-1669. SCM startup is failing if network-topology-default.xml is part of a jar Closes #946 --- .../hadoop/hdds/scm/net/NodeSchemaLoader.java | 89 ++++++++++--------- .../hdds/scm/net/NodeSchemaManager.java | 6 +- hadoop-ozone/integration-test/pom.xml | 11 +-- 3 files changed, 53 insertions(+), 53 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java index 3e1a7109621fc..8d7abedf2e74a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java @@ -34,8 +34,7 @@ import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; -import java.net.URISyntaxException; -import java.net.URL; +import java.io.InputStream; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -105,7 +104,14 @@ public NodeSchemaLoadResult loadSchemaFromFile(String schemaFilePath) throws IllegalArgumentException, FileNotFoundException { try { File schemaFile = new File(schemaFilePath); - if (!schemaFile.exists()) { + + if (schemaFile.exists()) { + LOG.info("Load network topology schema file " + + schemaFile.getAbsolutePath()); + try (FileInputStream inputStream = new FileInputStream(schemaFile)) { + return loadSchemaFromStream(schemaFilePath, inputStream); + } + } else { // try to load with classloader ClassLoader classloader = Thread.currentThread().getContextClassLoader(); @@ -113,55 +119,61 @@ public NodeSchemaLoadResult loadSchemaFromFile(String schemaFilePath) classloader = NodeSchemaLoader.class.getClassLoader(); } if (classloader != null) { - URL url = classloader.getResource(schemaFilePath); - if (url != null) { - schemaFile = new File(url.toURI()); + try (InputStream stream = classloader + .getResourceAsStream(schemaFilePath)) { + if (stream != null) { + LOG.info("Loading file from " + classloader + .getResources(schemaFilePath)); + return loadSchemaFromStream(schemaFilePath, stream); + } } } 
- } - if (!schemaFile.exists()) { - String msg = "Network topology layer schema file " + - schemaFilePath + "[" + schemaFile.getAbsolutePath() + - "] is not found."; - LOG.warn(msg); - throw new FileNotFoundException(msg); } - LOG.info("Load network topology schema file " + - schemaFile.getCanonicalPath()); - if (FilenameUtils.getExtension(schemaFilePath).toLowerCase() - .compareTo("yaml") == 0) { - return loadSchemaFromYaml(schemaFile); - } else { - return loadSchema(schemaFile); - } + String msg = "Network topology layer schema file " + + schemaFilePath + "[" + schemaFile.getAbsolutePath() + + "] is not found."; + LOG.warn(msg); + throw new FileNotFoundException(msg); + } catch (FileNotFoundException e) { throw e; - } catch (ParserConfigurationException | IOException | SAXException | - URISyntaxException e) { + } catch (ParserConfigurationException | IOException | SAXException e) { throw new IllegalArgumentException("Failed to load network topology node" - + " schema file: " + schemaFilePath + " , error:" + e.getMessage()); + + " schema file: " + schemaFilePath + " , error:" + e.getMessage(), + e); + } + } + + private NodeSchemaLoadResult loadSchemaFromStream(String schemaFilePath, + InputStream stream) + throws ParserConfigurationException, SAXException, IOException { + if (FilenameUtils.getExtension(schemaFilePath).toLowerCase() + .compareTo("yaml") == 0) { + return loadSchemaFromYaml(stream); + } else { + return loadSchema(stream); } } /** * Load network topology layer schemas from a XML configuration file. - * @param schemaFile schema file + * @param inputStream schema file as an inputStream * @return all valid node schemas defined in schema file * @throws ParserConfigurationException ParserConfigurationException happen * @throws IOException no such schema file * @throws SAXException xml file has some invalid elements * @throws IllegalArgumentException xml file content is logically invalid */ - private NodeSchemaLoadResult loadSchema(File schemaFile) throws + private NodeSchemaLoadResult loadSchema(InputStream inputStream) throws ParserConfigurationException, SAXException, IOException { - LOG.info("Loading network topology layer schema file " + schemaFile); + LOG.info("Loading network topology layer schema file"); // Read and parse the schema file. DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); dbf.setIgnoringComments(true); DocumentBuilder builder = dbf.newDocumentBuilder(); - Document doc = builder.parse(schemaFile); + Document doc = builder.parse(inputStream); Element root = doc.getDocumentElement(); if (!CONFIGURATION_TAG.equals(root.getTagName())) { @@ -200,14 +212,14 @@ private NodeSchemaLoadResult loadSchema(File schemaFile) throws /** * Load network topology layer schemas from a YAML configuration file. 
- * @param schemaFile schema file + * @param schemaFile as inputStream * @return all valid node schemas defined in schema file * @throws ParserConfigurationException ParserConfigurationException happen * @throws IOException no such schema file * @throws SAXException xml file has some invalid elements * @throws IllegalArgumentException xml file content is logically invalid */ - private NodeSchemaLoadResult loadSchemaFromYaml(File schemaFile) { + private NodeSchemaLoadResult loadSchemaFromYaml(InputStream schemaFile) { LOG.info("Loading network topology layer schema file {}", schemaFile); NodeSchemaLoadResult finalSchema; @@ -215,13 +227,12 @@ private NodeSchemaLoadResult loadSchemaFromYaml(File schemaFile) { Yaml yaml = new Yaml(); NodeSchema nodeTree; - try (FileInputStream fileInputStream = new FileInputStream(schemaFile)) { - nodeTree = yaml.loadAs(fileInputStream, NodeSchema.class); - } + nodeTree = yaml.loadAs(schemaFile, NodeSchema.class); + List schemaList = new ArrayList<>(); if (nodeTree.getType() != LayerType.ROOT) { throw new IllegalArgumentException("First layer is not a ROOT node." - + " schema file: " + schemaFile.getAbsolutePath()); + + " schema file."); } schemaList.add(nodeTree); if (nodeTree.getSublayer() != null) { @@ -232,11 +243,11 @@ private NodeSchemaLoadResult loadSchemaFromYaml(File schemaFile) { if (nodeTree.getType() == LayerType.LEAF_NODE && nodeTree.getSublayer() != null) { throw new IllegalArgumentException("Leaf node in the middle of path." - + " schema file: " + schemaFile.getAbsolutePath()); + + " schema file."); } if (nodeTree.getType() == LayerType.ROOT) { throw new IllegalArgumentException("Multiple root nodes are defined." - + " schema file: " + schemaFile.getAbsolutePath()); + + " schema file."); } schemaList.add(nodeTree); if (nodeTree.getSublayer() != null) { @@ -246,12 +257,10 @@ private NodeSchemaLoadResult loadSchemaFromYaml(File schemaFile) { } } finalSchema = new NodeSchemaLoadResult(schemaList, true); - } catch (RuntimeException e) { - throw e; } catch (Exception e) { throw new IllegalArgumentException("Fail to load network topology node" - + " schema file: " + schemaFile.getAbsolutePath() + " , error:" - + e.getMessage()); + + " schema file: " + schemaFile + " , error:" + + e.getMessage(), e); } return finalSchema; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java index 680c7be2d81b5..c60c2c80aa9e7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java @@ -70,9 +70,9 @@ public void init(Configuration conf) { maxLevel = allSchema.size(); } catch (Throwable e) { String msg = "Failed to load schema file:" + schemaFile - + ", error:" + e.getMessage(); - LOG.error(msg); - throw new RuntimeException(msg); + + ", error: " + e.getMessage(); + LOG.error(msg, e); + throw new RuntimeException(msg, e); } } diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index eb1a7fa9cf928..ff18fd484593b 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -127,14 +127,5 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - - - ${basedir}/../../hadoop-hdds/common/src/main/resources - - - ${basedir}/src/test/resources - - - + From 60c95e9b6a899e37ecdc8bce7bb6d9ed0dc7a6be Mon Sep 17 00:00:00 2001 From: bibinchundatt 
Date: Tue, 11 Jun 2019 23:10:41 +0530 Subject: [PATCH 0165/1308] YARN-9565. RMAppImpl#ranNodes not cleared on FinalTransition. Contributed by Bilwa S T. --- .../hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java | 1 + .../server/resourcemanager/rmapp/TestRMAppTransitions.java | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index ce67f2e8e1312..3f9f9c8103d5e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -1522,6 +1522,7 @@ private void completeAndCleanupApp(RMAppImpl app) { app.handler.handle( new RMNodeCleanAppEvent(nodeId, app.applicationId)); } + app.ranNodes.clear(); // Recovered apps that are completed were not added to scheduler, so no // need to remove them from scheduler. if (app.recoveredFinalState == null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index e5d7e3fb637b1..2e176cc9763e1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.LogAggregationContext; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.URL; @@ -569,6 +570,7 @@ protected RMApp testCreateAppSubmittedRecovery( protected RMApp testCreateAppAccepted( ApplicationSubmissionContext submissionContext) throws IOException { RMApp application = testCreateAppSubmittedNoRecovery(submissionContext); + NodeId nodeId = NodeId.newInstance("host", 1234); // SUBMITTED => ACCEPTED event RMAppEventType.APP_ACCEPTED RMAppEvent event = new RMAppEvent(application.getApplicationId(), @@ -576,6 +578,8 @@ protected RMApp testCreateAppAccepted( application.handle(event); assertStartTimeSet(application); assertAppState(RMAppState.ACCEPTED, application); + application.handle( + new RMAppRunningOnNodeEvent(application.getApplicationId(), nodeId)); return application; } @@ -1089,6 +1093,7 @@ public void testAppFinishedFinished() throws Exception { rmDispatcher.await(); assertTimesAtFinish(application); assertAppState(RMAppState.FINISHED, application); + Assert.assertEquals(0, application.getRanNodes().size()); 
StringBuilder diag = application.getDiagnostics(); Assert.assertEquals("application diagnostics is not correct", "", diag.toString()); From 2263ead3657fbb7ce641dcde9b40f15113b21720 Mon Sep 17 00:00:00 2001 From: bibinchundatt Date: Tue, 11 Jun 2019 23:20:28 +0530 Subject: [PATCH 0166/1308] YARN-9557. Application fails in diskchecker when ReadWriteDiskValidator is configured. Contributed by Bilwa S T. --- .../localizer/ContainerLocalizer.java | 11 +++++-- .../localizer/TestContainerLocalizer.java | 29 +++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java index 859f0c3219643..07ed9fa5a8c56 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java @@ -133,7 +133,7 @@ public ContainerLocalizer(FileContext lfs, String user, String appId, this.localDirs = localDirs; this.localizerId = localizerId; this.recordFactory = recordFactory; - this.conf = new YarnConfiguration(); + this.conf = initConfiguration(); this.diskValidator = DiskValidatorFactory.getInstance( YarnConfiguration.DEFAULT_DISK_VALIDATOR); this.appCacheDirContextName = String.format(APPCACHE_CTXT_FMT, appId); @@ -142,6 +142,12 @@ public ContainerLocalizer(FileContext lfs, String user, String appId, "token file name cannot be null"); } + @VisibleForTesting + @Private + Configuration initConfiguration() { + return new YarnConfiguration(); + } + @Private @VisibleForTesting public LocalizationProtocol getProxy(final InetSocketAddress nmAddr) { @@ -250,7 +256,8 @@ Callable download(Path destDirPath, LocalResource rsrc, if (rsrc.getVisibility() == LocalResourceVisibility.PRIVATE) { createParentDirs(destDirPath); } - diskValidator.checkStatus(new File(destDirPath.toUri().getRawPath())); + diskValidator + .checkStatus(new File(destDirPath.getParent().toUri().getRawPath())); return new FSDownloadWrapper(lfs, ugi, conf, destDirPath, rsrc); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java index 3777867e0ff96..016f801da73e2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java @@ -70,12 +70,14 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import 
org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.URL; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -235,6 +237,33 @@ public void testMainFailure() throws Exception { } } + @Test + public void testDiskCheckFailure() throws Exception { + Configuration conf = new Configuration(); + conf.set(YarnConfiguration.DISK_VALIDATOR, "read-write"); + FileContext lfs = FileContext.getLocalFSFileContext(conf); + Path fileCacheDir = lfs.makeQualified(new Path(basedir, "filecache")); + lfs.mkdir(fileCacheDir, FsPermission.getDefault(), true); + RecordFactory recordFactory = mock(RecordFactory.class); + ContainerLocalizer localizer = new ContainerLocalizer(lfs, + UserGroupInformation.getCurrentUser().getUserName(), "application_01", + "container_01", String.format(ContainerExecutor.TOKEN_FILE_NAME_FMT, + "container_01"), new ArrayList<>(), recordFactory){ + @Override + Configuration initConfiguration() { + return conf; + } + }; + LocalResource rsrc = mock(LocalResource.class); + Path destDirPath = new Path(fileCacheDir, "11"); + try { + localizer.download(destDirPath, rsrc, + UserGroupInformation.getCurrentUser()); + } catch (DiskErrorException ex) { + fail(ex.getCause().toString()); + } + } + @Test @SuppressWarnings("unchecked") public void testLocalizerTokenIsGettingRemoved() throws Exception { From 3c9a5e7b16cb59fe48f660be3fb1fa3a140ad78a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Tue, 11 Jun 2019 20:12:05 +0200 Subject: [PATCH 0167/1308] HDDS-1635. 
Maintain docker entrypoint and envtoconf inside ozone project (#894) --- .../dev-support/bin/dist-layout-stitching | 1 + .../dist/src/main/dockerbin/entrypoint.sh | 149 +++++++++++++++++ .../dist/src/main/dockerbin/envtoconf.py | 117 ++++++++++++++ .../dist/src/main/dockerbin/transformation.py | 150 ++++++++++++++++++ 4 files changed, 417 insertions(+) create mode 100755 hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh create mode 100755 hadoop-ozone/dist/src/main/dockerbin/envtoconf.py create mode 100755 hadoop-ozone/dist/src/main/dockerbin/transformation.py diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching index c9f81bd99769f..6d7ffed9ab1d3 100755 --- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching @@ -96,6 +96,7 @@ run cp "${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-nodegroup run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop" "bin/" run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd" "bin/" run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone" "bin/" +run cp -r "${ROOT}/hadoop-ozone/dist/src/main/dockerbin" "bin/" run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh" "libexec/" run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd" "libexec/" diff --git a/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh b/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh new file mode 100755 index 0000000000000..f90942eeef899 --- /dev/null +++ b/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh @@ -0,0 +1,149 @@ +#!/usr/bin/env bash +## +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +if [ -n "$SLEEP_SECONDS" ]; then + echo "Sleeping for $SLEEP_SECONDS seconds" + sleep "$SLEEP_SECONDS" +fi + +# +# You can wait for an other TCP port with these settings. +# +# Example: +# +# export WAITFOR=localhost:9878 +# +# With an optional parameter, you can also set the maximum +# time of waiting with (in seconds) with WAITFOR_TIMEOUT. +# (The default is 300 seconds / 5 minutes.) +if [ -n "$WAITFOR" ]; then + echo "Waiting for the service $WAITFOR" + WAITFOR_HOST=$(printf "%s\n" "$WAITFOR"| cut -d : -f 1) + WAITFOR_PORT=$(printf "%s\n" "$WAITFOR"| cut -d : -f 2) + for i in $(seq "${WAITFOR_TIMEOUT:-300}" -1 0) ; do + set +e + nc -z "$WAITFOR_HOST" "$WAITFOR_PORT" > /dev/null 2>&1 + result=$? + set -e + if [ $result -eq 0 ] ; then + break + fi + sleep 1 + done + if [ "$i" -eq 0 ]; then + echo "Waiting for service $WAITFOR is timed out." >&2 + exit 1 + f + fi +fi + +if [ -n "$KERBEROS_ENABLED" ]; then + echo "Setting up kerberos!!" 
+ KERBEROS_SERVER=${KERBEROS_SERVER:-krb5} + ISSUER_SERVER=${ISSUER_SERVER:-$KERBEROS_SERVER\:8081} + echo "KDC ISSUER_SERVER => $ISSUER_SERVER" + + if [ -n "$SLEEP_SECONDS" ]; then + echo "Sleeping for $(SLEEP_SECONDS) seconds" + sleep "$SLEEP_SECONDS" + fi + + if [ -z "$KEYTAB_DIR" ]; then + KEYTAB_DIR='/etc/security/keytabs' + fi + while true + do + set +e + STATUS=$(curl -s -o /dev/null -w '%{http_code}' http://"$ISSUER_SERVER"/keytab/test/test) + set -e + if [ "$STATUS" -eq 200 ]; then + echo "Got 200, KDC service ready!!" + break + else + echo "Got $STATUS :( KDC service not ready yet..." + fi + sleep 5 + done + + HOST_NAME=$(hostname -f) + export HOST_NAME + for NAME in ${KERBEROS_KEYTABS}; do + echo "Download $NAME/$HOSTNAME@EXAMPLE.COM keytab file to $KEYTAB_DIR/$NAME.keytab" + wget "http://$ISSUER_SERVER/keytab/$HOST_NAME/$NAME" -O "$KEYTAB_DIR/$NAME.keytab" + klist -kt "$KEYTAB_DIR/$NAME.keytab" + KERBEROS_ENABLED=true + done + + #Optional: let's try to adjust the krb5.conf + sudo sed -i "s/krb5/$KERBEROS_SERVER/g" "/etc/krb5.conf" || true +fi + +CONF_DESTINATION_DIR="${HADOOP_CONF_DIR:-/opt/hadoop/etc/hadoop}" + +#Try to copy the defaults +set +e +if [[ -d "/opt/ozone/etc/hadoop" ]]; then + cp /opt/hadoop/etc/hadoop/* "$CONF_DESTINATION_DIR/" > /dev/null 2>&1 +elif [[ -d "/opt/hadoop/etc/hadoop" ]]; then + cp /opt/hadoop/etc/hadoop/* "$CONF_DESTINATION_DIR/" > /dev/null 2>&1 +fi +set -e + +"$DIR"/envtoconf.py --destination "$CONF_DESTINATION_DIR" + +if [ -n "$ENSURE_SCM_INITIALIZED" ]; then + if [ ! -f "$ENSURE_SCM_INITIALIZED" ]; then + # Improve om and scm start up options + /opt/hadoop/bin/ozone scm --init || /opt/hadoop/bin/ozone scm -init + fi +fi + +if [ -n "$ENSURE_OM_INITIALIZED" ]; then + if [ ! -f "$ENSURE_OM_INITIALIZED" ]; then + # Improve om and scm start up options + /opt/hadoop/bin/ozone om --init || /opt/hadoop/bin/ozone om -createObjectStore + fi +fi + +# Supports byteman script to instrument hadoop process with byteman script +# +# +if [ -n "$BYTEMAN_SCRIPT" ] || [ -n "$BYTEMAN_SCRIPT_URL" ]; then + + export PATH=$PATH:$BYTEMAN_DIR/bin + + if [ -n "$BYTEMAN_SCRIPT_URL" ]; then + wget "$BYTEMAN_SCRIPT_URL" -O /tmp/byteman.btm + export BYTEMAN_SCRIPT=/tmp/byteman.btm + fi + + if [ ! -f "$BYTEMAN_SCRIPT" ]; then + echo "ERROR: The defined $BYTEMAN_SCRIPT does not exist!!!" + exit 255 + fi + + AGENT_STRING="-javaagent:/opt/byteman.jar=script:$BYTEMAN_SCRIPT" + export HADOOP_OPTS="$AGENT_STRING $HADOOP_OPTS" + echo "Process is instrumented with adding $AGENT_STRING to HADOOP_OPTS" +fi + +exec "$@" diff --git a/hadoop-ozone/dist/src/main/dockerbin/envtoconf.py b/hadoop-ozone/dist/src/main/dockerbin/envtoconf.py new file mode 100755 index 0000000000000..0e2c368627640 --- /dev/null +++ b/hadoop-ozone/dist/src/main/dockerbin/envtoconf.py @@ -0,0 +1,117 @@ +#!/usr/bin/python +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""convert environment variables to config""" + +import os +import re + +import argparse + +import sys +import transformation + +class Simple(object): + """Simple conversion""" + def __init__(self, args): + parser = argparse.ArgumentParser() + parser.add_argument("--destination", help="Destination directory", required=True) + self.args = parser.parse_args(args=args) + # copy the default files to file.raw in destination directory + + self.known_formats = ['xml', 'properties', 'yaml', 'yml', 'env', "sh", "cfg", 'conf'] + self.output_dir = self.args.destination + self.excluded_envs = ['HADOOP_CONF_DIR'] + self.configurables = {} + + def destination_file_path(self, name, extension): + """destination file path""" + return os.path.join(self.output_dir, "{}.{}".format(name, extension)) + + def write_env_var(self, name, extension, key, value): + """Write environment variables""" + with open(self.destination_file_path(name, extension) + ".raw", "a") as myfile: + myfile.write("{}: {}\n".format(key, value)) + + def process_envs(self): + """Process environment variables""" + for key in os.environ.keys(): + if key in self.excluded_envs: + continue + pattern = re.compile("[_\\.]") + parts = pattern.split(key) + extension = None + name = parts[0].lower() + if len(parts) > 1: + extension = parts[1].lower() + config_key = key[len(name) + len(extension) + 2:].strip() + if extension and "!" in extension: + splitted = extension.split("!") + extension = splitted[0] + fmt = splitted[1] + config_key = key[len(name) + len(extension) + len(fmt) + 3:].strip() + else: + fmt = extension + + if extension and extension in self.known_formats: + if name not in self.configurables.keys(): + with open(self.destination_file_path(name, extension) + ".raw", "w") as myfile: + myfile.write("") + self.configurables[name] = (extension, fmt) + self.write_env_var(name, extension, config_key, os.environ[key]) + else: + for configurable_name in self.configurables: + if key.lower().startswith(configurable_name.lower()): + self.write_env_var(configurable_name, + self.configurables[configurable_name], + key[len(configurable_name) + 1:], + os.environ[key]) + + def transform(self): + """transform""" + for configurable_name in self.configurables: + name = configurable_name + extension, fmt = self.configurables[name] + + destination_path = self.destination_file_path(name, extension) + + with open(destination_path + ".raw", "r") as myfile: + content = myfile.read() + transformer_func = getattr(transformation, "to_" + fmt) + content = transformer_func(content) + with open(destination_path, "w") as myfile: + myfile.write(content) + + def main(self): + """main""" + + # add the + self.process_envs() + + # copy file.ext.raw to file.ext in the destination directory, and + # transform to the right format (eg. key: value ===> XML) + self.transform() + + +def main(): + """main""" + Simple(sys.argv[1:]).main() + + +if __name__ == '__main__': + Simple(sys.argv[1:]).main() diff --git a/hadoop-ozone/dist/src/main/dockerbin/transformation.py b/hadoop-ozone/dist/src/main/dockerbin/transformation.py new file mode 100755 index 0000000000000..5e708ce2b6546 --- /dev/null +++ b/hadoop-ozone/dist/src/main/dockerbin/transformation.py @@ -0,0 +1,150 @@ +#!/usr/bin/python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This module transforms properties into different formats"""
+def render_yaml(yaml_root, prefix=""):
+    """render yaml"""
+    result = ""
+    if isinstance(yaml_root, dict):
+        if prefix:
+            result += "\n"
+        for key in yaml_root:
+            result += "{}{}: {}".format(prefix, key, render_yaml(
+                yaml_root[key], prefix + " "))
+    elif isinstance(yaml_root, list):
+        result += "\n"
+        for item in yaml_root:
+            result += prefix + " - " + render_yaml(item, prefix + " ")
+    else:
+        result += "{}\n".format(yaml_root)
+    return result
+
+
+def to_yaml(content):
+    """transform to yaml"""
+    props = process_properties(content)
+
+    keys = props.keys()
+    yaml_props = {}
+    for key in keys:
+        parts = key.split(".")
+        node = yaml_props
+        prev_part = None
+        parent_node = {}
+        for part in parts[:-1]:
+            if part.isdigit():
+                if isinstance(node, dict):
+                    parent_node[prev_part] = []
+                    node = parent_node[prev_part]
+                while len(node) <= int(part):
+                    node.append({})
+                parent_node = node
+                node = node[int(part)]
+            else:
+                if part not in node:
+                    node[part] = {}
+                parent_node = node
+                node = node[part]
+            prev_part = part
+        if parts[-1].isdigit():
+            if isinstance(node, dict):
+                parent_node[prev_part] = []
+                node = parent_node[prev_part]
+            node.append(props[key])
+        else:
+            node[parts[-1]] = props[key]
+
+    return render_yaml(yaml_props)
+
+
+def to_yml(content):
+    """transform to yml"""
+    return to_yaml(content)
+
+
+def to_properties(content):
+    """transform to properties"""
+    result = ""
+    props = process_properties(content)
+    for key, val in props.items():
+        result += "{}: {}\n".format(key, val)
+    return result
+
+
+def to_env(content):
+    """transform to environment variables"""
+    result = ""
+    props = process_properties(content)
+    for key, val in props.items():
+        result += "{}={}\n".format(key, val)
+    return result
+
+
+def to_sh(content):
+    """transform to shell"""
+    result = ""
+    props = process_properties(content)
+    for key, val in props.items():
+        result += "export {}=\"{}\"\n".format(key, val)
+    return result
+
+
+def to_cfg(content):
+    """transform to config"""
+    result = ""
+    props = process_properties(content)
+    for key, val in props.items():
+        result += "{}={}\n".format(key, val)
+    return result
+
+
+def to_conf(content):
+    """transform to configuration"""
+    result = ""
+    props = process_properties(content)
+    for key, val in props.items():
+        result += "export {}={}\n".format(key, val)
+    return result
+
+
+def to_xml(content):
+    """transform to xml"""
+    result = "<configuration>\n"
+    props = process_properties(content)
+    for key in props:
+        result += "<property><name>{0}</name><value>{1}</value></property>\n". \
+            format(key, props[key])
+    result += "</configuration>"
+    return result
+
+
+def process_properties(content, sep=': ', comment_char='#'):
+    """
+    Read the file passed as parameter as a properties file.
+ """ + props = {} + for line in content.split("\n"): + sline = line.strip() + if sline and not sline.startswith(comment_char): + key_value = sline.split(sep) + key = key_value[0].strip() + value = sep.join(key_value[1:]).strip().strip('"') + props[key] = value + + return props From 96e1e4174631934e84dc72e3b4ccf21b3f29e73a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Tue, 11 Jun 2019 20:18:12 +0200 Subject: [PATCH 0168/1308] HDDS-1627. Make the version of the used hadoop-runner configurable (#886) --- hadoop-ozone/dist/pom.xml | 1 + .../dist/src/main/compose/ozone-hdfs/.env | 3 ++- .../compose/ozone-hdfs/docker-compose.yaml | 6 +++--- .../dist/src/main/compose/ozone-om-ha/.env | 3 ++- .../compose/ozone-om-ha/docker-compose.yaml | 10 +++++----- .../dist/src/main/compose/ozone-recon/.env | 3 ++- .../compose/ozone-recon/docker-compose.yaml | 8 ++++---- hadoop-ozone/dist/src/main/compose/ozone/.env | 3 ++- .../main/compose/ozone/docker-compose.yaml | 6 +++--- .../dist/src/main/compose/ozoneblockade/.env | 18 ++++++++++++++++++ .../compose/ozoneblockade/docker-compose.yaml | 8 ++++---- .../dist/src/main/compose/ozonefs/.env | 18 ++++++++++++++++++ .../main/compose/ozonefs/docker-compose.yaml | 6 +++--- .../dist/src/main/compose/ozoneperf/.env | 1 + .../compose/ozoneperf/docker-compose.yaml | 8 ++++---- .../dist/src/main/compose/ozones3/.env | 18 ++++++++++++++++++ .../main/compose/ozones3/docker-compose.yaml | 8 ++++---- .../dist/src/main/compose/ozonescripts/.env | 18 ++++++++++++++++++ .../dist/src/main/compose/ozonesecure-mr/.env | 1 + .../ozonesecure-mr/docker-compose.yaml | 8 ++++---- .../dist/src/main/compose/ozonesecure/.env | 1 + .../compose/ozonesecure/docker-compose.yaml | 8 ++++---- .../dist/src/main/compose/ozonetrace/.env | 19 +++++++++++++++++++ .../compose/ozonetrace/docker-compose.yaml | 8 ++++---- 24 files changed, 145 insertions(+), 46 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/compose/ozoneblockade/.env create mode 100644 hadoop-ozone/dist/src/main/compose/ozonefs/.env create mode 100644 hadoop-ozone/dist/src/main/compose/ozones3/.env create mode 100644 hadoop-ozone/dist/src/main/compose/ozonescripts/.env create mode 100644 hadoop-ozone/dist/src/main/compose/ozonetrace/.env diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index f88c055bc2010..4ae92faf500a0 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -29,6 +29,7 @@ UTF-8 true apache/hadoop:${project.version} + jdk11 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env index 47a25e1ce8a49..72f060a3a7c95 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env @@ -14,4 +14,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-HADOOP_VERSION=3 \ No newline at end of file +HADOOP_VERSION=3 +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml index ad096c349dfbe..f3bbeea8d82d2 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml @@ -37,7 +37,7 @@ services: env_file: - ./docker-config om: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -48,7 +48,7 @@ services: - ./docker-config command: ["ozone","om"] scm: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -59,7 +59,7 @@ services: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["ozone","scm"] s3g: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env index 67eed25884f9b..16b65a8b56dbb 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env @@ -14,4 +14,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -HDDS_VERSION=${hdds.version} \ No newline at end of file +HDDS_VERSION=${hdds.version} +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml index 9628990d0eda6..e0d2a4dbdb683 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -27,7 +27,7 @@ services: env_file: - ./docker-config om1: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -40,7 +40,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] om2: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -53,7 +53,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] om3: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -66,7 +66,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] scm: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/.env b/hadoop-ozone/dist/src/main/compose/ozone-recon/.env index 67eed25884f9b..16b65a8b56dbb 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-recon/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/.env @@ -14,4 +14,5 @@ # See the License for the specific language governing permissions and # limitations 
under the License. -HDDS_VERSION=${hdds.version} \ No newline at end of file +HDDS_VERSION=${hdds.version} +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml index 7aad15b2ed75e..119db7c4dbe2b 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/hadoop-runner:jdk11 + image: apache/hadoop-runner: privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -28,7 +28,7 @@ services: env_file: - ./docker-config om: - image: apache/hadoop-runner:jdk11 + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -40,7 +40,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] scm: - image: apache/hadoop-runner:jdk11 + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -52,7 +52,7 @@ services: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["/opt/hadoop/bin/ozone","scm"] recon: - image: apache/hadoop-runner:jdk11 + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozone/.env b/hadoop-ozone/dist/src/main/compose/ozone/.env index 67eed25884f9b..16b65a8b56dbb 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone/.env @@ -14,4 +14,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -HDDS_VERSION=${hdds.version} \ No newline at end of file +HDDS_VERSION=${hdds.version} +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml index 8ceda47d76487..7567b6dae2a46 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/hadoop-runner:jdk11 + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -28,7 +28,7 @@ services: env_file: - ./docker-config om: - image: apache/hadoop-runner:jdk11 + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop @@ -40,7 +40,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] scm: - image: apache/hadoop-runner:jdk11 + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} privileged: true #required by the profiler volumes: - ../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env b/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env new file mode 100644 index 0000000000000..16b65a8b56dbb --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +HDDS_VERSION=${hdds.version} +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml index 8a242555533d7..2e64779c78351 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -26,7 +26,7 @@ services: env_file: - ./docker-config om: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -37,7 +37,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] scm: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -48,7 +48,7 @@ services: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["/opt/hadoop/bin/ozone","scm"] ozone_client: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: diff --git a/hadoop-ozone/dist/src/main/compose/ozonefs/.env b/hadoop-ozone/dist/src/main/compose/ozonefs/.env new file mode 100644 index 0000000000000..16b65a8b56dbb --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozonefs/.env @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +HDDS_VERSION=${hdds.version} +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-compose.yaml index 86d7aed8e94d6..453fc9c8ccc47 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -26,7 +26,7 @@ services: env_file: - ./docker-config om: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} hostname: om volumes: - ../..:/opt/hadoop @@ -38,7 +38,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] scm: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/.env b/hadoop-ozone/dist/src/main/compose/ozoneperf/.env index cac418ae59ee5..16b65a8b56dbb 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/.env +++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/.env @@ -15,3 +15,4 @@ # limitations under the License. HDDS_VERSION=${hdds.version} +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml index 768ec4ba1baf1..0dda92bf53203 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -26,7 +26,7 @@ services: env_file: - ./docker-config om: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -37,7 +37,7 @@ services: - ./docker-config command: ["ozone","om"] scm: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -55,7 +55,7 @@ services: ports: - 9090:9090 freon: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop environment: diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/.env b/hadoop-ozone/dist/src/main/compose/ozones3/.env new file mode 100644 index 0000000000000..16b65a8b56dbb --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozones3/.env @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +HDDS_VERSION=${hdds.version} +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml index c18aba346b9f9..aa3f65b2d0b49 100644 --- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml @@ -17,7 +17,7 @@ version: "3" services: datanode: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -26,7 +26,7 @@ services: env_file: - ./docker-config om: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -37,7 +37,7 @@ services: - ./docker-config command: ["ozone","om"] scm: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -48,7 +48,7 @@ services: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["ozone","scm"] s3g: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.env b/hadoop-ozone/dist/src/main/compose/ozonescripts/.env new file mode 100644 index 0000000000000..16b65a8b56dbb --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/.env @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +HDDS_VERSION=${hdds.version} +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env index d634dca5af02b..8eabf5882d274 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env @@ -16,3 +16,4 @@ HDDS_VERSION=${hdds.version} HADOOP_VERSION=3 +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml index 38ed7d67e0336..917b1708e6d3b 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml @@ -33,7 +33,7 @@ services: - ./docker-config command: ["hadoop", "kms"] datanode: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -42,7 +42,7 @@ services: env_file: - docker-config om: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} hostname: om volumes: - ../..:/opt/hadoop @@ -54,7 +54,7 @@ services: - docker-config command: ["/opt/hadoop/bin/ozone","om"] s3g: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} hostname: s3g volumes: - ../..:/opt/hadoop @@ -64,7 +64,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","s3g"] scm: - image: apache/hadoop-runner:latest + image: apache/hadoop-runner:latest:${HADOOP_RUNNER_VERSION} hostname: scm volumes: - ../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env index d634dca5af02b..8eabf5882d274 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env @@ -16,3 +16,4 @@ HDDS_VERSION=${hdds.version} HADOOP_VERSION=3 +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml index 65cf15b4aad65..777fa8015facf 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml @@ -35,7 +35,7 @@ services: command: ["hadoop", "kms"] datanode: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -44,7 +44,7 @@ services: env_file: - docker-config om: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} hostname: om volumes: - ../..:/opt/hadoop @@ -56,7 +56,7 @@ services: - docker-config command: ["/opt/hadoop/bin/ozone","om"] s3g: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} hostname: s3g volumes: - ../..:/opt/hadoop @@ -66,7 +66,7 @@ services: - ./docker-config command: ["/opt/hadoop/bin/ozone","s3g"] scm: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} hostname: scm volumes: - ../..:/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozonetrace/.env b/hadoop-ozone/dist/src/main/compose/ozonetrace/.env new file mode 100644 index 0000000000000..8eabf5882d274 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozonetrace/.env 
@@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +HDDS_VERSION=${hdds.version} +HADOOP_VERSION=3 +HADOOP_RUNNER_VERSION=${docker.hadoop-runner.version} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonetrace/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonetrace/docker-compose.yaml index c676e57ec300a..d910d8202e482 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonetrace/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonetrace/docker-compose.yaml @@ -23,7 +23,7 @@ services: ports: - 16686:16686 datanode: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -32,7 +32,7 @@ services: env_file: - ./docker-config om: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -43,7 +43,7 @@ services: - ./docker-config command: ["ozone","om"] scm: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: @@ -54,7 +54,7 @@ services: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["ozone","scm"] s3g: - image: apache/hadoop-runner + image: apache/hadoop-runner:${HADOOP_RUNNER_VERSION} volumes: - ../..:/opt/hadoop ports: From e997f2a34a170dfcd1023b82d1ced85c46b0f1f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Tue, 11 Jun 2019 20:20:21 +0200 Subject: [PATCH 0169/1308] HDDS-1659. Define the process to add proposal/design docs to the Ozone subproject (#922) * HDDS-1659. Define the process to add proposal/design docs to the Ozone subproject * Remove Site improvements to display proposals --- .../design/ozone-enhancement-proposals.md | 97 +++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md diff --git a/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md b/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md new file mode 100644 index 0000000000000..78b92dbf9d09a --- /dev/null +++ b/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md @@ -0,0 +1,97 @@ +--- +title: Ozone Enhancement Proposals +summary: Definition of the process to share new technical proposals with the Ozone community. +date: 2019-06-07 +jira: HDDS-1659 +status: current +author: Anu Enginner, Marton Elek +--- + +## Problem statement + +Some of the biggers features requires well defined plans before the implementation. Until now it was managed by uploading PDF design docs to selected JIRA. There are multiple problems with the current practice. + + 1. There is no easy way to find existing up-to-date and outdated design docs. + 2. 
Design docs usually have a better description of the problem than the user docs
+ 3. We need better tools to discuss the design docs in the development phase of the doc
+
+We propose to follow the same process that we have now, but instead of uploading a PDF to the JIRA, create a PR to merge the proposal document into the documentation project.
+
+## Non-goals
+
+ * Modify the existing workflow or approval process
+ * Migrate existing documents
+ * Make it harder to create design docs (it should be easy to support the creation of proposals for any kind of task)
+
+## Proposed solution
+
+ * Open a dedicated Jira (`HDDS-*` but with a specific component)
+ * Use a standard name prefix in the jira (easy to filter on the mailing list): `[OEP]`
+ * Create a PR to merge the design doc (markdown) to `hadoop-hdds/docs/content/proposal` (will be part of the docs)
+ * Discuss it as before (lazy consensus, except if somebody calls for a real vote)
+ * Design docs can be updated according to the changes during the implementation
+
+## Document template
+
+This is the proposed template to document any proposal. It's recommended, but not required, to use exactly the same structure. Some proposals may require a different structure, but we need the following information.
+
+1. Summary
+
+> Give a one sentence summary, like the jira title. It will be displayed on the documentation page. It should be enough to understand the proposal.
+
+2. Problem statement (Motivation / Abstract)
+
+> What is the problem and how would you solve it? Think about an abstract of a paper: one paragraph overview. Why will the world be better with this change?
+
+3. Non-goals
+
+ > Very important to define what is outside of the scope of this proposal
+
+4. Technical Description (Architecture and implementation details)
+
+ > Explain the problem in more detail. How can it be reproduced? What is the current solution? What is the limitation of the current solution?
+
+ > How would the new proposed solution solve the problem? Architectural design.
+
+ > Implementation details. What should be changed in the code? Is it a huge change? Do we need to change the wire protocol? Backward compatibility?
+
+5. Alternatives
+
+ > What are the other alternatives you considered, and why do you prefer the proposed solution? The goal of this section is to help people understand why this is the best solution now, and also to prevent churn in the future when old alternatives are reconsidered.
+
+Note: In some cases 4/5 can be combined. For example, if you have multiple proposals, the first version may include multiple solutions. At the end of the discussion we can move the alternatives to 5. and explain why the community decided to use the selected option.
+
+6. Plan
+
+ > The plan to implement the feature. Estimated size of the work? Do we need a feature branch? Any migration plan, dependency? If it's not a big new feature it can be one sentence or optional.
+
+7. References
+
+## Workflows from other projects
+
+There are similar processes in other open source projects. 
This document and the template is inspired by the following projects: + + * [Apache Kafka Improvement Proposals](https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals) + * [Apache Spark Project Improvement Proposals](https://spark.apache.org/improvement-proposals.html) + * [Kubernetes Enhancement Proposals](https://github.com/kubernetes/enhancements/tree/master/keps) + +Short summary if the porcesses: + +__Kafka__ process: + + * Create wiki page + * Start discussion on mail thread + * Vote on mail thread + +__Spark__ process: + + * Create JIRA (dedicated label) + * Discuss on the jira page + * Vote on dev list + +*Kubernetes*: + + * Deditaced git repository + * KEPs are committed to the repo + * Well defined approval process managed by SIGs (KEPs are assigned to SIGs) + From 5740eea0818f2ba334b99eed0f085300e6b16905 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Tue, 11 Jun 2019 11:48:35 -0700 Subject: [PATCH 0170/1308] HDFS-14513. FSImage which is saving should be clean while NameNode shutdown. Contributed by He Xiaoqiao. --- .../hadoop/hdfs/server/namenode/FSImage.java | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index f8dff1a0db362..cfba091976eb5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -37,6 +37,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.hadoop.util.ShutdownHookManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -88,6 +89,11 @@ public class FSImage implements Closeable { public static final Logger LOG = LoggerFactory.getLogger(FSImage.class.getName()); + /** + * Priority of the FSImageSaver shutdown hook: {@value}. + */ + public static final int SHUTDOWN_HOOK_PRIORITY = 10; + protected FSEditLog editLog = null; private boolean isUpgradeFinalized = false; @@ -1037,6 +1043,18 @@ public FSImageSaver(SaveNamespaceContext context, StorageDirectory sd, @Override public void run() { + // Deletes checkpoint file in every storage directory when shutdown. + Runnable cancelCheckpointFinalizer = () -> { + try { + deleteCancelledCheckpoint(context.getTxId()); + LOG.info("FSImageSaver clean checkpoint: txid={} when meet " + + "shutdown.", context.getTxId()); + } catch (IOException e) { + LOG.error("FSImageSaver cancel checkpoint threw an exception:", e); + } + }; + ShutdownHookManager.get().addShutdownHook(cancelCheckpointFinalizer, + SHUTDOWN_HOOK_PRIORITY); try { saveFSImage(context, sd, nnf); } catch (SaveNamespaceCancelledException snce) { @@ -1046,6 +1064,13 @@ public void run() { } catch (Throwable t) { LOG.error("Unable to save image for " + sd.getRoot(), t); context.reportErrorOnStorageDirectory(sd); + try { + deleteCancelledCheckpoint(context.getTxId()); + LOG.info("FSImageSaver clean checkpoint: txid={} when meet " + + "Throwable.", context.getTxId()); + } catch (IOException e) { + LOG.error("FSImageSaver cancel checkpoint threw an exception:", e); + } } } From 4fecc2a95e2bd7a4f5ba0b930f1bd6be7227d1b5 Mon Sep 17 00:00:00 2001 From: Siyao Meng Date: Tue, 11 Jun 2019 15:04:18 -0700 Subject: [PATCH 0171/1308] HADOOP-16263. 
Update BUILDING.txt with macOS native build instructions. Contributed by Siyao Meng. Signed-off-by: Wei-Chiu Chuang --- BUILDING.txt | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/BUILDING.txt b/BUILDING.txt index 8c57a1d8e2d6e..640ee069e56f1 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -380,6 +380,50 @@ export MAVEN_OPTS="-Xms256m -Xmx1536m" ---------------------------------------------------------------------------------- +Building on macOS (without Docker) + +---------------------------------------------------------------------------------- +Installing required dependencies for clean install of macOS 10.14: + +* Install Xcode Command Line Tools + $ xcode-select --install +* Install Homebrew + $ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" +* Install OpenJDK 8 + $ brew tap AdoptOpenJDK/openjdk + $ brew cask install adoptopenjdk8 +* Install maven and tools + $ brew install maven autoconf automake cmake wget +* Install native libraries, only openssl is required to compile native code, +you may optionally install zlib, lz4, etc. + $ brew install openssl +* Protocol Buffers 2.5.0 (required), since 2.5.0 is no longer in Homebrew, +we need to compile it from source + $ wget https://github.com/protocolbuffers/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz + $ tar zxvf protobuf-2.5.0.tar.gz + $ cd protobuf-2.5.0 + $ ./configure + $ make + $ make check + $ make install + $ protoc --version + +Note that building Hadoop 3.1.1/3.1.2/3.2.0 native code from source is broken +on macOS. For 3.1.1/3.1.2, you need to manually backport YARN-8622. For 3.2.0, +you need to backport both YARN-8622 and YARN-9487 in order to build native code. + +---------------------------------------------------------------------------------- +Building command example: + +* Create binary distribution with native code but without documentation: + $ mvn package -Pdist,native -DskipTests -Dmaven.javadoc.skip \ + -Dopenssl.prefix=/usr/local/opt/openssl + +Note that the command above manually specified the openssl library and include +path. This is necessary at least for Homebrewed OpenSSL. + +---------------------------------------------------------------------------------- + Building on Windows ---------------------------------------------------------------------------------- From 4ea6c2f457496461afc63f38ef4cef3ab0efce49 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Tue, 11 Jun 2019 18:41:08 -0400 Subject: [PATCH 0172/1308] HADOOP-16354. Enable AuthFilter as default for WebHDFS. 
Contributed by Prabhu Joseph --- .../server/ProxyUserAuthenticationFilter.java | 88 +++++++++++- .../TestProxyUserAuthenticationFilter.java | 2 +- .../java/org/apache/hadoop/hdfs/DFSUtil.java | 27 ++++ .../hadoop/hdfs/server/common/JspHelper.java | 2 +- .../server/namenode/NameNodeHttpServer.java | 50 ------- .../apache/hadoop/hdfs/web/AuthFilter.java | 115 +--------------- .../hdfs/web/AuthFilterInitializer.java | 69 ++++++++++ .../hadoop/hdfs/web/TestAuthFilter.java | 125 ++++++------------ 8 files changed, 231 insertions(+), 247 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilterInitializer.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java index 42902b31601fd..bd04efeed2202 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java @@ -18,12 +18,18 @@ import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.HttpExceptionUtils; +import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.security.Principal; +import java.util.ArrayList; import java.util.Enumeration; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; import javax.servlet.ServletException; @@ -41,7 +47,7 @@ public class ProxyUserAuthenticationFilter extends AuthenticationFilter { private static final Logger LOG = LoggerFactory.getLogger( ProxyUserAuthenticationFilter.class); - private static final String DO_AS = "doAs"; + private static final String DO_AS = "doas"; public static final String PROXYUSER_PREFIX = "proxyuser"; @Override @@ -54,8 +60,9 @@ public void init(FilterConfig filterConfig) throws ServletException { @Override protected void doFilter(FilterChain filterChain, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException { + final HttpServletRequest lowerCaseRequest = toLowerCase(request); + String doAsUser = lowerCaseRequest.getParameter(DO_AS); - String doAsUser = request.getParameter(DO_AS); if (doAsUser != null && !doAsUser.equals(request.getRemoteUser())) { LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ", doAsUser, request.getRemoteUser(), request.getRemoteAddr()); @@ -111,5 +118,82 @@ protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) return conf; } + static boolean containsUpperCase(final Iterable strings) { + for(String s : strings) { + for(int i = 0; i < s.length(); i++) { + if (Character.isUpperCase(s.charAt(i))) { + return true; + } + } + } + return false; + } + + public static HttpServletRequest toLowerCase( + final HttpServletRequest request) { + @SuppressWarnings("unchecked") + final Map original = (Map) + request.getParameterMap(); + if (!containsUpperCase(original.keySet())) { + return request; + } + + final Map> m = new HashMap>(); + for (Map.Entry entry : original.entrySet()) { + final String key = 
StringUtils.toLowerCase(entry.getKey()); + List strings = m.get(key); + if (strings == null) { + strings = new ArrayList(); + m.put(key, strings); + } + for (String v : entry.getValue()) { + strings.add(v); + } + } + + return new HttpServletRequestWrapper(request) { + private Map parameters = null; + + @Override + public Map getParameterMap() { + if (parameters == null) { + parameters = new HashMap(); + for (Map.Entry> entry : m.entrySet()) { + final List a = entry.getValue(); + parameters.put(entry.getKey(), a.toArray(new String[a.size()])); + } + } + return parameters; + } + + @Override + public String getParameter(String name) { + final List a = m.get(name); + return a == null ? null : a.get(0); + } + + @Override + public String[] getParameterValues(String name) { + return getParameterMap().get(name); + } + + @Override + public Enumeration getParameterNames() { + final Iterator i = m.keySet().iterator(); + return new Enumeration() { + @Override + public boolean hasMoreElements() { + return i.hasNext(); + } + + @Override + public String nextElement() { + return i.next(); + } + }; + } + }; + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authentication/server/TestProxyUserAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authentication/server/TestProxyUserAuthenticationFilter.java index 019ab798c64cb..16c0e1eb112ac 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authentication/server/TestProxyUserAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authentication/server/TestProxyUserAuthenticationFilter.java @@ -105,7 +105,7 @@ public void doFilter(ServletRequest servletRequest, HttpServletRequest request = Mockito.mock(HttpServletRequest.class); Mockito.when(request.getRemoteUser()).thenReturn("knox"); - Mockito.when(request.getParameter("doAs")).thenReturn("testuser"); + Mockito.when(request.getParameter("doas")).thenReturn("testuser"); Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1"); Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index aa643c84927f5..3cdf66d4f318a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -53,6 +53,7 @@ import java.util.Comparator; import java.util.Date; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -63,6 +64,7 @@ import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.cli.PosixParser; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -78,13 +80,16 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.web.AuthFilterInitializer; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import 
org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ToolRunner; @@ -1609,6 +1614,28 @@ public static HttpServer2.Builder httpServerTemplateForNNAndJN( String spnegoKeytabFileKey) throws IOException { HttpConfig.Policy policy = getHttpPolicy(conf); + String filterInitializerConfKey = "hadoop.http.filter.initializers"; + String initializers = conf.get(filterInitializerConfKey, ""); + + String[] parts = initializers.split(","); + Set target = new LinkedHashSet(); + for (String filterInitializer : parts) { + filterInitializer = filterInitializer.trim(); + if (filterInitializer.equals( + AuthenticationFilterInitializer.class.getName()) || + filterInitializer.equals( + ProxyUserAuthenticationFilterInitializer.class.getName()) || + filterInitializer.isEmpty()) { + continue; + } + target.add(filterInitializer); + } + target.add(AuthFilterInitializer.class.getName()); + initializers = StringUtils.join(target, ","); + conf.set(filterInitializerConfKey, initializers); + + LOG.info("Filter initializers set : " + initializers); + HttpServer2.Builder builder = new HttpServer2.Builder().setName(name) .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))) .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java index 2c65c3fe2cecf..2f249655eed8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -139,7 +139,7 @@ public static UserGroupInformation getUGI(ServletContext context, // filter ugi.setAuthenticationMethod(secureAuthMethod); } - if (doAsUserFromQuery != null) { + if (doAsUserFromQuery != null && !doAsUserFromQuery.equals(remoteUser)) { // create and attempt to authorize a proxy user ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi); ProxyUsers.authorize(ugi, getRemoteAddr(request)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index e8874d31a442f..478bdd3b8a008 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -22,10 +22,7 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.util.HashMap; -import java.util.Iterator; import java.util.Map; -import java.util.Map.Entry; import javax.servlet.ServletContext; @@ -41,7 +38,6 @@ import org.apache.hadoop.hdfs.server.common.TokenVerifier; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; -import org.apache.hadoop.hdfs.web.AuthFilter; import 
org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.resources.AclPermissionParam; import org.apache.hadoop.hdfs.web.resources.Param; @@ -49,8 +45,6 @@ import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.http.RestCsrfPreventionFilter; /** @@ -183,50 +177,6 @@ void start() throws IOException { NetUtils.getHostPortString(httpsAddress)); } } - - private static Map getAuthFilterParams(Configuration conf, - String hostname, String httpKeytab) throws IOException { - Map params = new HashMap(); - // Select configs beginning with 'dfs.web.authentication.' - Iterator> iterator = conf.iterator(); - while (iterator.hasNext()) { - Entry kvPair = iterator.next(); - if (kvPair.getKey().startsWith(AuthFilter.CONF_PREFIX)) { - params.put(kvPair.getKey(), kvPair.getValue()); - } - } - String principalInConf = conf - .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY); - if (principalInConf != null && !principalInConf.isEmpty()) { - params - .put( - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, - SecurityUtil.getServerPrincipal(principalInConf, hostname)); - } else if (UserGroupInformation.isSecurityEnabled()) { - HttpServer2.LOG.error( - "WebHDFS and security are enabled, but configuration property '" + - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY + - "' is not set."); - } - if (httpKeytab != null && !httpKeytab.isEmpty()) { - params.put( - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, - httpKeytab); - } else if (UserGroupInformation.isSecurityEnabled()) { - HttpServer2.LOG.error( - "WebHDFS and security are enabled, but configuration property '" + - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY + - "' is not set."); - } - String anonymousAllowed = conf - .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED); - if (anonymousAllowed != null && !anonymousAllowed.isEmpty()) { - params.put( - DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED, - anonymousAllowed); - } - return params; - } /** * Joins the httpserver. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java index a8b7bd434b806..b7f4d8389378b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java @@ -18,138 +18,37 @@ package org.apache.hadoop.hdfs.web; import java.io.IOException; -import java.util.ArrayList; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Properties; import javax.servlet.FilterChain; -import javax.servlet.FilterConfig; import javax.servlet.ServletException; import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletRequestWrapper; import org.apache.hadoop.hdfs.web.resources.DelegationParam; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; -import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; -import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilter; /** * Subclass of {@link AuthenticationFilter} that * obtains Hadoop-Auth configuration for webhdfs. */ -public class AuthFilter extends AuthenticationFilter { - public static final String CONF_PREFIX = "dfs.web.authentication."; - - /** - * Returns the filter configuration properties, - * including the ones prefixed with {@link #CONF_PREFIX}. - * The prefix is removed from the returned property names. - * - * @param prefix parameter not used. - * @param config parameter contains the initialization values. - * @return Hadoop-Auth configuration properties. - * @throws ServletException - */ - @Override - protected Properties getConfiguration(String prefix, FilterConfig config) - throws ServletException { - final Properties p = super.getConfiguration(CONF_PREFIX, config); - // if not set, configure based on security enabled - if (p.getProperty(AUTH_TYPE) == null) { - p.setProperty(AUTH_TYPE, UserGroupInformation.isSecurityEnabled()? - KerberosAuthenticationHandler.TYPE: PseudoAuthenticationHandler.TYPE); - } - // if not set, enable anonymous for pseudo authentication - if (p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED) == null) { - p.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true"); - } - //set cookie path - p.setProperty(COOKIE_PATH, "/"); - return p; - } +public class AuthFilter extends ProxyUserAuthenticationFilter { @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain) throws IOException, ServletException { - final HttpServletRequest httpRequest = toLowerCase((HttpServletRequest)request); + final HttpServletRequest httpRequest = ProxyUserAuthenticationFilter. + toLowerCase((HttpServletRequest)request); final String tokenString = httpRequest.getParameter(DelegationParam.NAME); - if (tokenString != null) { + if (tokenString != null && httpRequest.getServletPath().startsWith( + WebHdfsFileSystem.PATH_PREFIX)) { //Token is present in the url, therefore token will be used for //authentication, bypass kerberos authentication. 
filterChain.doFilter(httpRequest, response); return; } - super.doFilter(httpRequest, response, filterChain); + super.doFilter(request, response, filterChain); } - private static HttpServletRequest toLowerCase(final HttpServletRequest request) { - @SuppressWarnings("unchecked") - final Map original = (Map)request.getParameterMap(); - if (!ParamFilter.containsUpperCase(original.keySet())) { - return request; - } - - final Map> m = new HashMap>(); - for(Map.Entry entry : original.entrySet()) { - final String key = StringUtils.toLowerCase(entry.getKey()); - List strings = m.get(key); - if (strings == null) { - strings = new ArrayList(); - m.put(key, strings); - } - for(String v : entry.getValue()) { - strings.add(v); - } - } - - return new HttpServletRequestWrapper(request) { - private Map parameters = null; - - @Override - public Map getParameterMap() { - if (parameters == null) { - parameters = new HashMap(); - for(Map.Entry> entry : m.entrySet()) { - final List a = entry.getValue(); - parameters.put(entry.getKey(), a.toArray(new String[a.size()])); - } - } - return parameters; - } - - @Override - public String getParameter(String name) { - final List a = m.get(name); - return a == null? null: a.get(0); - } - - @Override - public String[] getParameterValues(String name) { - return getParameterMap().get(name); - } - - @Override - public Enumeration getParameterNames() { - final Iterator i = m.keySet().iterator(); - return new Enumeration() { - @Override - public boolean hasMoreElements() { - return i.hasNext(); - } - @Override - public String nextElement() { - return i.next(); - } - }; - } - }; - } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilterInitializer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilterInitializer.java new file mode 100644 index 0000000000000..c1a13bf182a50 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilterInitializer.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.web; + +import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.http.FilterContainer; +import org.apache.hadoop.http.FilterInitializer; +import org.apache.hadoop.security.AuthenticationFilterInitializer; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; +import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; + +/** + * Filter initializer to initialize {@link AuthFilter}. 
+ */ +public class AuthFilterInitializer extends FilterInitializer { + + private String configPrefix; + + public AuthFilterInitializer() { + this.configPrefix = "hadoop.http.authentication."; + } + + protected Map createFilterConfig(Configuration conf) { + Map filterConfig = AuthenticationFilterInitializer + .getFilterConfigMap(conf, configPrefix); + + for (Map.Entry entry : conf.getPropsWithPrefix( + ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) { + filterConfig.put("proxyuser" + entry.getKey(), entry.getValue()); + } + + if (filterConfig.get("type") == null) { + filterConfig.put("type", UserGroupInformation.isSecurityEnabled() ? + KerberosAuthenticationHandler.TYPE : + PseudoAuthenticationHandler.TYPE); + } + + //set cookie path + filterConfig.put("cookie.path", "/"); + return filterConfig; + } + + @Override + public void initFilter(FilterContainer container, Configuration conf) { + Map filterConfig = createFilterConfig(conf); + container.addFilter("AuthFilter", AuthFilter.class.getName(), + filterConfig); + } + +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java index 9818461808874..7f88416bd2915 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java @@ -17,100 +17,55 @@ */ package org.apache.hadoop.hdfs.web; -import java.util.Collections; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; - -import javax.servlet.FilterConfig; -import javax.servlet.ServletContext; -import javax.servlet.ServletException; -import org.apache.hadoop.hdfs.DFSConfigKeys; +import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.http.FilterContainer; import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; -import org.junit.Assert; import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + public class TestAuthFilter { - - private static class DummyFilterConfig implements FilterConfig { - final Map map; - - DummyFilterConfig(Map map) { - this.map = map; - } - - @Override - public String getFilterName() { - return "dummy"; - } - @Override - public String getInitParameter(String arg0) { - return map.get(arg0); - } - @Override - public Enumeration getInitParameterNames() { - return Collections.enumeration(map.keySet()); - } - @Override - public ServletContext getServletContext() { - return null; - } - } - - @Test - public void testGetConfiguration() throws ServletException { - AuthFilter filter = new AuthFilter(); - Map m = new HashMap(); - m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, - "xyz/thehost@REALM"); - m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, - "thekeytab"); - FilterConfig config = new DummyFilterConfig(m); - Properties p = filter.getConfiguration("random", config); - Assert.assertEquals("xyz/thehost@REALM", - p.getProperty("kerberos.principal")); - Assert.assertEquals("thekeytab", p.getProperty("kerberos.keytab")); - Assert.assertEquals("true", - p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED)); - } - - @Test - public void 
testGetSimpleAuthDisabledConfiguration() throws ServletException { - AuthFilter filter = new AuthFilter(); - Map m = new HashMap(); - m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED, - "false"); - FilterConfig config = new DummyFilterConfig(m); - Properties p = filter.getConfiguration("random", config); - Assert.assertEquals("false", - p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED)); - } - - @Test - public void testGetSimpleAuthDefaultConfiguration() throws ServletException { - AuthFilter filter = new AuthFilter(); - Map m = new HashMap(); - - FilterConfig config = new DummyFilterConfig(m); - Properties p = filter.getConfiguration("random", config); - Assert.assertEquals("true", - p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED)); - } + + private static final String PREFIX = "hadoop.http.authentication."; @Test - public void testGetCustomAuthConfiguration() throws ServletException { - AuthFilter filter = new AuthFilter(); - Map m = new HashMap(); + public void testGetConfiguration() { + Configuration conf = new Configuration(); + conf.set(PREFIX + "type", "kerberos"); + conf.set(PREFIX + "kerberos.keytab", "thekeytab"); + conf.set(PREFIX + "kerberos.principal", "xyz/thehost@REALM"); - m.put(AuthFilter.CONF_PREFIX + AuthFilter.AUTH_TYPE, "com.yourclass"); - m.put(AuthFilter.CONF_PREFIX + "alt-kerberos.param", "value"); - FilterConfig config = new DummyFilterConfig(m); + FilterContainer container = Mockito.mock(FilterContainer.class); + Mockito.doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocationOnMock) { + Object[] args = invocationOnMock.getArguments(); - Properties p = filter.getConfiguration(AuthFilter.CONF_PREFIX, config); - Assert.assertEquals("com.yourclass", p.getProperty(AuthFilter.AUTH_TYPE)); - Assert.assertEquals("value", p.getProperty("alt-kerberos.param")); + assertEquals("AuthFilter", args[0]); + assertEquals(AuthFilter.class.getName(), args[1]); + + Map conf = (Map) args[2]; + assertEquals("/", conf.get("cookie.path")); + assertEquals("kerberos", conf.get("type")); + assertNull(conf.get("cookie.domain")); + assertEquals("xyz/thehost@REALM", conf.get("kerberos.principal")); + assertEquals("thekeytab", conf.get("kerberos.keytab")); + assertEquals("true", + conf.get(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED)); + + return null; + } + }).when(container).addFilter(Mockito.any(), Mockito.any(), Mockito.any()); + + new AuthFilterInitializer().initFilter(container, conf); } + } From 23c037906f7aa4912fe89724750b2d634d37f231 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Wed, 12 Jun 2019 08:52:16 +0200 Subject: [PATCH 0173/1308] Revert "HDDS-1659. Define the process to add proposal/design docs to the Ozone subproject (#922)" This reverts commit e997f2a34a170dfcd1023b82d1ced85c46b0f1f1. --- .../design/ozone-enhancement-proposals.md | 97 ------------------- 1 file changed, 97 deletions(-) delete mode 100644 hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md diff --git a/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md b/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md deleted file mode 100644 index 78b92dbf9d09a..0000000000000 --- a/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Ozone Enhancement Proposals -summary: Definition of the process to share new technical proposals with the Ozone community. 
-date: 2019-06-07 -jira: HDDS-1659 -status: current -author: Anu Enginner, Marton Elek ---- - -## Problem statement - -Some of the biggers features requires well defined plans before the implementation. Until now it was managed by uploading PDF design docs to selected JIRA. There are multiple problems with the current practice. - - 1. There is no easy way to find existing up-to-date and outdated design docs. - 2. Design docs usually have better description of the problem that the user docs - 3. We need better tools to discuss the design docs in the development phase of the doc - -We propose to follow the same process what we have now, but instead of uploading a PDF to the JIRA, create a PR to merge the proposal document to the documentation project. - -## Non-goals - - * Modify the existing workflow or approval process - * Migrate existing documents - * Make it harder to create design docs (it should be easy to support the creation of proposals for any kind of tasks) - -## Proposed solution - - * Open a dedicated Jira (`HDDS-*` but with specific component) - * Use standard name prefix in the jira (easy to filter on the mailing list) `[OEP] - * Create a PR to merge the design doc (markdown) to `hadoop-hdds/docs/content/proposal` (will be part of the docs) - * Discuss it as before (lazy consesus, except if somebody calls for a real vote) - * Design docs can be updated according to the changes during the implementation - -## Document template - -This the proposed template to document any proposal. It's recommended but not required the use exactly the some structure. Some proposal may require different structure, but we need the following information. - -1. Summary - -> Give a one sentence summary, like the jira title. It will be displayed on the documentation page. Should be enough to understand - -2. Problem statement (Motivation / Abstract) - -> What is the problem and how would you solve it? Think about an abstract of a paper: one paragraph overview. Why will the world better with this change? - -3. Non-goals - - > Very important to define what is outside of the scope of this proposal - -4. Technical Description (Architecture and implementation details) - - > Explain the problem in more details. How can it be reproduced? What is the current solution? What is the limitation of the current solution? - - > How the new proposed solution would solve the problem? Architectural design. - - > Implementation details. What should be changed in the code. Is it a huge change? Do we need to change wire protocol? Backward compatibility? - -5. Alternatives - - > What are the other alternatives you considered and why do yoy prefer the proposed solution The goal of this section is to help people understand why this is the best solution now, and also to prevent churn in the future when old alternatives are reconsidered. - -Note: In some cases 4/5 can be combined. For example if you have multiple proposals, the first version may include multiple solutions. At the end ot the discussion we can move the alternatives to 5. and explain why the community is decided to use the selected option. - -6. Plan - - > Planning to implement the feature. Estimated size of the work? Do we need feature branch? Any migration plan, dependency? If it's not a big new feature it can be one sentence or optional. - -7. References - -## Workflows form other projects - -There are similar process in other open source projects. 
This document and the template is inspired by the following projects: - - * [Apache Kafka Improvement Proposals](https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals) - * [Apache Spark Project Improvement Proposals](https://spark.apache.org/improvement-proposals.html) - * [Kubernetes Enhancement Proposals](https://github.com/kubernetes/enhancements/tree/master/keps) - -Short summary if the porcesses: - -__Kafka__ process: - - * Create wiki page - * Start discussion on mail thread - * Vote on mail thread - -__Spark__ process: - - * Create JIRA (dedicated label) - * Discuss on the jira page - * Vote on dev list - -*Kubernetes*: - - * Deditaced git repository - * KEPs are committed to the repo - * Well defined approval process managed by SIGs (KEPs are assigned to SIGs) - From 3b31694c3535f746b59ae1765f265ee8f1078c2c Mon Sep 17 00:00:00 2001 From: Ajay Yadav <7813154+ajayydv@users.noreply.github.com> Date: Wed, 12 Jun 2019 06:51:34 -0700 Subject: [PATCH 0174/1308] HDDS-1545. Cli to add,remove,get and delete acls for Ozone objects. Contributed by Ajay Kumar. (#920) --- .../org/apache/hadoop/ozone/OzoneAcl.java | 24 ++++ .../ozone/security/acl/OzoneAclConfig.java | 6 + .../ozone/security/acl/OzoneObjInfo.java | 6 +- .../src/main/proto/OzoneManagerProtocol.proto | 18 +-- .../apache/hadoop/ozone/TestOzoneAcls.java | 42 +++++++ .../ozone/security/acl/TestOzoneObjInfo.java | 69 +++++++++++- .../main/smoketest/basic/ozone-shell.robot | 62 ++++++++++- .../smoketest/security/ozone-secure-fs.robot | 50 ++++++++- .../OzoneManagerRequestHandler.java | 3 +- .../ozShell/bucket/AddAclBucketHandler.java | 101 +++++++++++++++++ .../web/ozShell/bucket/BucketCommands.java | 6 +- .../ozShell/bucket/GetAclBucketHandler.java | 84 ++++++++++++++ .../bucket/RemoveAclBucketHandler.java | 101 +++++++++++++++++ .../ozShell/bucket/SetAclBucketHandler.java | 101 +++++++++++++++++ .../web/ozShell/keys/AddAclKeyHandler.java | 104 ++++++++++++++++++ .../web/ozShell/keys/GetAclKeyHandler.java | 87 +++++++++++++++ .../ozone/web/ozShell/keys/KeyCommands.java | 6 +- .../web/ozShell/keys/RemoveAclKeyHandler.java | 104 ++++++++++++++++++ .../web/ozShell/keys/SetAclKeyHandler.java | 103 +++++++++++++++++ .../ozShell/volume/AddAclVolumeHandler.java | 98 +++++++++++++++++ .../ozShell/volume/GetAclVolumeHandler.java | 78 +++++++++++++ .../volume/RemoveAclVolumeHandler.java | 98 +++++++++++++++++ .../ozShell/volume/SetAclVolumeHandler.java | 101 +++++++++++++++++ .../web/ozShell/volume/VolumeCommands.java | 6 +- 24 files changed, 1436 insertions(+), 22 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java create mode 100644 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 8ee33b4e9990b..2fba29e4896eb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -134,6 +134,30 @@ public static OzoneAcl parseAcl(String acl) throws IllegalArgumentException { return new OzoneAcl(aclType, parts[1], acls); } + /** + * Parses an ACL string and returns the ACL object. + * + * @param acls - Acl String , Ex. user:anu:rw + * + * @return - Ozone ACLs + */ + public static List parseAcls(String acls) + throws IllegalArgumentException { + if ((acls == null) || acls.isEmpty()) { + throw new IllegalArgumentException("ACLs cannot be null or empty"); + } + String[] parts = acls.trim().split(","); + if (parts.length < 1) { + throw new IllegalArgumentException("ACLs are not in expected format"); + } + List ozAcls = new ArrayList<>(); + + for(String acl:parts) { + ozAcls.add(parseAcl(acl)); + } + return ozAcls; + } + public static OzoneAclInfo toProtobuf(OzoneAcl acl) { OzoneAclInfo.Builder builder = OzoneAclInfo.newBuilder() .setName(acl.getName()) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java index 9641eda18dac9..b51af56a4bb14 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java @@ -40,6 +40,9 @@ public class OzoneAclConfig { "OzoneManager." ) public void setUserDefaultRights(String userRights) { + if(userRights == null) { + userRights = "ALL"; + } this.userDefaultRights = ACLType.valueOf(userRights); } @@ -51,6 +54,9 @@ public void setUserDefaultRights(String userRights) { "OzoneManager." 
) public void setGroupDefaultRights(String groupRights) { + if(groupRights == null) { + groupRights = "ALL"; + } this.groupDefaultRights = ACLType.valueOf(groupRights); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index cbb9fb8e21a6d..537134a539814 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -75,8 +75,8 @@ public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj Builder builder = new Builder() .setResType(ResourceType.valueOf(proto.getResType().name())) .setStoreType(StoreType.valueOf(proto.getStoreType().name())); - String[] tokens = StringUtils.splitPreserveAllTokens(proto.getPath(), - OZONE_URI_DELIMITER); + String[] tokens = StringUtils.split(proto.getPath(), + OZONE_URI_DELIMITER, 3); if(tokens == null) { throw new IllegalArgumentException("Unexpected path:" + proto.getPath()); } @@ -94,7 +94,7 @@ public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj builder.setBucketName(tokens[1]); break; case KEY: - if (tokens.length != 3) { + if (tokens.length < 3) { throw new IllegalArgumentException("Unexpected argument for " + "Ozone key. Path:" + proto.getPath()); } diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index 303241e27e2ed..21cacf6ebe552 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -507,15 +507,15 @@ message OzoneAclInfo { } enum OzoneAclRights { - CREATE = 1; - LIST = 2; - DELETE = 3; - READ = 4; - WRITE = 5; - READ_ACL = 6; - WRITE_ACL = 7; - ALL = 8; - NONE = 9; + READ = 1; + WRITE = 2; + CREATE = 3; + LIST = 4; + DELETE = 5; + READ_ACL = 6; + WRITE_ACL = 7; + ALL = 8; + NONE = 9; } required OzoneAclType type = 1; required string name = 2; diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java index 5d9a05dd9774e..b9207f4f81f7e 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java @@ -20,10 +20,12 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.test.LambdaTestUtils; import org.junit.Test; import java.util.HashMap; +import java.util.List; import java.util.Set; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.*; @@ -202,4 +204,44 @@ public void testAclValues() throws Exception { " is not", () -> OzoneAcl.parseAcl("world::rwdlncxncxdfsfgbny")); } + @Test + public void testBitSetToListConversion() throws Exception { + OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); + + List rights = acl.getAclList(); + assertTrue(rights.size() == 2); + assertTrue(rights.contains(READ)); + assertTrue(rights.contains(WRITE)); + assertFalse(rights.contains(CREATE)); + + acl = OzoneAcl.parseAcl("user:bilbo:a"); + + rights = acl.getAclList(); + assertTrue(rights.size() == 1); + assertTrue(rights.contains(ALL)); + assertFalse(rights.contains(WRITE)); + assertFalse(rights.contains(CREATE)); + 
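[Editorial aside, not part of the patch: a minimal sketch of how the comma-separated ACL spec exercised by this test is parsed with the OzoneAcl.parseAcl/parseAcls helpers added above. The wrapper class name is invented, the principal names are taken from the test, and the generic types are assumed since the flattened diff drops angle brackets.]

    import java.util.List;
    import org.apache.hadoop.ozone.OzoneAcl;

    public class AclSpecSketch {
      public static void main(String[] args) {
        // Each entry is TYPE:NAME:RIGHTS; "rw" expands to READ and WRITE,
        // "a" is shorthand for ALL (see the letter mapping in the CLI help text).
        List<OzoneAcl> acls = OzoneAcl.parseAcls("user:bilbo:rw,group:hadoop:a");
        for (OzoneAcl acl : acls) {
          // getAclList() returns the individual ACL rights encoded by the letters.
          System.out.println(acl.getName() + " -> " + acl.getAclList());
        }
      }
    }

[The same user:NAME:RIGHTS / group:NAME:RIGHTS syntax is what the addacl/setacl shell handlers added later in this patch pass straight through from the command line to parseAcl/parseAcls.]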
+ acl = OzoneAcl.parseAcl("user:bilbo:cxy"); + rights = acl.getAclList(); + assertTrue(rights.size() == 3); + assertTrue(rights.contains(CREATE)); + assertTrue(rights.contains(READ_ACL)); + assertTrue(rights.contains(WRITE_ACL)); + assertFalse(rights.contains(WRITE)); + assertFalse(rights.contains(READ)); + + List acls = OzoneAcl.parseAcls("user:bilbo:cxy,group:hadoop:a"); + assertTrue(acls.size() == 2); + rights = acls.get(0).getAclList(); + assertTrue(rights.size() == 3); + assertTrue(rights.contains(CREATE)); + assertTrue(rights.contains(READ_ACL)); + assertTrue(rights.contains(WRITE_ACL)); + assertFalse(rights.contains(WRITE)); + assertFalse(rights.contains(READ)); + rights = acls.get(1).getAclList(); + assertTrue(rights.contains(ALL)); + } + } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java index 93dfc4dba5a77..ab24b1b592563 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java @@ -16,8 +16,11 @@ */ package org.apache.hadoop.ozone.security.acl; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.junit.Test; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType.*; import static org.junit.Assert.*; import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType; @@ -76,13 +79,73 @@ public void testGetKeyName() { objInfo = getBuilder(volume, bucket, key).build(); assertEquals(objInfo.getKeyName(), key); - objInfo =getBuilder(volume, null, null).build(); + objInfo = getBuilder(volume, null, null).build(); assertEquals(objInfo.getKeyName(), null); - objInfo =getBuilder(null, bucket, null).build(); + objInfo = getBuilder(null, bucket, null).build(); assertEquals(objInfo.getKeyName(), null); - objInfo =getBuilder(null, null, key).build(); + objInfo = getBuilder(null, null, key).build(); + assertEquals(objInfo.getKeyName(), key); + } + + @Test + public void testFromProtobufOp() { + // Key with long path. + key = "dir1/dir2/dir3/dir4/dir5/abc.txt"; + OzoneManagerProtocolProtos.OzoneObj protoObj = OzoneManagerProtocolProtos. + OzoneObj.newBuilder() + .setResType(KEY) + .setStoreType(OzoneManagerProtocolProtos.OzoneObj.StoreType.OZONE) + .setPath(volume + OZONE_URI_DELIMITER + + bucket + OZONE_URI_DELIMITER + key) + .build(); + + objInfo = OzoneObjInfo.fromProtobuf(protoObj); + assertEquals(objInfo.getKeyName(), key); + objInfo = getBuilder(volume, null, null).build(); + assertEquals(objInfo.getKeyName(), null); + objInfo = getBuilder(null, bucket, null).build(); + assertEquals(objInfo.getKeyName(), null); + objInfo = getBuilder(null, null, key).build(); + assertEquals(objInfo.getKeyName(), key); + + // Key with long path. + key = "dir1/dir2/dir3/dir4/dir5/abc.txt"; + protoObj = OzoneManagerProtocolProtos. 
+ OzoneObj.newBuilder() + .setResType(KEY) + .setStoreType(OzoneManagerProtocolProtos.OzoneObj.StoreType.OZONE) + .setPath(OZONE_URI_DELIMITER + volume + OZONE_URI_DELIMITER + + bucket + OZONE_URI_DELIMITER + key) + .build(); + + objInfo = OzoneObjInfo.fromProtobuf(protoObj); + assertEquals(objInfo.getKeyName(), key); + objInfo = getBuilder(volume, null, null).build(); + assertEquals(objInfo.getKeyName(), null); + objInfo = getBuilder(null, bucket, null).build(); + assertEquals(objInfo.getKeyName(), null); + objInfo = getBuilder(null, null, key).build(); + assertEquals(objInfo.getKeyName(), key); + + // Key with long path. + key = "dir1/dir2/dir3/dir4/dir5/"; + protoObj = OzoneManagerProtocolProtos. + OzoneObj.newBuilder() + .setResType(KEY) + .setStoreType(OzoneManagerProtocolProtos.OzoneObj.StoreType.OZONE) + .setPath(OZONE_URI_DELIMITER + volume + OZONE_URI_DELIMITER + + bucket + OZONE_URI_DELIMITER + key) + .build(); + + objInfo = OzoneObjInfo.fromProtobuf(protoObj); + assertEquals(objInfo.getKeyName(), key); + objInfo = getBuilder(volume, null, null).build(); + assertEquals(objInfo.getKeyName(), null); + objInfo = getBuilder(null, bucket, null).build(); + assertEquals(objInfo.getKeyName(), null); + objInfo = getBuilder(null, null, key).build(); assertEquals(objInfo.getKeyName(), key); } } \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot index b66e9f8e91b62..ee3c6e60e6d9c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot +++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot @@ -25,11 +25,20 @@ Test Timeout 2 minute RpcClient with port Test ozone shell o3:// om:9862 rpcwoport +RpcClient volume acls + Test Volume Acls o3:// om:9862 rpcwoport2 + +RpcClient bucket acls + Test Bucket Acls o3:// om:9862 rpcwoport2 + +RpcClient key acls + Test Key Acls o3:// om:9862 rpcwoport2 + RpcClient without host - Test ozone shell o3:// ${EMPTY} rpcwport + Test ozone shell o3:// ${EMPTY} rpcwport RpcClient without scheme - Test ozone shell ${EMPTY} ${EMPTY} rpcwoscheme + Test ozone shell ${EMPTY} ${EMPTY} rpcwoscheme *** Keywords *** @@ -60,6 +69,39 @@ Test ozone shell Execute ozone sh bucket delete ${protocol}${server}/${volume}/bb1 Execute ozone sh volume delete ${protocol}${server}/${volume} --user bilbo +Test Volume Acls + [arguments] ${protocol} ${server} ${volume} + Execute ozone sh volume create ${protocol}${server}/${volume} + ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclList\" : . \"ALL\" . + ${result} = Execute ozone sh volume addacl ${protocol}${server}/${volume} -a user:superuser1:rwxy + ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + ${result} = Execute ozone sh volume removeacl ${protocol}${server}/${volume} -a user:superuser1:xy + ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . 
\"READ\", \"WRITE\" + ${result} = Execute ozone sh volume setacl ${protocol}${server}/${volume} -al user:superuser1:rwxy,group:superuser1:a + ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . \"ALL\" + +Test Bucket Acls + [arguments] ${protocol} ${server} ${volume} + Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1 + ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclList\" : . \"ALL\" . + ${result} = Execute ozone sh bucket addacl ${protocol}${server}/${volume}/bb1 -a user:superuser1:rwxy + ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + ${result} = Execute ozone sh bucket removeacl ${protocol}${server}/${volume}/bb1 -a user:superuser1:xy + ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . \"READ\", \"WRITE\" + ${result} = Execute ozone sh bucket setacl ${protocol}${server}/${volume}/bb1 -al user:superuser1:rwxy,group:superuser1:a + ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . \"ALL\" + + Test key handling [arguments] ${protocol} ${server} ${volume} Execute ozone sh key put ${protocol}${server}/${volume}/bb1/key1 /opt/hadoop/NOTICE.txt @@ -74,3 +116,19 @@ Test key handling ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[].keyName' Should Be Equal ${result} key2 Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key2 + +Test key Acls + [arguments] ${protocol} ${server} ${volume} + Execute ozone sh key put ${protocol}${server}/${volume}/bb1/key2 /opt/hadoop/NOTICE.txt + ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclList\" : . \"ALL\" . + ${result} = Execute ozone sh key addacl ${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:rwxy + ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + ${result} = Execute ozone sh key removeacl ${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:xy + ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . 
\"READ\", \"WRITE\" + ${result} = Execute ozone sh key setacl ${protocol}${server}/${volume}/bb1/key2 -al user:superuser1:rwxy,group:superuser1:a + ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . \"ALL\" \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot index 004d2a9312140..92cf4cdcc0816 100644 --- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot +++ b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot @@ -28,6 +28,7 @@ Setup volume names ${random} Generate Random String 2 [NUMBERS] Set Suite Variable ${volume1} fstest${random} Set Suite Variable ${volume2} fstest2${random} + Set Suite Variable ${volume3} fstest3${random} *** Test Cases *** Create volume bucket with wrong credentials @@ -46,4 +47,51 @@ Create volume bucket with credentials Execute ozone sh bucket create o3://om/${volume2}/bucket3 Check volume from ozonefs - ${result} = Execute ozone fs -ls o3fs://bucket1.${volume1}/ \ No newline at end of file + ${result} = Execute ozone fs -ls o3fs://bucket1.${volume1}/ + +Test Volume Acls + ${result} = Execute ozone sh volume create ${volume3} + Should not contain ${result} Failed + ${result} = Execute ozone sh volume getacl ${volume3} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclList\" : . \"ALL\" . + ${result} = Execute ozone sh volume addacl ${volume3} -a user:superuser1:rwxy + ${result} = Execute ozone sh volume getacl ${volume3} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + ${result} = Execute ozone sh volume removeacl ${volume3} -a user:superuser1:xy + ${result} = Execute ozone sh volume getacl ${volume3} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . \"READ\", \"WRITE\" + ${result} = Execute ozone sh volume setacl ${volume3} -al user:superuser1:rwxy,group:superuser1:a + ${result} = Execute ozone sh volume getacl ${volume3} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . \"ALL\" + +Test Bucket Acls + ${result} = Execute ozone sh bucket create ${volume3}/bk1 + Should not contain ${result} Failed + ${result} = Execute ozone sh bucket getacl ${volume3}/bk1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclList\" : . \"ALL\" . + ${result} = Execute ozone sh bucket addacl ${volume3}/bk1 -a user:superuser1:rwxy + ${result} = Execute ozone sh bucket getacl ${volume3}/bk1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + ${result} = Execute ozone sh bucket removeacl ${volume3}/bk1 -a user:superuser1:xy + ${result} = Execute ozone sh bucket getacl ${volume3}/bk1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . 
\"READ\", \"WRITE\" + ${result} = Execute ozone sh bucket setacl ${volume3}/bk1 -al user:superuser1:rwxy,group:superuser1:a + ${result} = Execute ozone sh bucket getacl ${volume3}/bk1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . \"ALL\" + +Test key Acls + Execute ozone sh key put ${volume3}/bk1/key1 /opt/hadoop/NOTICE.txt + ${result} = Execute ozone sh key getacl ${volume3}/bk1/key1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclList\" : . \"ALL\" . + ${result} = Execute ozone sh key addacl ${volume3}/bk1/key1 -a user:superuser1:rwxy + ${result} = Execute ozone sh key getacl ${volume3}/bk1/key1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + ${result} = Execute ozone sh key removeacl ${volume3}/bk1/key1 -a user:superuser1:xy + ${result} = Execute ozone sh key getacl ${volume3}/bk1/key1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . \"READ\", \"WRITE\" + ${result} = Execute ozone sh key setacl ${volume3}/bk1/key1 -al user:superuser1:rwxy,group:superuser1:a + ${result} = Execute ozone sh key getacl ${volume3}/bk1/key1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclList\" : . \"ALL\" \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 6ea1a2b6d7cb8..69f3b1c688d29 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -411,7 +411,8 @@ private RemoveAclResponse removeAcl(RemoveAclRequest req) private SetAclResponse setAcl(SetAclRequest req) throws IOException { List ozoneAcl = new ArrayList<>(); - req.getAclList().forEach(a -> ozoneAcl.add(OzoneAcl.fromProtobuf(a))); + req.getAclList().forEach(a -> + ozoneAcl.add(OzoneAcl.fromProtobuf(a))); boolean response = impl.setAcl(OzoneObjInfo.fromProtobuf(req.getObj()), ozoneAcl); return SetAclResponse.newBuilder().setResponse(response).build(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java new file mode 100644 index 0000000000000..6b32f6400bd18 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell.bucket; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.Objects; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Add acl handler for bucket. + */ +@Command(name = "addacl", + description = "Add a new Acl.") +public class AddAclBucketHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--acl", "-a"}, + required = true, + description = "new acl." + + "r = READ," + + "w = WRITE," + + "c = CREATE," + + "d = DELETE," + + "l = LIST," + + "a = ALL," + + "n = NONE," + + "x = READ_AC," + + "y = WRITE_AC" + + "Ex user:user1:rw or group:hadoop:rw") + private String acl; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. i.e OZONE or S3") + private String storeType; + + /** + * Executes the Client Calls. + */ + @Override + public Void call() throws Exception { + Objects.requireNonNull(acl, "New acl to be added not specified."); + OzoneAddress address = new OzoneAddress(uri); + address.ensureBucketAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + System.out.printf("Bucket Name : %s%n", bucketName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.BUCKET) + .setStoreType(storeType == null ? 
OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + + boolean result = client.getObjectStore().addAcl(obj, + OzoneAcl.parseAcl(acl)); + + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString("Acl set successfully: " + result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java index 64dc91b55b30e..6c9de4dc03df0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java @@ -39,7 +39,11 @@ ListBucketHandler.class, CreateBucketHandler.class, UpdateBucketHandler.class, - DeleteBucketHandler.class + DeleteBucketHandler.class, + AddAclBucketHandler.class, + RemoveAclBucketHandler.class, + GetAclBucketHandler.class, + SetAclBucketHandler.class }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java new file mode 100644 index 0000000000000..0bb967c62f551 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell.bucket; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.List; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Get acl handler for bucket. + */ +@Command(name = "getacl", + description = "List all acls.") +public class GetAclBucketHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. i.e OZONE or S3") + private String storeType; + + /** + * Executes the Client Calls. 
+ */ + @Override + public Void call() throws Exception { + OzoneAddress address = new OzoneAddress(uri); + address.ensureBucketAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + System.out.printf("Bucket Name : %s%n", bucketName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.BUCKET) + .setStoreType(storeType == null ? OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + + List result = client.getObjectStore().getAcl(obj); + + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString(result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java new file mode 100644 index 0000000000000..635c34bd66f55 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell.bucket; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.Objects; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Executes Info bucket. + */ +@Command(name = "removeacl", + description = "Remove an acl.") +public class RemoveAclBucketHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--acl", "-a"}, + required = true, + description = "Remove acl." + + "r = READ," + + "w = WRITE," + + "c = CREATE," + + "d = DELETE," + + "l = LIST," + + "a = ALL," + + "n = NONE," + + "x = READ_AC," + + "y = WRITE_AC" + + "Ex user:user1:rw or group:hadoop:rw") + private String acl; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. 
i.e OZONE or S3") + private String storeType; + + /** + * Remove acl handler for bucket. + */ + @Override + public Void call() throws Exception { + Objects.requireNonNull(acl, "New acl to be added not specified."); + OzoneAddress address = new OzoneAddress(uri); + address.ensureBucketAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + System.out.printf("Bucket Name : %s%n", bucketName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.BUCKET) + .setStoreType(storeType == null ? OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + + boolean result = client.getObjectStore().removeAcl(obj, + OzoneAcl.parseAcl(acl)); + + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString("Acl removed successfully: " + result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java new file mode 100644 index 0000000000000..2fc43f9bd0255 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell.bucket; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.Objects; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Set acl handler for bucket. + */ +@Command(name = "setacl", + description = "Set acls.") +public class SetAclBucketHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--acls", "-al"}, + required = true, + description = "Comma seperated acls." 
+ + "r = READ," + + "w = WRITE," + + "c = CREATE," + + "d = DELETE," + + "l = LIST," + + "a = ALL," + + "n = NONE," + + "x = READ_AC," + + "y = WRITE_AC" + + "Ex user:user1:rw,user:user2:a,group:hadoop:a") + private String acls; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. i.e OZONE or S3") + private String storeType; + + /** + * Executes the Client Calls. + */ + @Override + public Void call() throws Exception { + Objects.requireNonNull(acls, "Acls to be set not specified."); + OzoneAddress address = new OzoneAddress(uri); + address.ensureBucketAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + System.out.printf("Bucket Name : %s%n", bucketName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.BUCKET) + .setStoreType(storeType == null ? OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + + boolean result = client.getObjectStore().setAcl(obj, + OzoneAcl.parseAcls(acls)); + + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString("Acl set successfully: " + result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java new file mode 100644 index 0000000000000..13298dceb526f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell.keys; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.Objects; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Add acl handler for key. 
+ */ +@Command(name = "addacl", + description = "Add a new Acl.") +public class AddAclKeyHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--acl", "-a"}, + required = true, + description = "Add acl." + + "r = READ," + + "w = WRITE," + + "c = CREATE," + + "d = DELETE," + + "l = LIST," + + "a = ALL," + + "n = NONE," + + "x = READ_AC," + + "y = WRITE_AC" + + "Ex user:user1:rw or group:hadoop:rw") + private String acl; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. i.e OZONE or S3") + private String storeType; + + /** + * Executes the Client Calls. + */ + @Override + public Void call() throws Exception { + Objects.requireNonNull(acl, "New acl to be added not specified."); + OzoneAddress address = new OzoneAddress(uri); + address.ensureKeyAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + String keyName = address.getKeyName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + System.out.printf("Bucket Name : %s%n", bucketName); + System.out.printf("Key Name : %s%n", keyName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setKeyName(address.getKeyName()) + .setResType(OzoneObj.ResourceType.KEY) + .setStoreType(storeType == null ? OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + + boolean result = client.getObjectStore().addAcl(obj, + OzoneAcl.parseAcl(acl)); + + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString("Acl set successfully: " + result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java new file mode 100644 index 0000000000000..edfa66aa30948 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.web.ozShell.keys; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.List; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Get acl handler for Key. + */ +@Command(name = "getacl", + description = "List all acls.") +public class GetAclKeyHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. i.e OZONE or S3") + private String storeType; + + /** + * Executes the Client Calls. + */ + @Override + public Void call() throws Exception { + OzoneAddress address = new OzoneAddress(uri); + address.ensureKeyAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + String keyName = address.getKeyName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + System.out.printf("Bucket Name : %s%n", bucketName); + System.out.printf("Key Name : %s%n", keyName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setKeyName(keyName) + .setResType(OzoneObj.ResourceType.KEY) + .setStoreType(storeType == null ? OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + + List result = client.getObjectStore().getAcl(obj); + + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString(result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java index 405c3c51d0468..4de97c57f2f02 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java @@ -40,7 +40,11 @@ GetKeyHandler.class, PutKeyHandler.class, RenameKeyHandler.class, - DeleteKeyHandler.class + DeleteKeyHandler.class, + AddAclKeyHandler.class, + RemoveAclKeyHandler.class, + SetAclKeyHandler.class, + GetAclKeyHandler.class }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java new file mode 100644 index 0000000000000..1359721642310 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell.keys; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.Objects; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Remove acl handler for key. + */ +@Command(name = "removeacl", + description = "Remove an acl.") +public class RemoveAclKeyHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--acl", "-a"}, + required = true, + description = "Remove acl." + + "r = READ," + + "w = WRITE," + + "c = CREATE," + + "d = DELETE," + + "l = LIST," + + "a = ALL," + + "n = NONE," + + "x = READ_AC," + + "y = WRITE_AC" + + "Ex user:user1:rw or group:hadoop:rw") + private String acl; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. i.e OZONE or S3") + private String storeType; + + /** + * Executes the Client Calls. + */ + @Override + public Void call() throws Exception { + Objects.requireNonNull(acl, "New acl to be added not specified."); + OzoneAddress address = new OzoneAddress(uri); + address.ensureKeyAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + String keyName = address.getKeyName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + System.out.printf("Bucket Name : %s%n", bucketName); + System.out.printf("Key Name : %s%n", keyName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setKeyName(keyName) + .setResType(OzoneObj.ResourceType.KEY) + .setStoreType(storeType == null ? 
OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + + boolean result = client.getObjectStore().removeAcl(obj, + OzoneAcl.parseAcl(acl)); + + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString("Acl set successfully: " + result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java new file mode 100644 index 0000000000000..397330591ea3a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell.keys; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.Objects; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Set acl handler for Key. + */ +@Command(name = "setacl", + description = "Set acls.") +public class SetAclKeyHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--acls", "-al"}, + required = true, + description = "Comma separated acls." + + "r = READ," + + "w = WRITE," + + "c = CREATE," + + "d = DELETE," + + "l = LIST," + + "a = ALL," + + "n = NONE," + + "x = READ_AC," + + "y = WRITE_AC" + + "Ex user:user1:rw,user:user2:a,group:hadoop:a") + private String acls; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. i.e OZONE or S3") + private String storeType; + + /** + * Executes the Client Calls. 
+ */ + @Override + public Void call() throws Exception { + Objects.requireNonNull(acls, "New acls to be added not specified."); + OzoneAddress address = new OzoneAddress(uri); + address.ensureKeyAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + String keyName = address.getKeyName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + System.out.printf("Bucket Name : %s%n", bucketName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setKeyName(keyName) + .setResType(OzoneObj.ResourceType.KEY) + .setStoreType(storeType == null ? OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + + boolean result = client.getObjectStore().setAcl(obj, + OzoneAcl.parseAcls(acls)); + + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString("Acl set successfully: " + result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java new file mode 100644 index 0000000000000..acce64860dac5 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell.volume; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.Objects; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Add acl handler for volume. + */ +@Command(name = "addacl", + description = "Add a new Acl.") +public class AddAclVolumeHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--acl", "-a"}, + required = true, + description = "Add acl." 
+ + "r = READ," + + "w = WRITE," + + "c = CREATE," + + "d = DELETE," + + "l = LIST," + + "a = ALL," + + "n = NONE," + + "x = READ_AC," + + "y = WRITE_AC" + + "Ex user:user1:rw or group:hadoop:rw") + private String acl; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. i.e OZONE or S3") + private String storeType; + + /** + * Executes the Client Calls. + */ + @Override + public Void call() throws Exception { + Objects.requireNonNull(acl, "New acl to be added not specified."); + OzoneAddress address = new OzoneAddress(uri); + address.ensureVolumeAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + + String volumeName = address.getVolumeName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.VOLUME) + .setStoreType(storeType == null ? OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + + boolean result = client.getObjectStore().addAcl(obj, + OzoneAcl.parseAcl(acl)); + + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString("Acl set successfully: " + result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java new file mode 100644 index 0000000000000..b4be3f8249d40 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell.volume; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.List; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Get acl handler for volume. 
+ */ +@Command(name = "getacl", + description = "List all acls.") +public class GetAclVolumeHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. i.e OZONE or S3") + private String storeType; + + /** + * Executes the Client Calls. + */ + @Override + public Void call() throws Exception { + OzoneAddress address = new OzoneAddress(uri); + address.ensureVolumeAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + String volumeName = address.getVolumeName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.VOLUME) + .setStoreType(storeType == null ? OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + List result = client.getObjectStore().getAcl(obj); + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString(result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java new file mode 100644 index 0000000000000..9b3420b3f3a6e --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell.volume; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.Objects; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Remove acl handler for volume. + */ +@Command(name = "removeacl", + description = "Remove an acl.") +public class RemoveAclVolumeHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--acl", "-a"}, + required = true, + description = "Remove acl." 
+ + "r = READ," + + "w = WRITE," + + "c = CREATE," + + "d = DELETE," + + "l = LIST," + + "a = ALL," + + "n = NONE," + + "x = READ_AC," + + "y = WRITE_AC" + + "Ex user:user1:rw or group:hadoop:rw") + private String acl; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. i.e OZONE or S3") + private String storeType; + + /** + * Executes the Client Calls. + */ + @Override + public Void call() throws Exception { + Objects.requireNonNull(acl, "New acl to be added not specified."); + OzoneAddress address = new OzoneAddress(uri); + address.ensureVolumeAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + + String volumeName = address.getVolumeName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.VOLUME) + .setStoreType(storeType == null ? OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + + boolean result = client.getObjectStore().removeAcl(obj, + OzoneAcl.parseAcl(acl)); + + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString("Acl removed successfully: " + result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java new file mode 100644 index 0000000000000..e3299e35946fc --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.web.ozShell.volume; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.web.ozShell.Handler; +import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; +import org.apache.hadoop.ozone.web.ozShell.Shell; +import org.apache.hadoop.ozone.web.utils.JsonUtils; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.util.Objects; + +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; + +/** + * Set acl handler for volume. 
+ */ +@Command(name = "setacl", + description = "Set acls.") +public class SetAclVolumeHandler extends Handler { + + @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) + private String uri; + + @CommandLine.Option(names = {"--acls", "-al"}, + required = true, + description = "Comma separated acls." + + "r = READ," + + "w = WRITE," + + "c = CREATE," + + "d = DELETE," + + "l = LIST," + + "a = ALL," + + "n = NONE," + + "x = READ_AC," + + "y = WRITE_AC" + + "Ex user:user1:rw,user:user2:a,group:hadoop:a") + private String acls; + + @CommandLine.Option(names = {"--store", "-s"}, + required = false, + description = "store type. i.e OZONE or S3") + private String storeType; + + /** + * Executes the Client Calls. + */ + @Override + public Void call() throws Exception { + Objects.requireNonNull(acls, "New acls to be added not specified."); + OzoneAddress address = new OzoneAddress(uri); + address.ensureVolumeAddress(); + OzoneClient client = address.createClient(createOzoneConfiguration()); + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + + if (isVerbose()) { + System.out.printf("Volume Name : %s%n", volumeName); + System.out.printf("Bucket Name : %s%n", bucketName); + } + + OzoneObj obj = OzoneObjInfo.Builder.newBuilder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.VOLUME) + .setStoreType(storeType == null ? OZONE : + OzoneObj.StoreType.valueOf(storeType)) + .build(); + System.out.printf(" acls" +acls.length() + " " + acls); + boolean result = client.getObjectStore().setAcl(obj, + OzoneAcl.parseAcls(acls)); + + System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter( + JsonUtils.toJsonString("Acl set successfully: " + result))); + client.close(); + return null; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java index 4fb71c3b4171d..833457bcbefbc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java @@ -40,7 +40,11 @@ ListVolumeHandler.class, CreateVolumeHandler.class, UpdateVolumeHandler.class, - DeleteVolumeHandler.class + DeleteVolumeHandler.class, + AddAclVolumeHandler.class, + RemoveAclVolumeHandler.class, + SetAclVolumeHandler.class, + GetAclVolumeHandler.class }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) From 50de0874d0f14526bfb7052f2a2e64543dc0fcbc Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Wed, 12 Jun 2019 10:28:47 -0700 Subject: [PATCH 0175/1308] HDFS-13231. Extend visualization for Decommissioning, Maintenance Mode under Datanode tab in the NameNode UI. Contributed by Stephen O'Donnell. 
Signed-off-by: Wei-Chiu Chuang --- .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 4 ++++ .../hadoop-hdfs/src/main/webapps/static/hadoop.css | 11 ++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html index 6dff7b23e5599..b88150b387926 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html @@ -301,8 +301,12 @@
         In service
         Down
+        Decommissioning
         Decommissioned
         Decommissioned & dead
+        [three further added lines of legend markup; the HTML tags were lost when this hunk was rendered]
         Entering Maintenance
         In Maintenance
         In Maintenance & dead
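Note: the HTML markup of the hunk above did not survive rendering, so only the legend labels and the added-line markers remain. As a hedged illustration only, a new legend entry in dfshealth.html presumably follows the same pattern as the existing entries and picks up the .dfshealth-node-decommissioning rule added to hadoop.css below; the exact class names and nesting shown here are assumptions, not text recovered from the patch.

        <!-- hypothetical legend entry; the icon glyph and colour come from the
             .dfshealth-node-decommissioning:before rule added in hadoop.css -->
        <div class="dfshealth-node-icon dfshealth-node-decommissioning">Decommissioning</div>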
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css index 6fc0289af231f..b3e79e2d5076d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css @@ -235,8 +235,13 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand { content: "\e013"; } +.dfshealth-node-decommissioning:before { + color: #5fa341; + content: "\e090"; +} + .dfshealth-node-decommissioned:before { - color: #bc5f04; + color: #eea236; content: "\e090"; } @@ -246,7 +251,7 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand { } .dfshealth-node-entering-maintenance:before { - color: #eea236; + color: #5fa341; content: "\e136"; } @@ -256,7 +261,7 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand { } .dfshealth-node-down-decommissioned:before { - color: #2e6da6; + color: #c7254e; content: "\e017"; } From cf84881dea11639bed48b4c8e8a785a535510e6d Mon Sep 17 00:00:00 2001 From: Shweta Yakkali Date: Wed, 12 Jun 2019 10:36:34 -0700 Subject: [PATCH 0176/1308] HADOOP-16365. Upgrade jackson-databind to 2.9.9. Contributed by Shweta Yakkali. Signed-off-by: Wei-Chiu Chuang --- hadoop-project/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index dc8cc8d6baf43..636fa4a69f082 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -69,7 +69,7 @@ 1.9.13 - 2.9.8 + 2.9.9 4.5.6 From 1732312f4577901fb1dab33def6d41f60bb7bda9 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Thu, 13 Jun 2019 03:16:42 +0800 Subject: [PATCH 0177/1308] =?UTF-8?q?HDDS-1663.=20Add=20datanode=20to=20ne?= =?UTF-8?q?twork=20topology=20cluster=20during=20node=20regis=E2=80=A6=20(?= =?UTF-8?q?#937)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../org/apache/hadoop/hdds/scm/net/Node.java | 12 ++ .../apache/hadoop/hdds/scm/net/NodeImpl.java | 36 ++++-- hadoop-hdds/server-scm/pom.xml | 3 + .../hadoop/hdds/scm/node/NodeManager.java | 9 ++ .../hadoop/hdds/scm/node/SCMNodeManager.java | 67 +++++++++++ .../scm/server/StorageContainerManager.java | 11 +- .../org/apache/hadoop/hdds/scm/TestUtils.java | 2 +- .../hdds/scm/container/MockNodeManager.java | 5 + .../hdds/scm/node/TestSCMNodeManager.java | 112 ++++++++++++++++++ .../testutils/ReplicationNodeManagerMock.java | 5 + .../src/test/resources/nodegroup-mapping | 24 ++++ .../src/test/resources/rack-mapping | 24 ++++ .../src/main/compose/ozone-net-topology/.env | 17 +++ .../ozone-net-topology/docker-compose.yaml | 110 +++++++++++++++++ .../compose/ozone-net-topology/docker-config | 88 ++++++++++++++ .../compose/ozone-net-topology/network-config | 22 ++++ .../main/compose/ozone-net-topology/test.sh | 35 ++++++ 17 files changed, 573 insertions(+), 9 deletions(-) create mode 100644 hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping create mode 100644 hadoop-hdds/server-scm/src/test/resources/rack-mapping create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-net-topology/.env create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-net-topology/docker-compose.yaml create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-net-topology/docker-config create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-net-topology/network-config create mode 100755 hadoop-ozone/dist/src/main/compose/ozone-net-topology/test.sh diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java index 310b336269830..0007e546770f0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java @@ -32,11 +32,23 @@ public interface Node { * exclude itself. In another words, its parent's full network location */ String getNetworkLocation(); + /** + * Set this node's network location. + * @param location it's network location + */ + void setNetworkLocation(String location); + /** @return this node's self name in network topology. This should be node's * IP or hostname. * */ String getNetworkName(); + /** + * Set this node's name, can be hostname or Ipaddress. + * @param name it's network name + */ + void setNetworkName(String name); + /** @return this node's full path in network topology. It's the concatenation * of location and name. * */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java index a9763b971932e..53b05ea294166 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java @@ -27,11 +27,11 @@ */ public class NodeImpl implements Node { // host:port# - private final String name; + private String name; // string representation of this node's location, such as /dc1/rack1 - private final String location; + private String location; // location + "/" + name - private final String path; + private String path; // which level of the tree the node resides, start from 1 for root private int level; // node's parent @@ -53,10 +53,7 @@ public NodeImpl(String name, String location, int cost) { } this.name = (name == null) ? ROOT : name; this.location = NetUtils.normalize(location); - this.path = this.location.equals(PATH_SEPARATOR_STR) ? - this.location + this.name : - this.location + PATH_SEPARATOR_STR + this.name; - + this.path = getPath(); this.cost = cost; } @@ -84,6 +81,15 @@ public String getNetworkName() { return name; } + /** + * Set this node's name, can be hostname or Ipaddress. + * @param networkName it's network name + */ + public void setNetworkName(String networkName) { + this.name = networkName; + this.path = getPath(); + } + /** * @return this node's network location */ @@ -91,6 +97,16 @@ public String getNetworkLocation() { return location; } + /** + * Set this node's network location. + * @param networkLocation it's network location + */ + @Override + public void setNetworkLocation(String networkLocation) { + this.location = networkLocation; + this.path = getPath(); + } + /** * @return this node's full path in network topology. It's the concatenation * of location and name. @@ -197,4 +213,10 @@ public int hashCode() { public String toString() { return getNetworkFullPath(); } + + private String getPath() { + return this.location.equals(PATH_SEPARATOR_STR) ? 
+ this.location + this.name : + this.location + PATH_SEPARATOR_STR + this.name; + } } \ No newline at end of file diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index b55a224a85116..99d59223871e8 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -145,6 +145,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/../../hadoop-hdds/common/src/main/resources + + ${basedir}/src/test/resources +
    diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 6b8d477c03308..6b05772cc81d6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -171,4 +171,13 @@ void processNodeReport(DatanodeDetails datanodeDetails, */ // TODO: We can give better name to this method! List getCommandQueue(UUID dnID); + + /** + * Given datanode host address, returns the DatanodeDetails for the + * node. + * + * @param address node host address + * @return the given datanode, or null if not found + */ + DatanodeDetails getNode(String address); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 2ab7295cd8091..9a5ea11b412eb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -23,6 +23,9 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.net.NetConstants; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.Node; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException; @@ -44,14 +47,19 @@ .StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.metrics2.util.MBeans; +import org.apache.hadoop.net.CachedDNSToSwitchMapping; +import org.apache.hadoop.net.DNSToSwitchMapping; +import org.apache.hadoop.net.TableMapping; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.protocol.VersionResponse; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; +import org.apache.hadoop.util.ReflectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -93,6 +101,9 @@ public class SCMNodeManager implements NodeManager { // Node manager MXBean private ObjectName nmInfoBean; private final StorageContainerManager scmManager; + private final NetworkTopology clusterMap; + private final DNSToSwitchMapping dnsToSwitchMapping; + private final boolean useHostname; /** * Constructs SCM machine Manager. @@ -108,6 +119,18 @@ public SCMNodeManager(OzoneConfiguration conf, String clusterID, LOG.info("Entering startup safe mode."); registerMXBean(); this.metrics = SCMNodeMetrics.create(this); + this.clusterMap = scmManager.getClusterMap(); + Class dnsToSwitchMappingClass = + conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + TableMapping.class, DNSToSwitchMapping.class); + DNSToSwitchMapping newInstance = ReflectionUtils.newInstance( + dnsToSwitchMappingClass, conf); + this.dnsToSwitchMapping = + ((newInstance instanceof CachedDNSToSwitchMapping) ? 
newInstance + : new CachedDNSToSwitchMapping(newInstance)); + this.useHostname = conf.getBoolean( + DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, + DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT); } private void registerMXBean() { @@ -228,7 +251,19 @@ public RegisteredCommand register( datanodeDetails.setIpAddress(dnAddress.getHostAddress()); } try { + String location; + if (useHostname) { + datanodeDetails.setNetworkName(datanodeDetails.getHostName()); + location = nodeResolve(datanodeDetails.getHostName()); + } else { + datanodeDetails.setNetworkName(datanodeDetails.getIpAddress()); + location = nodeResolve(datanodeDetails.getIpAddress()); + } + if (location != null) { + datanodeDetails.setNetworkLocation(location); + } nodeStateManager.addNode(datanodeDetails); + clusterMap.add(datanodeDetails); // Updating Node Report, as registration is successful processNodeReport(datanodeDetails, nodeReport); LOG.info("Registered Data node : {}", datanodeDetails); @@ -236,6 +271,7 @@ public RegisteredCommand register( LOG.trace("Datanode is already registered. Datanode: {}", datanodeDetails.toString()); } + return RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success) .setDatanodeUUID(datanodeDetails.getUuidString()) .setClusterID(this.clusterID) @@ -515,5 +551,36 @@ public List getCommandQueue(UUID dnID) { return commandQueue.getCommand(dnID); } + /** + * Given datanode address or host name, returns the DatanodeDetails for the + * node. + * + * @param address node host address + * @return the given datanode, or null if not found + */ + @Override + public DatanodeDetails getNode(String address) { + Node node = null; + String location = nodeResolve(address); + if (location != null) { + node = clusterMap.getNode(location + NetConstants.PATH_SEPARATOR_STR + + address); + } + return node == null ? null : (DatanodeDetails)node; + } + private String nodeResolve(String hostname) { + List hosts = new ArrayList<>(1); + hosts.add(hostname); + List resolvedHosts = dnsToSwitchMapping.resolve(hosts); + if (resolvedHosts != null && !resolvedHosts.isEmpty()) { + String location = resolvedHosts.get(0); + LOG.debug("Resolve datanode {} return location {}", hostname, location); + return location; + } else { + LOG.error("Node {} Resolution failed. 
Please make sure that DNS table " + + "mapping or configured mapping is functional.", hostname); + return null; + } + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index f13dc4e5b6164..ca60a5dd115f6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -372,6 +372,8 @@ public StorageContainerManager(OzoneConfiguration conf, private void initializeSystemManagers(OzoneConfiguration conf, SCMConfigurator configurator) throws IOException { + clusterMap = new NetworkTopologyImpl(conf); + if(configurator.getScmNodeManager() != null) { scmNodeManager = configurator.getScmNodeManager(); } else { @@ -379,7 +381,6 @@ private void initializeSystemManagers(OzoneConfiguration conf, conf, scmStorageConfig.getClusterID(), this, eventQueue); } - clusterMap = new NetworkTopologyImpl(conf); ContainerPlacementPolicy containerPlacementPolicy = ContainerPlacementPolicyFactory.getPolicy(conf, scmNodeManager, clusterMap, true); @@ -1067,4 +1068,12 @@ public Map getContainerStateCount() { public SCMMetadataStore getScmMetadataStore() { return scmMetadataStore; } + + /** + * Returns the SCM network topology cluster. + * @return NetworkTopology + */ + public NetworkTopology getClusterMap() { + return this.clusterMap; + } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java index b1dd77e5545f0..0b7437e0288b6 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java @@ -151,7 +151,7 @@ public static DatanodeDetails getDatanodeDetails( * * @return DatanodeDetails */ - private static DatanodeDetails createDatanodeDetails(String uuid, + public static DatanodeDetails createDatanodeDetails(String uuid, String hostname, String ipAddress, String networkLocation) { DatanodeDetails.Port containerPort = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, 0); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index c10bc44068ffd..19fb3a76c6e8f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -451,6 +451,11 @@ public List getCommandQueue(UUID dnID) { return null; } + @Override + public DatanodeDetails getNode(String address) { + return null; + } + /** * A class to declare some values for the nodes so that our tests * won't fail. 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 893f62da1f108..60fc2045b022c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -31,6 +32,7 @@ .StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; @@ -56,6 +58,10 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.SECONDS; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic + .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic + .NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_DEADNODE_INTERVAL; @@ -945,4 +951,110 @@ public void testHandlingSCMCommandEvent() } } + /** + * Test add node into network topology during node register. Datanode + * uses Ip address to resolve network location. + */ + @Test + public void testScmRegisterNodeWithIpAddress() + throws IOException, InterruptedException, AuthenticationException { + testScmRegisterNodeWithNetworkTopology(false); + } + + /** + * Test add node into network topology during node register. Datanode + * uses hostname to resolve network location. + */ + @Test + public void testScmRegisterNodeWithHostname() + throws IOException, InterruptedException, AuthenticationException { + testScmRegisterNodeWithNetworkTopology(true); + } + + /** + * Test add node into a 4-layer network topology during node register. 
+ */ + @Test + public void testScmRegisterNodeWith4LayerNetworkTopology() + throws IOException, InterruptedException, AuthenticationException { + OzoneConfiguration conf = getConf(); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, + MILLISECONDS); + + // create table mapping file + String[] hostNames = {"host1", "host2", "host3", "host4"}; + String[] ipAddress = {"1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"}; + String mapFile = this.getClass().getClassLoader() + .getResource("nodegroup-mapping").getPath(); + + // create and register nodes + conf.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + "org.apache.hadoop.net.TableMapping"); + conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile); + conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, + "network-topology-nodegroup.xml"); + final int nodeCount = hostNames.length; + // use default IP address to resolve node + try (SCMNodeManager nodeManager = createNodeManager(conf)) { + DatanodeDetails[] nodes = new DatanodeDetails[nodeCount]; + for (int i = 0; i < nodeCount; i++) { + DatanodeDetails node = TestUtils.createDatanodeDetails( + UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null); + nodeManager.register(node, null, null); + nodes[i] = node; + } + + // verify network topology cluster has all the registered nodes + Thread.sleep(4 * 1000); + NetworkTopology clusterMap = scm.getClusterMap(); + assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY)); + assertEquals(nodeCount, clusterMap.getNumOfLeafNode("")); + assertEquals(4, clusterMap.getMaxLevel()); + List nodeList = nodeManager.getAllNodes(); + nodeList.stream().forEach(node -> + Assert.assertTrue(node.getNetworkLocation().startsWith("/rack1/ng"))); + } + } + + private void testScmRegisterNodeWithNetworkTopology(boolean useHostname) + throws IOException, InterruptedException, AuthenticationException { + OzoneConfiguration conf = getConf(); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, + MILLISECONDS); + + // create table mapping file + String[] hostNames = {"host1", "host2", "host3", "host4"}; + String[] ipAddress = {"1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"}; + String mapFile = this.getClass().getClassLoader() + .getResource("rack-mapping").getPath(); + + // create and register nodes + conf.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + "org.apache.hadoop.net.TableMapping"); + conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile); + if (useHostname) { + conf.set(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, "true"); + } + final int nodeCount = hostNames.length; + // use default IP address to resolve node + try (SCMNodeManager nodeManager = createNodeManager(conf)) { + DatanodeDetails[] nodes = new DatanodeDetails[nodeCount]; + for (int i = 0; i < nodeCount; i++) { + DatanodeDetails node = TestUtils.createDatanodeDetails( + UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null); + nodeManager.register(node, null, null); + nodes[i] = node; + } + + // verify network topology cluster has all the registered nodes + Thread.sleep(4 * 1000); + NetworkTopology clusterMap = scm.getClusterMap(); + assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY)); + assertEquals(nodeCount, clusterMap.getNumOfLeafNode("")); + assertEquals(3, clusterMap.getMaxLevel()); + List nodeList = nodeManager.getAllNodes(); + nodeList.stream().forEach(node -> + Assert.assertTrue(node.getNetworkLocation().equals("/rack1"))); + } + } } diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index 35cc1aa901b2a..bc26e3c397a1c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -309,4 +309,9 @@ public void onMessage(CommandForDatanode commandForDatanode, public List getCommandQueue(UUID dnID) { return null; } + + @Override + public DatanodeDetails getNode(String address) { + return null; + } } diff --git a/hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping b/hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping new file mode 100644 index 0000000000000..01f7d5db2d4da --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +host1 /rack1/ng1 +host2 /rack1/ng1 +host3 /rack1/ng2 +host4 /rack1/ng2 +1.2.3.4 /rack1/ng1 +2.3.4.5 /rack1/ng1 +3.4.5.6 /rack1/ng2 +4.5.6.7 /rack1/ng2 \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/resources/rack-mapping b/hadoop-hdds/server-scm/src/test/resources/rack-mapping new file mode 100644 index 0000000000000..47eac9731e78d --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/resources/rack-mapping @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +host1 /rack1 +host2 /rack1 +host3 /rack1 +host4 /rack1 +1.2.3.4 /rack1 +2.3.4.5 /rack1 +3.4.5.6 /rack1 +4.5.6.7 /rack1 \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone-net-topology/.env b/hadoop-ozone/dist/src/main/compose/ozone-net-topology/.env new file mode 100644 index 0000000000000..f1e5de4ad8ae3 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-net-topology/.env @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +HDDS_VERSION=0.5.0-SNAPSHOT diff --git a/hadoop-ozone/dist/src/main/compose/ozone-net-topology/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-net-topology/docker-compose.yaml new file mode 100644 index 0000000000000..aa46df962bed7 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-net-topology/docker-compose.yaml @@ -0,0 +1,110 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +version: "3" +services: + datanode_1: + image: apache/hadoop-runner:jdk11 + privileged: true #required by the profiler + volumes: + - ../..:/opt/hadoop + ports: + - 9864 + - 9882 + command: ["/opt/hadoop/bin/ozone","datanode"] + env_file: + - ./docker-config + networks: + service_network: + ipv4_address: 10.5.0.4 + datanode_2: + image: apache/hadoop-runner:jdk11 + privileged: true #required by the profiler + volumes: + - ../..:/opt/hadoop + ports: + - 9864 + - 9882 + command: ["/opt/hadoop/bin/ozone","datanode"] + env_file: + - ./docker-config + networks: + service_network: + ipv4_address: 10.5.0.5 + datanode_3: + image: apache/hadoop-runner:jdk11 + privileged: true #required by the profiler + volumes: + - ../..:/opt/hadoop + ports: + - 9864 + - 9882 + command: ["/opt/hadoop/bin/ozone","datanode"] + env_file: + - ./docker-config + networks: + service_network: + ipv4_address: 10.5.0.6 + datanode_4: + image: apache/hadoop-runner:jdk11 + privileged: true #required by the profiler + volumes: + - ../..:/opt/hadoop + ports: + - 9864 + - 9882 + command: ["/opt/hadoop/bin/ozone","datanode"] + env_file: + - ./docker-config + networks: + service_network: + ipv4_address: 10.5.0.7 + om: + image: apache/hadoop-runner:jdk11 + privileged: true #required by the profiler + volumes: + - ../..:/opt/hadoop + ports: + - 9874:9874 + environment: + ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + env_file: + - ./docker-config + command: ["/opt/hadoop/bin/ozone","om"] + networks: + service_network: + ipv4_address: 10.5.0.70 + scm: + image: apache/hadoop-runner:jdk11 + privileged: true #required by the profiler + volumes: + - ../..:/opt/hadoop + ports: + - 9876:9876 + env_file: + - ./docker-config + environment: + ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + command: ["/opt/hadoop/bin/ozone","scm"] + networks: + service_network: + ipv4_address: 10.5.0.71 +networks: + service_network: + driver: bridge + ipam: + config: + - subnet: 10.5.0.0/16 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-net-topology/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-net-topology/docker-config new file mode 100644 index 0000000000000..ea98240ef909c --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-net-topology/docker-config @@ -0,0 +1,88 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +OZONE-SITE.XML_ozone.om.address=om +OZONE-SITE.XML_ozone.om.http-address=om:9874 +OZONE-SITE.XML_ozone.scm.names=scm +OZONE-SITE.XML_ozone.enabled=True +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.block.client.address=scm +OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata +OZONE-SITE.XML_ozone.handler.type=distributed +OZONE-SITE.XML_ozone.scm.client.address=scm +OZONE-SITE.XML_ozone.replication=1 +OZONE-SITE.XML_hdds.datanode.dir=/data/hdds +OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true +HDFS-SITE.XML_rpc.metrics.quantile.enable=true +HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 +HDFS-SITE.XML_net.topology.node.switch.mapping.impl=org.apache.hadoop.net.TableMapping +HDFS-SITE.XML_net.topology.table.file.name=/opt/hadoop/compose/ozone-net-topology/network-config +ASYNC_PROFILER_HOME=/opt/profiler +LOG4J.PROPERTIES_log4j.rootLogger=DEBUG, ARF +LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender +LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR +LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN +LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR +LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN +LOG4J.PROPERTIES_log4j.appender.ARF=org.apache.log4j.RollingFileAppender +LOG4J.PROPERTIES_log4j.appender.ARF.layout=org.apache.log4j.PatternLayout +LOG4J.PROPERTIES_log4j.appender.ARF.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +LOG4J.PROPERTIES_log4j.appender.ARF.file=/opt/hadoop/logs/${module.name}-${user.name}.log +HDDS_DN_OPTS=-Dmodule.name=datanode +HDFS_OM_OPTS=-Dmodule.name=om +HDFS_STORAGECONTAINERMANAGER_OPTS=-Dmodule.name=scm + +#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
+#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm + +#LOG4J2.PROPERTIES_* are for Ozone Audit Logging +LOG4J2.PROPERTIES_monitorInterval=30 +LOG4J2.PROPERTIES_filter=read,write +LOG4J2.PROPERTIES_filter.read.type=MarkerFilter +LOG4J2.PROPERTIES_filter.read.marker=READ +LOG4J2.PROPERTIES_filter.read.onMatch=DENY +LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL +LOG4J2.PROPERTIES_filter.write.type=MarkerFilter +LOG4J2.PROPERTIES_filter.write.marker=WRITE +LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL +LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL +LOG4J2.PROPERTIES_appenders=console, rolling +LOG4J2.PROPERTIES_appender.console.type=Console +LOG4J2.PROPERTIES_appender.console.name=STDOUT +LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout +LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n +LOG4J2.PROPERTIES_appender.rolling.type=RollingFile +LOG4J2.PROPERTIES_appender.rolling.name=RollingFile +LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log +LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz +LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout +LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n +LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies +LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy +LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 +LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy +LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB +LOG4J2.PROPERTIES_loggers=audit +LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger +LOG4J2.PROPERTIES_logger.audit.name=OMAudit +LOG4J2.PROPERTIES_logger.audit.level=INFO +LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling +LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile +LOG4J2.PROPERTIES_rootLogger.level=INFO +LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout +LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/compose/ozone-net-topology/network-config b/hadoop-ozone/dist/src/main/compose/ozone-net-topology/network-config new file mode 100644 index 0000000000000..5c6af824a1944 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-net-topology/network-config @@ -0,0 +1,22 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +10.5.0.4 /rack1 +10.5.0.5 /rack1 +10.5.0.6 /rack1 +10.5.0.7 /rack2 +10.5.0.8 /rack2 +10.5.0.9 /rack2 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-net-topology/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-net-topology/test.sh new file mode 100755 index 0000000000000..f36fb48dfbdd6 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-net-topology/test.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +start_docker_env + +#Due to the limitation of the current auditparser test, it should be the +#first test in a clean cluster. + +execute_robot_test om auditparser + +execute_robot_test scm basic/basic.robot + +stop_docker_env + +generate_report From 205dd2d8e1db46a8d4e1711e7b74e4e5fe162686 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Wed, 12 Jun 2019 18:03:33 -0400 Subject: [PATCH 0178/1308] HADOOP-16367. Fixed MiniYarnCluster AuthenticationFilter initialization. 
Contributed by Prabhu Joseph
---
 .../hadoop/yarn/server/MiniYARNCluster.java   | 25 +++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index fa69f186d9786..19c4eb4e1ab2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -25,9 +25,13 @@
 import java.net.UnknownHostException;
 import java.util.Collection;
 import java.util.Map;
+import java.util.Set;
+import java.util.LinkedHashSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;

+import org.apache.commons.lang3.StringUtils;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -90,6 +94,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
+import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer;
 import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
 import org.apache.hadoop.yarn.server.timeline.recovery.MemoryTimelineStateStore;
@@ -824,6 +829,26 @@ protected synchronized void serviceInit(Configuration conf)
     @Override
     protected synchronized void serviceStart() throws Exception {
+
+      // Removing RMAuthenticationFilter as it conflicts with
+      // TimelineAuthenticationFilter
+      Configuration conf = getConfig();
+      String filterInitializerConfKey = "hadoop.http.filter.initializers";
+      String initializers = conf.get(filterInitializerConfKey, "");
+      String[] parts = initializers.split(",");
+      Set<String> target = new LinkedHashSet<String>();
+      for (String filterInitializer : parts) {
+        filterInitializer = filterInitializer.trim();
+        if (filterInitializer.equals(
+            RMAuthenticationFilterInitializer.class.getName())
+            || filterInitializer.isEmpty()) {
+          continue;
+        }
+        target.add(filterInitializer);
+      }
+      initializers = StringUtils.join(target, ",");
+      conf.set(filterInitializerConfKey, initializers);
+
       appHistoryServer.start();
       if (appHistoryServer.getServiceState() != STATE.STARTED) {
         // AHS could have failed.
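The hunk above strips RMAuthenticationFilterInitializer (and blank entries) out of hadoop.http.filter.initializers before the application history server starts, so it cannot clash with the timeline authentication filter. A minimal standalone sketch of the same pruning idea, shown only for illustration; the helper class name and the use of streams are this sketch's own and are not part of the patch:

    import java.util.Arrays;
    import java.util.stream.Collectors;

    public final class FilterInitializerPruner {
      private FilterInitializerPruner() {
      }

      // Drops blank entries, duplicates, and the unwanted initializer class name
      // from a comma-separated filter-initializer list, preserving order.
      public static String prune(String initializers, String unwantedClassName) {
        return Arrays.stream(initializers.split(","))
            .map(String::trim)
            .filter(name -> !name.isEmpty())
            .filter(name -> !name.equals(unwantedClassName))
            .distinct()
            .collect(Collectors.joining(","));
      }
    }

A caller would pass the current configuration value and RMAuthenticationFilterInitializer.class.getName(), then write the pruned string back under the same key, which is what the committed change does with a LinkedHashSet instead of a stream.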
From a43f4440f77ab7a8ad7adcb67c7ab6222458a692 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Wed, 12 Jun 2019 16:25:31 -0700 Subject: [PATCH 0179/1308] =?UTF-8?q?HDDS-1543.=20Implement=20addAcl,remov?= =?UTF-8?q?eAcl,setAcl,getAcl=20for=20Prefix.=20Contr=E2=80=A6=20(#927)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../hadoop/ozone/om/OzoneManagerLock.java | 25 ++ .../ozone/om/exceptions/OMException.java | 5 +- .../hadoop/ozone/security/acl/OzoneObj.java | 16 +- .../ozone/security/acl/OzoneObjInfo.java | 65 +++- .../apache/hadoop/ozone/util/RadixTree.java | 12 +- .../src/main/proto/OzoneManagerProtocol.proto | 19 +- .../hadoop/ozone/util/TestRadixTree.java | 2 - .../rpc/TestOzoneRpcClientAbstract.java | 64 +++- .../apache/hadoop/ozone/om/KeyManager.java | 41 --- .../apache/hadoop/ozone/om/OzoneManager.java | 14 +- .../apache/hadoop/ozone/om/PrefixManager.java | 45 +++ .../hadoop/ozone/om/PrefixManagerImpl.java | 316 ++++++++++++++++++ .../hadoop/ozone/om/fs/OzoneManagerFS.java | 3 +- .../hadoop/ozone/om/TestKeyManagerImpl.java | 237 ++++++++++++- 14 files changed, 786 insertions(+), 78 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManager.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerLock.java index 0e36898c3412b..c569c09823d19 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerLock.java @@ -46,6 +46,9 @@ * * 2 Bucket Lock * + * + * 3 Prefix Lock + * * * * One cannot obtain a lower weight lock while holding a lock with higher @@ -66,6 +69,7 @@ public final class OzoneManagerLock { private static final String VOLUME_LOCK = "volumeLock"; private static final String BUCKET_LOCK = "bucketLock"; + private static final String PREFIX_LOCK = "prefixLock"; private static final String S3_BUCKET_LOCK = "s3BucketLock"; private static final String S3_SECRET_LOCK = "s3SecretetLock"; @@ -77,6 +81,7 @@ public final class OzoneManagerLock { () -> ImmutableMap.of( VOLUME_LOCK, new AtomicInteger(0), BUCKET_LOCK, new AtomicInteger(0), + PREFIX_LOCK, new AtomicInteger(0), S3_BUCKET_LOCK, new AtomicInteger(0), S3_SECRET_LOCK, new AtomicInteger(0) ) @@ -241,4 +246,24 @@ public void releaseS3SecretLock(String awsAccessId) { manager.unlock(awsAccessId); myLocks.get().get(S3_SECRET_LOCK).decrementAndGet(); } + + public void acquirePrefixLock(String prefixPath) { + if (hasAnyPrefixLock()) { + throw new RuntimeException( + "Thread '" + Thread.currentThread().getName() + + "' cannot acquire prefix path lock while holding prefix " + + "path lock(s) for path: " + prefixPath + "."); + } + manager.lock(prefixPath); + myLocks.get().get(PREFIX_LOCK).incrementAndGet(); + } + + private boolean hasAnyPrefixLock() { + return myLocks.get().get(PREFIX_LOCK).get() != 0; + } + + public void releasePrefixLock(String prefixPath) { + manager.unlock(prefixPath); + myLocks.get().get(PREFIX_LOCK).decrementAndGet(); + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index d3925f332947a..2ee88d8b0957b 100644 --- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -199,6 +199,9 @@ public enum ResultCodes { PERMISSION_DENIED, // Error codes used during acl validation - TIMEOUT // Error codes used during acl validation + TIMEOUT, // Error codes used during acl validation + + PREFIX_NOT_FOUND, + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java index 74d0aa5f7fa1d..6e9ac25aa7f66 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java @@ -71,6 +71,19 @@ public StoreType getStoreType() { public abstract String getKeyName(); + /** + * Get PrefixName. + * A prefix name is like a key name under the bucket but + * are mainly used for ACL for now and persisted into a separate prefix table. + * + * @return prefix name. + */ + public abstract String getPrefixName(); + + /** + * Get full path of a key or prefix including volume and bucket. + * @return full path of a key or prefix. + */ public abstract String getPath(); /** @@ -79,7 +92,8 @@ public StoreType getStoreType() { public enum ResourceType { VOLUME(OzoneConsts.VOLUME), BUCKET(OzoneConsts.BUCKET), - KEY(OzoneConsts.KEY); + KEY(OzoneConsts.KEY), + PREFIX(OzoneConsts.PREFIX); /** * String value for this Enum. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index 537134a539814..a45a156effd67 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -23,32 +23,51 @@ /** * Class representing an ozone object. 
+ * It can be a volume with non-null volumeName (bucketName=null & name=null) + * or a bucket with non-null volumeName and bucketName (name=null) + * or a key with non-null volumeName, bucketName and key name + * (via getKeyName) + * or a prefix with non-null volumeName, bucketName and prefix name + * (via getPrefixName) */ public final class OzoneObjInfo extends OzoneObj { private final String volumeName; private final String bucketName; - private final String keyName; - + private final String name; + /** + * + * @param resType + * @param storeType + * @param volumeName + * @param bucketName + * @param name - keyName/PrefixName + */ private OzoneObjInfo(ResourceType resType, StoreType storeType, - String volumeName, String bucketName, String keyName) { + String volumeName, String bucketName, String name) { super(resType, storeType); this.volumeName = volumeName; this.bucketName = bucketName; - this.keyName = keyName; + this.name = name; } @Override public String getPath() { switch (getResourceType()) { case VOLUME: - return getVolumeName(); + return OZONE_URI_DELIMITER + getVolumeName(); case BUCKET: - return getVolumeName() + OZONE_URI_DELIMITER + getBucketName(); + return OZONE_URI_DELIMITER + getVolumeName() + + OZONE_URI_DELIMITER + getBucketName(); case KEY: - return getVolumeName() + OZONE_URI_DELIMITER + getBucketName() + return OZONE_URI_DELIMITER + getVolumeName() + + OZONE_URI_DELIMITER + getBucketName() + OZONE_URI_DELIMITER + getKeyName(); + case PREFIX: + return OZONE_URI_DELIMITER + getVolumeName() + + OZONE_URI_DELIMITER + getBucketName() + + OZONE_URI_DELIMITER + getPrefixName(); default: throw new IllegalArgumentException("Unknown resource " + "type" + getResourceType()); @@ -67,9 +86,15 @@ public String getBucketName() { @Override public String getKeyName() { - return keyName; + return name; } + @Override + public String getPrefixName() { + return name; + } + + public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj proto) { Builder builder = new Builder() @@ -88,7 +113,7 @@ public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj case BUCKET: if (tokens.length < 2) { throw new IllegalArgumentException("Unexpected argument for " + - "Ozone key. Path:" + proto.getPath()); + "Ozone bucket. Path:" + proto.getPath()); } builder.setVolumeName(tokens[0]); builder.setBucketName(tokens[1]); @@ -102,6 +127,15 @@ public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj builder.setBucketName(tokens[1]); builder.setKeyName(tokens[2]); break; + case PREFIX: + if (tokens.length < 3) { + throw new IllegalArgumentException("Unexpected argument for " + + "Ozone Prefix. Path:" + proto.getPath()); + } + builder.setVolumeName(tokens[0]); + builder.setBucketName(tokens[1]); + builder.setPrefixName(tokens[2]); + break; default: throw new IllegalArgumentException("Unexpected type for " + "Ozone key. 
Type:" + proto.getResType()); @@ -118,7 +152,7 @@ public static class Builder { private OzoneObj.StoreType storeType; private String volumeName; private String bucketName; - private String keyName; + private String name; public static Builder newBuilder() { return new Builder(); @@ -145,14 +179,17 @@ public Builder setBucketName(String bucket) { } public Builder setKeyName(String key) { - this.keyName = key; + this.name = key; + return this; + } + + public Builder setPrefixName(String prefix) { + this.name = prefix; return this; } public OzoneObjInfo build() { - return new OzoneObjInfo(resType, storeType, volumeName, bucketName, - keyName); + return new OzoneObjInfo(resType, storeType, volumeName, bucketName, name); } } - } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java index 72e9ab3f5e776..597f58db3fd6e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java @@ -202,9 +202,15 @@ public String getLongestPrefix(String path) { break; } } - return level >= 1 ? - Paths.get(root.getName()).resolve(p.subpath(0, level)).toString() : - root.getName(); + + if (level >= 1) { + Path longestMatch = + Paths.get(root.getName()).resolve(p.subpath(0, level)); + String ret = longestMatch.toString(); + return path.endsWith("/") ? ret + "/" : ret; + } else { + return root.getName(); + } } // root of a radix tree has a name of "/" and may optionally has it value. diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index 21cacf6ebe552..2c4766a89524f 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -276,6 +276,7 @@ enum Status { NOT_A_FILE = 47; PERMISSION_DENIED = 48; TIMEOUT = 49; + PREFIX_NOT_FOUND=50; } @@ -507,15 +508,15 @@ message OzoneAclInfo { } enum OzoneAclRights { - READ = 1; - WRITE = 2; - CREATE = 3; - LIST = 4; - DELETE = 5; - READ_ACL = 6; - WRITE_ACL = 7; - ALL = 8; - NONE = 9; + READ = 1; + WRITE = 2; + CREATE = 3; + LIST = 4; + DELETE = 5; + READ_ACL = 6; + WRITE_ACL = 7; + ALL = 8; + NONE = 9; } required OzoneAclType type = 1; required string name = 2; diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java index ceed5346f8134..57b02681deb03 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java @@ -84,7 +84,6 @@ public void testGetLongestPrefixPath() { assertEquals("g", lpn.getName()); lpn.setValue(100); - List> lpq = ROOT.getLongestPrefixPath("/a/b/c/d/g/q"); RadixNode lqn = lpp.get(lpq.size()-1); @@ -93,7 +92,6 @@ public void testGetLongestPrefixPath() { assertEquals("g", lqn.getName()); assertEquals(100, (int)lqn.getValue()); - assertEquals("/a/", RadixTree.radixPathToString( ROOT.getLongestPrefixPath("/a/g"))); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index b0f7888c3be56..d24b3da5d2b52 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.BitSet; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -2200,6 +2201,66 @@ public void testNativeAclsForKey() throws Exception { validateOzoneAcl(ozObj); } + @Test + public void testNativeAclsForPrefix() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + String prefix1 = "PF" + UUID.randomUUID().toString() + "/"; + String key1 = prefix1 + "KEY" + UUID.randomUUID().toString(); + + String prefix2 = "PF" + UUID.randomUUID().toString() + "/"; + String key2 = prefix2 + "KEY" + UUID.randomUUID().toString(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + assertNotNull("Bucket creation failed", bucket); + + writeKey(key1, bucket); + writeKey(key2, bucket); + + OzoneObj ozObj = new OzoneObjInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setPrefixName(prefix1) + .setResType(OzoneObj.ResourceType.PREFIX) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); + + // add acl + BitSet aclRights1 = new BitSet(); + aclRights1.set(ACLType.READ.ordinal()); + OzoneAcl user1Acl = new OzoneAcl(ACLIdentityType.USER, + "user1", aclRights1); + assertTrue(store.addAcl(ozObj, user1Acl)); + + // get acl + List aclsGet = store.getAcl(ozObj); + Assert.assertEquals(1, aclsGet.size()); + Assert.assertEquals(user1Acl, aclsGet.get(0)); + + // remove acl + Assert.assertTrue(store.removeAcl(ozObj, user1Acl)); + aclsGet = store.getAcl(ozObj); + Assert.assertEquals(0, aclsGet.size()); + + // set acl + BitSet aclRights2 = new BitSet(); + aclRights2.set(ACLType.ALL.ordinal()); + OzoneAcl group1Acl = new OzoneAcl(ACLIdentityType.GROUP, + "group1", aclRights2); + List acls = new ArrayList<>(); + acls.add(user1Acl); + acls.add(group1Acl); + Assert.assertTrue(store.setAcl(ozObj, acls)); + + // get acl + aclsGet = store.getAcl(ozObj); + Assert.assertEquals(2, aclsGet.size()); + } + /** * Helper function to get default acl list for current user. 
* @@ -2218,8 +2279,7 @@ private List getAclList(OzoneConfiguration conf) listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, ugi.getUserName(), userRights)); //Group ACLs of the User - List userGroups = Arrays.asList(UserGroupInformation - .createRemoteUser(ugi.getUserName()).getGroupNames()); + List userGroups = Arrays.asList(ugi.getGroupNames()); userGroups.stream().forEach((group) -> listOfAcls.add( new OzoneAcl(ACLIdentityType.GROUP, group, groupRights))); return listOfAcls; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 51c0cfae631b3..1259f715d6698 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -18,7 +18,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -36,7 +35,6 @@ .KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .KeyLocation; -import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.utils.BackgroundService; import java.io.IOException; @@ -286,43 +284,4 @@ OmMultipartUploadCompleteInfo completeMultipartUpload(OmKeyArgs omKeyArgs, OmMultipartUploadListParts listParts(String volumeName, String bucketName, String keyName, String uploadID, int partNumberMarker, int maxParts) throws IOException; - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * - * @throws IOException if there is error. - * */ - boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException; - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * - * @throws IOException if there is error. - * */ - boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException; - - /** - * Acls to be set for given Ozone object. This operations reset ACL for - * given object to list of ACLs provided in argument. - * @param obj Ozone object. - * @param acls List of acls. - * - * @throws IOException if there is error. - * */ - boolean setAcl(OzoneObj obj, List acls) throws IOException; - - /** - * Returns list of ACLs for given Ozone object. - * @param obj Ozone object. - * - * @throws IOException if there is error. 
- * */ - List getAcl(OzoneObj obj) throws IOException; - } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 598525489e121..9ce581b5865fe 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -237,6 +237,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private final VolumeManager volumeManager; private final BucketManager bucketManager; private final KeyManager keyManager; + private final PrefixManagerImpl prefixManager; private final OMMetrics metrics; private OzoneManagerHttpServer httpServer; private final OMStorage omStorage; @@ -365,6 +366,8 @@ private OzoneManager(OzoneConfiguration conf) throws IOException, new ScmClient(scmBlockClient, scmContainerClient), metadataManager, configuration, omStorage.getOmId(), blockTokenMgr, getKmsProvider()); + prefixManager = new PrefixManagerImpl(metadataManager); + shutdownHook = () -> { saveOmMetrics(); }; @@ -3033,6 +3036,8 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { return bucketManager.addAcl(obj, acl); case KEY: return keyManager.addAcl(obj, acl); + case PREFIX: + return prefixManager.addAcl(obj, acl); default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); @@ -3057,11 +3062,13 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { switch (obj.getResourceType()) { case VOLUME: return volumeManager.removeAcl(obj, acl); - case BUCKET: return bucketManager.removeAcl(obj, acl); case KEY: return keyManager.removeAcl(obj, acl); + case PREFIX: + return prefixManager.removeAcl(obj, acl); + default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); @@ -3090,6 +3097,8 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { return bucketManager.setAcl(obj, acls); case KEY: return keyManager.setAcl(obj, acls); + case PREFIX: + return prefixManager.setAcl(obj, acls); default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); @@ -3116,6 +3125,9 @@ public List getAcl(OzoneObj obj) throws IOException { return bucketManager.getAcl(obj); case KEY: return keyManager.getAcl(obj); + case PREFIX: + return prefixManager.getAcl(obj); + default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManager.java new file mode 100644 index 0000000000000..a505b8d7fc062 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManager.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; + +import java.util.List; + +/** + * Handles prefix commands. + * //TODO: support OzoneManagerFS for ozfs optimization using prefix tree. + */ +public interface PrefixManager extends IOzoneAcl { + + /** + * Returns the metadataManager. + * @return OMMetadataManager. + */ + OMMetadataManager getMetadataManager(); + + /** + * Get the list of path components that match with obj's path. + * longest prefix. + * Note: the number of the entries include a root "/" + * so if you have a longtest prefix path /a/b/c/ + * the returned list will be ["/", "a", "b", "c"] + * @param path ozone object path + * @return list of longest path components that matches obj's path. + */ + List getLongestPrefixPath(String path); +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java new file mode 100644 index 0000000000000..b9aff890982d4 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -0,0 +1,316 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om; + +import com.google.common.base.Strings; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.util.RadixNode; +import org.apache.hadoop.ozone.util.RadixTree; +import org.apache.hadoop.utils.db.*; +import org.apache.hadoop.utils.db.Table.KeyValue; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX; + +/** + * Implementation of PreManager. + */ +public class PrefixManagerImpl implements PrefixManager { + private static final Logger LOG = + LoggerFactory.getLogger(PrefixManagerImpl.class); + + private static final List EMPTY_ACL_LIST = new ArrayList<>(); + private final OMMetadataManager metadataManager; + + // In-memory prefix tree to optimize ACL evaluation + private RadixTree prefixTree; + + public PrefixManagerImpl(OMMetadataManager metadataManager) { + this.metadataManager = metadataManager; + loadPrefixTree(); + } + + private void loadPrefixTree() { + prefixTree = new RadixTree<>(); + try (TableIterator> iterator = + getMetadataManager().getPrefixTable().iterator()) { + iterator.seekToFirst(); + while (iterator.hasNext()) { + KeyValue kv = iterator.next(); + prefixTree.insert(kv.getKey(), kv.getValue()); + } + } catch (IOException ex) { + LOG.error("Fail to load prefix tree"); + } + } + + + @Override + public OMMetadataManager getMetadataManager() { + return metadataManager; + } + + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * @throws IOException if there is error. 
+ */ + @Override + public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + validateOzoneObj(obj); + + String prefixPath = obj.getPath(); + metadataManager.getLock().acquirePrefixLock(prefixPath); + try { + OmPrefixInfo prefixInfo = + metadataManager.getPrefixTable().get(prefixPath); + List list = null; + if (prefixInfo != null) { + list = prefixInfo.getAcls(); + } + + if (list == null) { + list = new ArrayList<>(); + list.add(acl); + } else { + boolean found = false; + for (OzoneAcl a: list) { + if (a.getName().equals(acl.getName()) && + a.getType() == acl.getType()) { + found = true; + a.getAclBitSet().or(acl.getAclBitSet()); + break; + } + } + if (!found) { + list.add(acl); + } + } + + OmPrefixInfo.Builder upiBuilder = OmPrefixInfo.newBuilder(); + upiBuilder.setName(prefixPath).setAcls(list); + if (prefixInfo != null && prefixInfo.getMetadata() != null) { + upiBuilder.addAllMetadata(prefixInfo.getMetadata()); + } + prefixInfo = upiBuilder.build(); + // Persist into prefix table first + metadataManager.getPrefixTable().put(prefixPath, prefixInfo); + // update the in-memory prefix tree + prefixTree.insert(prefixPath, prefixInfo); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Add acl operation failed for prefix path:{} acl:{}", + prefixPath, acl, ex); + } + throw ex; + } finally { + metadataManager.getLock().releasePrefixLock(prefixPath); + } + return true; + } + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * @throws IOException if there is error. + */ + @Override + public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + validateOzoneObj(obj); + String prefixPath = obj.getPath(); + metadataManager.getLock().acquirePrefixLock(prefixPath); + try { + OmPrefixInfo prefixInfo = + metadataManager.getPrefixTable().get(prefixPath); + List list = null; + if (prefixInfo != null) { + list = prefixInfo.getAcls(); + } + + if (list == null) { + LOG.debug("acl {} does not exist for prefix path {}", acl, prefixPath); + return false; + } + + boolean found = false; + for (OzoneAcl a: list) { + if (a.getName().equals(acl.getName()) + && a.getType() == acl.getType()) { + found = true; + a.getAclBitSet().andNot(acl.getAclBitSet()); + if (a.getAclBitSet().isEmpty()) { + list.remove(a); + } + break; + } + } + if (!found) { + LOG.debug("acl {} does not exist for prefix path {}", acl, prefixPath); + return false; + } + + if (!list.isEmpty()) { + OmPrefixInfo.Builder upiBuilder = OmPrefixInfo.newBuilder(); + upiBuilder.setName(prefixPath).setAcls(list); + if (prefixInfo != null && prefixInfo.getMetadata() != null) { + upiBuilder.addAllMetadata(prefixInfo.getMetadata()); + } + prefixInfo = upiBuilder.build(); + metadataManager.getPrefixTable().put(prefixPath, prefixInfo); + prefixTree.insert(prefixPath, prefixInfo); + } else { + // Remove prefix entry in table and prefix tree if the # of acls is 0 + metadataManager.getPrefixTable().delete(prefixPath); + prefixTree.removePrefixPath(prefixPath); + } + + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Remove prefix acl operation failed for prefix path:{}" + + " acl:{}", prefixPath, acl, ex); + } + throw ex; + } finally { + metadataManager.getLock().releasePrefixLock(prefixPath); + } + return true; + } + + /** + * Acls to be set for given Ozone object. This operations reset ACL for given + * object to list of ACLs provided in argument. 
+ * + * @param obj Ozone object. + * @param acls List of acls. + * @throws IOException if there is error. + */ + @Override + public boolean setAcl(OzoneObj obj, List acls) throws IOException { + validateOzoneObj(obj); + String prefixPath = obj.getPath(); + metadataManager.getLock().acquirePrefixLock(prefixPath); + try { + OmPrefixInfo prefixInfo = + metadataManager.getPrefixTable().get(prefixPath); + OmPrefixInfo.Builder upiBuilder = OmPrefixInfo.newBuilder(); + upiBuilder.setName(prefixPath).setAcls(acls); + if (prefixInfo != null && prefixInfo.getMetadata() != null) { + upiBuilder.addAllMetadata(prefixInfo.getMetadata()); + } + prefixInfo = upiBuilder.build(); + prefixTree.insert(prefixPath, prefixInfo); + metadataManager.getPrefixTable().put(prefixPath, prefixInfo); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Set prefix acl operation failed for prefix path:{} acls:{}", + prefixPath, acls, ex); + } + throw ex; + } finally { + metadataManager.getLock().releasePrefixLock(prefixPath); + } + return true; + } + + /** + * Returns list of ACLs for given Ozone object. + * + * @param obj Ozone object. + * @throws IOException if there is error. + */ + @Override + public List getAcl(OzoneObj obj) throws IOException { + validateOzoneObj(obj); + String prefixPath = obj.getPath(); + metadataManager.getLock().acquirePrefixLock(prefixPath); + try { + String longestPrefix = prefixTree.getLongestPrefix(prefixPath); + if (prefixPath.equals(longestPrefix)) { + RadixNode lastNode = + prefixTree.getLastNodeInPrefixPath(prefixPath); + if (lastNode != null && lastNode.getValue() != null) { + return lastNode.getValue().getAcls(); + } + } + } finally { + metadataManager.getLock().releasePrefixLock(prefixPath); + } + return EMPTY_ACL_LIST; + } + + @Override + public List getLongestPrefixPath(String path) { + String prefixPath = prefixTree.getLongestPrefix(path); + metadataManager.getLock().acquirePrefixLock(prefixPath); + try { + return prefixTree.getLongestPrefixPath(prefixPath).stream() + .map(c -> c.getValue()).collect(Collectors.toList()); + } finally { + metadataManager.getLock().releasePrefixLock(prefixPath); + } + } + + /** + * Helper method to validate ozone object. + * @param obj + * */ + private void validateOzoneObj(OzoneObj obj) throws OMException { + Objects.requireNonNull(obj); + + if (!obj.getResourceType().equals(PREFIX)) { + throw new IllegalArgumentException("Unexpected argument passed to " + + "PrefixManager. 
OzoneObj type:" + obj.getResourceType()); + } + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + String prefixName = obj.getPrefixName(); + + if (Strings.isNullOrEmpty(volume)) { + throw new OMException("Volume name is required.", VOLUME_NOT_FOUND); + } + if (Strings.isNullOrEmpty(bucket)) { + throw new OMException("Bucket name is required.", BUCKET_NOT_FOUND); + } + if (Strings.isNullOrEmpty(prefixName)) { + throw new OMException("Prefix name is required.", PREFIX_NOT_FOUND); + } + if (!prefixName.endsWith("/")) { + throw new OMException("Invalid prefix name: " + prefixName, + PREFIX_NOT_FOUND); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java index 46ba58dfe763d..bff883dc7fef4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.fs; +import org.apache.hadoop.ozone.om.IOzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; @@ -29,7 +30,7 @@ /** * Ozone Manager FileSystem interface. */ -public interface OzoneManagerFS { +public interface OzoneManagerFS extends IOzoneAcl { OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException; void createDirectory(OmKeyArgs args) throws IOException; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index fb323fe0c63e3..e9e6b2504a85b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -23,6 +23,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.BitSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -47,21 +48,38 @@ import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.server.SCMConfigurator; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.security.UserGroupInformation; import 
org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.junit.After; +import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; -import org.junit.AfterClass; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; import org.mockito.Mockito; -import static org.apache.hadoop.ozone.OzoneConfigKeys.*; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; /** @@ -69,6 +87,7 @@ */ public class TestKeyManagerImpl { + private static PrefixManager prefixManager; private static KeyManagerImpl keyManager; private static VolumeManagerImpl volumeManager; private static BucketManagerImpl bucketManager; @@ -82,6 +101,9 @@ public class TestKeyManagerImpl { private static final String BUCKET_NAME = "bucket1"; private static final String VOLUME_NAME = "vol1"; + @Rule + public ExpectedException exception = ExpectedException.none(); + @BeforeClass public static void setUp() throws Exception { conf = new OzoneConfiguration(); @@ -105,6 +127,8 @@ public static void setUp() throws Exception { keyManager = new KeyManagerImpl(scm.getBlockProtocolServer(), metadataManager, conf, "om1", null); + prefixManager = new PrefixManagerImpl(metadataManager); + Mockito.when(mockScmBlockLocationProtocol .allocateBlock(Mockito.anyLong(), Mockito.anyInt(), Mockito.any(ReplicationType.class), @@ -323,6 +347,213 @@ public void testOpenFile() throws IOException { } } + + @Test + public void testPrefixAclOps() throws IOException { + String volumeName = "vol1"; + String bucketName = "bucket1"; + String prefix1 = "pf1/"; + + OzoneObj ozPrefix1 = new OzoneObjInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setPrefixName(prefix1) + .setResType(OzoneObj.ResourceType.PREFIX) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); + + OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", + ACLType.READ); + prefixManager.addAcl(ozPrefix1, ozAcl1); + + List ozAclGet = prefixManager.getAcl(ozPrefix1); + Assert.assertEquals(1, ozAclGet.size()); + Assert.assertEquals(ozAcl1, ozAclGet.get(0)); + + List acls = new ArrayList<>(); + OzoneAcl ozAcl2 = new OzoneAcl(ACLIdentityType.USER, "admin", + ACLType.ALL); + + BitSet rwRights = new BitSet(); + rwRights.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); + rwRights.set(IAccessAuthorizer.ACLType.READ.ordinal()); + OzoneAcl ozAcl3 = new OzoneAcl(ACLIdentityType.GROUP, "dev", + rwRights); + + BitSet wRights = new BitSet(); + wRights.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); + OzoneAcl ozAcl4 = new OzoneAcl(ACLIdentityType.GROUP, "dev", + wRights); + + BitSet rRights = new BitSet(); + rRights.set(IAccessAuthorizer.ACLType.READ.ordinal()); + OzoneAcl ozAcl5 = new OzoneAcl(ACLIdentityType.GROUP, "dev", + rRights); + + acls.add(ozAcl2); + acls.add(ozAcl3); + + prefixManager.setAcl(ozPrefix1, acls); + ozAclGet = prefixManager.getAcl(ozPrefix1); + Assert.assertEquals(2, ozAclGet.size()); + + int matchEntries = 0; + for (OzoneAcl acl : ozAclGet) { + if (acl.getType() == ACLIdentityType.GROUP) { + Assert.assertEquals(ozAcl3, acl); + matchEntries++; + } + if (acl.getType() == ACLIdentityType.USER) { + Assert.assertEquals(ozAcl2, acl); + matchEntries++; + } + } + Assert.assertEquals(2, 
matchEntries); + + boolean result = prefixManager.removeAcl(ozPrefix1, ozAcl4); + Assert.assertEquals(true, result); + + ozAclGet = prefixManager.getAcl(ozPrefix1); + Assert.assertEquals(2, ozAclGet.size()); + + result = prefixManager.removeAcl(ozPrefix1, ozAcl3); + Assert.assertEquals(true, result); + ozAclGet = prefixManager.getAcl(ozPrefix1); + Assert.assertEquals(1, ozAclGet.size()); + + Assert.assertEquals(ozAcl2, ozAclGet.get(0)); + + // add dev:w + prefixManager.addAcl(ozPrefix1, ozAcl4); + ozAclGet = prefixManager.getAcl(ozPrefix1); + Assert.assertEquals(2, ozAclGet.size()); + + // add dev:r and validate the acl bitset combined + prefixManager.addAcl(ozPrefix1, ozAcl5); + ozAclGet = prefixManager.getAcl(ozPrefix1); + Assert.assertEquals(2, ozAclGet.size()); + + matchEntries = 0; + for (OzoneAcl acl : ozAclGet) { + if (acl.getType() == ACLIdentityType.GROUP) { + Assert.assertEquals(ozAcl3, acl); + matchEntries++; + } + if (acl.getType() == ACLIdentityType.USER) { + Assert.assertEquals(ozAcl2, acl); + matchEntries++; + } + } + Assert.assertEquals(2, matchEntries); + } + + @Test + public void testInvalidPrefixAcl() throws IOException { + String volumeName = "vol1"; + String bucketName = "bucket1"; + String prefix1 = "pf1/"; + + // Invalid prefix not ending with "/" + String invalidPrefix = "invalid/pf"; + OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", + ACLType.READ); + + OzoneObj ozInvalidPrefix = new OzoneObjInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setPrefixName(invalidPrefix) + .setResType(OzoneObj.ResourceType.PREFIX) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); + + // add acl with invalid prefix name + exception.expect(OMException.class); + exception.expectMessage("Invalid prefix name"); + prefixManager.addAcl(ozInvalidPrefix, ozAcl1); + + OzoneObj ozPrefix1 = new OzoneObjInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setPrefixName(prefix1) + .setResType(OzoneObj.ResourceType.PREFIX) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); + + + List ozAclGet = prefixManager.getAcl(ozPrefix1); + Assert.assertEquals(1, ozAclGet.size()); + Assert.assertEquals(ozAcl1, ozAclGet.get(0)); + + // get acl with invalid prefix name + exception.expect(OMException.class); + exception.expectMessage("Invalid prefix name"); + ozAclGet = prefixManager.getAcl(ozInvalidPrefix); + Assert.assertEquals(null, ozAcl1); + + // set acl with invalid prefix name + List ozoneAcls = new ArrayList(); + ozoneAcls.add(ozAcl1); + exception.expect(OMException.class); + exception.expectMessage("Invalid prefix name"); + prefixManager.setAcl(ozInvalidPrefix, ozoneAcls); + + // remove acl with invalid prefix name + exception.expect(OMException.class); + exception.expectMessage("Invalid prefix name"); + prefixManager.removeAcl(ozInvalidPrefix, ozAcl1); + } + + @Test + public void testLongestPrefixPath() throws IOException { + String volumeName = "vol1"; + String bucketName = "bucket1"; + String prefix1 = "pf1/pf11/pf111/pf1111/"; + String file1 = "pf1/pf11/file1"; + String file2 = "pf1/pf11/pf111/pf1111/file2"; + + OzoneObj ozPrefix1 = new OzoneObjInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setPrefixName(prefix1) + .setResType(OzoneObj.ResourceType.PREFIX) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); + + OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", + ACLType.READ); + prefixManager.addAcl(ozPrefix1, ozAcl1); + + OzoneObj ozFile1 = new OzoneObjInfo.Builder() + 
.setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(file1) + .setResType(OzoneObj.ResourceType.KEY) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); + + List prefixInfos = + prefixManager.getLongestPrefixPath(ozFile1.getPath()); + Assert.assertEquals(5, prefixInfos.size()); + + OzoneObj ozFile2 = new OzoneObjInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setPrefixName(file2) + .setResType(OzoneObj.ResourceType.KEY) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); + + prefixInfos = + prefixManager.getLongestPrefixPath(ozFile2.getPath()); + Assert.assertEquals(7, prefixInfos.size()); + // Only the last node has acl on it + Assert.assertEquals(ozAcl1, prefixInfos.get(6).getAcls().get(0)); + // All other nodes don't have acl value associate with it + for (int i = 0; i < 6; i++) { + Assert.assertEquals(null, prefixInfos.get(i)); + } + } + @Test public void testLookupFile() throws IOException { String keyName = RandomStringUtils.randomAlphabetic(5); From 88c53d516c0cfebf7558af5901d7e828aed0465e Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Wed, 12 Jun 2019 17:45:42 -0700 Subject: [PATCH 0180/1308] HDDS-1620. Implement Volume Write Requests to use Cache and DoubleBuffer. (#884) --- .../org/apache/hadoop/utils/db/Table.java | 10 + .../apache/hadoop/utils/db/TypedTable.java | 9 +- .../hadoop/utils/db/cache/CacheKey.java | 2 +- .../hadoop/utils/db/cache/CacheValue.java | 2 +- .../utils/db/cache/PartialTableCache.java | 6 + .../hadoop/utils/db/cache/TableCache.java | 9 + .../utils/db/cache/TestPartialTableCache.java | 8 +- .../hadoop/ozone/om/helpers/OmVolumeArgs.java | 23 ++ .../src/main/proto/OzoneManagerProtocol.proto | 2 +- hadoop-ozone/ozone-manager/pom.xml | 2 +- .../ozone/om/OmMetadataManagerImpl.java | 26 +- .../apache/hadoop/ozone/om/OzoneManager.java | 18 +- .../om/ratis/OzoneManagerDoubleBuffer.java | 6 +- .../om/ratis/OzoneManagerStateMachine.java | 53 +--- .../ratis/utils/OzoneManagerRatisUtils.java | 26 +- .../ozone/om/request/OMClientRequest.java | 6 +- .../request/bucket/OMBucketCreateRequest.java | 3 +- .../bucket/OMBucketSetPropertyRequest.java | 23 +- .../request/volume/OMVolumeCreateRequest.java | 196 +++++++++++++ .../request/volume/OMVolumeDeleteRequest.java | 207 ++++++++++++++ .../om/request/volume/OMVolumeRequest.java | 98 +++++++ .../volume/OMVolumeSetOwnerRequest.java | 212 ++++++++++++++ .../volume/OMVolumeSetQuotaRequest.java | 166 +++++++++++ .../ozone/om/request/volume/package-info.java | 22 ++ .../bucket/OMBucketCreateResponse.java | 2 + .../bucket/OMBucketDeleteResponse.java | 3 + .../bucket/OMBucketSetPropertyResponse.java | 3 + .../{ => volume}/OMVolumeCreateResponse.java | 30 +- .../{ => volume}/OMVolumeDeleteResponse.java | 29 +- .../volume/OMVolumeSetOwnerResponse.java | 79 ++++++ .../volume/OMVolumeSetQuotaResponse.java | 55 ++++ .../om/response/volume/package-info.java | 22 ++ .../OzoneManagerHARequestHandlerImpl.java | 78 ++---- ...zoneManagerDoubleBufferWithOMResponse.java | 2 +- .../ozone/om/request/TestOMRequestUtils.java | 76 ++++- .../volume/TestOMVolumeCreateRequest.java | 265 ++++++++++++++++++ .../volume/TestOMVolumeDeleteRequest.java | 222 +++++++++++++++ .../volume/TestOMVolumeSetOwnerRequest.java | 204 ++++++++++++++ .../volume/TestOMVolumeSetQuotaRequest.java | 195 +++++++++++++ .../ozone/om/request/volume/package-info.java | 21 ++ .../volume/TestOMVolumeCreateResponse.java | 125 +++++++++ .../volume/TestOMVolumeDeleteResponse.java | 130 +++++++++ 
.../volume/TestOMVolumeSetOwnerResponse.java | 142 ++++++++++ .../volume/TestOMVolumeSetQuotaResponse.java | 117 ++++++++ 44 files changed, 2778 insertions(+), 157 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java rename hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/{ => volume}/OMVolumeCreateResponse.java (67%) rename hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/{ => volume}/OMVolumeDeleteResponse.java (65%) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java index 35243e8b147ae..29627a66ba757 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java @@ -20,6 +20,8 @@ package org.apache.hadoop.utils.db; import java.io.IOException; +import java.util.Iterator; +import java.util.Map; import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.classification.InterfaceStability; @@ -131,6 +133,14 @@ default void cleanupCache(long epoch) { throw new NotImplementedException("cleanupCache is not implemented"); } + /** + * Return cache 
iterator maintained for this table. + */ + default Iterator, CacheValue>> + cacheIterator() { + throw new NotImplementedException("cacheIterator is not implemented"); + } + /** * Class used to represent the key and value pair of a db entry. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index 2562b1a6abc9f..fa425fc52aa3b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -19,6 +19,8 @@ package org.apache.hadoop.utils.db; import java.io.IOException; +import java.util.Iterator; +import java.util.Map; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.utils.db.cache.CacheKey; @@ -82,7 +84,7 @@ public boolean isEmpty() throws IOException { @Override public boolean isExist(KEY key) throws IOException { CacheValue cacheValue= cache.get(new CacheKey<>(key)); - return (cacheValue != null && cacheValue.getValue() != null) || + return (cacheValue != null && cacheValue.getCacheValue() != null) || rawTable.isExist(codecRegistry.asRawData(key)); } @@ -109,7 +111,7 @@ public VALUE get(KEY key) throws IOException { return getFromTable(key); } else { // We have a value in cache, return the value. - return cacheValue.getValue(); + return cacheValue.getCacheValue(); } } @@ -156,6 +158,9 @@ public void addCacheEntry(CacheKey cacheKey, cache.put(cacheKey, cacheValue); } + public Iterator, CacheValue>> cacheIterator() { + return cache.iterator(); + } @Override public void cleanupCache(long epoch) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheKey.java index f928e4775a546..8879611a208e6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheKey.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheKey.java @@ -33,7 +33,7 @@ public CacheKey(KEY key) { this.key = key; } - public KEY getKey() { + public KEY getCacheKey() { return key; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java index 34f77ae175295..ad227246754d7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java @@ -36,7 +36,7 @@ public CacheValue(Optional value, long epoch) { this.epoch = epoch; } - public VALUE getValue() { + public VALUE getCacheValue() { return value.orNull(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java index fc3009605b4f1..7d16d04df194a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java @@ -20,6 +20,7 @@ package org.apache.hadoop.utils.db.cache; import java.util.Iterator; +import java.util.Map; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; @@ -77,6 +78,11 @@ public int size() { return cache.size(); } + @Override + public Iterator> iterator() { + return 
cache.entrySet().iterator(); + } + private void evictCache(long epoch) { EpochEntry currentEntry = null; for (Iterator> iterator = epochEntries.iterator(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java index 70e0b33e92974..0536ed7061557 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java @@ -22,6 +22,9 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; +import java.util.Iterator; +import java.util.Map; + /** * Cache used for RocksDB tables. * @param @@ -60,4 +63,10 @@ public interface TableCache> iterator(); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java index f70665960e2e2..736ae1b6161b5 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java @@ -51,7 +51,7 @@ public void testPartialTableCache() { for (int i=0; i < 10; i++) { Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); } // On a full table cache if some one calls cleanup it is a no-op. @@ -59,7 +59,7 @@ public void testPartialTableCache() { for (int i=5; i < 10; i++) { Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); } } @@ -95,7 +95,7 @@ public void testPartialTableCacheParallel() throws Exception { // Check we have first 10 entries in cache. for (int i=1; i <= 10; i++) { Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); } int deleted = 5; @@ -115,7 +115,7 @@ public void testPartialTableCacheParallel() throws Exception { // Check if we have remaining entries. 
for (int i=6; i <= totalCount; i++) { Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); } tableCache.cleanup(10); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index 95ed231c2e0a7..2049d72350dc5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -22,6 +22,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; @@ -154,6 +155,28 @@ public Map toAuditMap() { return auditMap; } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + OmVolumeArgs that = (OmVolumeArgs) o; + return creationTime == that.creationTime && + quotaInBytes == that.quotaInBytes && + Objects.equals(adminName, that.adminName) && + Objects.equals(ownerName, that.ownerName) && + Objects.equals(volume, that.volume); + } + + @Override + public int hashCode() { + return Objects.hash(adminName, ownerName, volume, creationTime, + quotaInBytes); + } + /** * Builder for OmVolumeArgs. */ diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index 2c4766a89524f..2ae466ebb2c80 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -287,7 +287,7 @@ message VolumeInfo { optional uint64 quotaInBytes = 4; repeated hadoop.hdds.KeyValue metadata = 5; repeated OzoneAclInfo volumeAcls = 6; - required uint64 creationTime = 7; + optional uint64 creationTime = 7; } /** diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 0f5ae75a8cdad..fa68398b844fc 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -53,7 +53,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.mockito mockito-core - 2.2.0 + 2.28.2 test diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index fb7c3b8c2cda2..c0b7bdc94a15f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -20,7 +20,9 @@ import java.io.IOException; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.BlockID; @@ -62,6 +64,9 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import org.apache.hadoop.utils.db.TypedTable; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; import org.eclipse.jetty.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -451,10 +456,27 @@ private boolean startsWith(byte[] 
firstArray, byte[] secondArray) { public boolean isVolumeEmpty(String volume) throws IOException { String volumePrefix = getVolumeKey(volume + OM_KEY_PREFIX); + // First check in bucket table cache. + Iterator, CacheValue>> iterator = + ((TypedTable< String, OmBucketInfo>) bucketTable).cacheIterator(); + while (iterator.hasNext()) { + Map.Entry< CacheKey< String >, CacheValue< OmBucketInfo > > entry = + iterator.next(); + String key = entry.getKey().getCacheKey(); + OmBucketInfo omBucketInfo = entry.getValue().getCacheValue(); + // Making sure that entry is not for delete bucket request. + if (key.startsWith(volumePrefix) && omBucketInfo != null) { + return false; + } + } + try (TableIterator> bucketIter = bucketTable.iterator()) { KeyValue kv = bucketIter.seek(volumePrefix); - if (kv != null && kv.getKey().startsWith(volumePrefix)) { + // During iteration from DB, check in mean time if this bucket is not + // marked for delete. + if (kv != null && kv.getKey().startsWith(volumePrefix) && + bucketTable.get(kv.getKey()) != null) { return false; // we found at least one bucket with this volume prefix. } } @@ -473,6 +495,8 @@ public boolean isVolumeEmpty(String volume) throws IOException { public boolean isBucketEmpty(String volume, String bucket) throws IOException { String keyPrefix = getBucketKey(volume, bucket); + //TODO: When Key ops are converted in to HA model, use cache also to + // determine bucket is empty or not. try (TableIterator> keyIter = keyTable.iterator()) { KeyValue kv = keyIter.seek(keyPrefix); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 9ce581b5865fe..4041670eaa9ce 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -198,6 +198,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME_DEFAULT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; @@ -276,12 +278,20 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private static String keyProviderUriKeyName = CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH; + // Adding parameters needed for VolumeRequests here, so that during request + // execution, we can get from ozoneManager. 
+ private long maxUserVolumeCount; + private OzoneManager(OzoneConfiguration conf) throws IOException, AuthenticationException { super(OzoneVersionInfo.OZONE_VERSION_INFO); Preconditions.checkNotNull(conf); configuration = conf; + this.maxUserVolumeCount = conf.getInt(OZONE_OM_USER_MAX_VOLUME, + OZONE_OM_USER_MAX_VOLUME_DEFAULT); + Preconditions.checkArgument(this.maxUserVolumeCount > 0, + OZONE_OM_USER_MAX_VOLUME + " value should be greater than zero"); omStorage = new OMStorage(conf); omId = omStorage.getOmId(); if (omStorage.getState() != StorageState.INITIALIZED) { @@ -3201,7 +3211,11 @@ public OMFailoverProxyProvider getOMFailoverProxyProvider() { return null; } - public OMMetrics getOmMetrics() { - return metrics; + /** + * Return maximum volumes count per user. + * @return maxUserVolumeCount + */ + public long getMaxUserVolumeCount() { + return maxUserVolumeCount; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index 4927b4c2e7bdb..810311583c514 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -142,11 +142,13 @@ private void flushTransactions() { } private void cleanupCache(long lastRatisTransactionIndex) { - // As now only bucket transactions are handled only called cleanupCache - // on bucketTable. + // As now only volume and bucket transactions are handled only called + // cleanupCache on bucketTable. // TODO: After supporting all write operations we need to call // cleanupCache on the tables only when buffer has entries for that table. 
omMetadataManager.getBucketTable().cleanupCache(lastRatisTransactionIndex); + omMetadataManager.getVolumeTable().cleanupCache(lastRatisTransactionIndex); + omMetadataManager.getUserTable().cleanupCache(lastRatisTransactionIndex); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 7160b49adcd45..2577cb5dbebdb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -27,7 +27,6 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis .ContainerStateMachine; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -189,53 +188,21 @@ public void notifyNotLeader(Collection pendingEntries) private TransactionContext handleStartTransactionRequests( RaftClientRequest raftClientRequest, OMRequest omRequest) { - OMRequest newOmRequest = null; - try { - switch (omRequest.getCmdType()) { - case CreateVolume: - case SetVolumeProperty: - case DeleteVolume: - newOmRequest = handler.handleStartTransaction(omRequest); - break; - case AllocateBlock: - return handleAllocateBlock(raftClientRequest, omRequest); - case CreateKey: - return handleCreateKeyRequest(raftClientRequest, omRequest); - case InitiateMultiPartUpload: - return handleInitiateMultipartUpload(raftClientRequest, omRequest); - default: - return TransactionContext.newBuilder() - .setClientRequest(raftClientRequest) - .setStateMachine(this) - .setServerRole(RaftProtos.RaftPeerRole.LEADER) - .setLogData(raftClientRequest.getMessage().getContent()) - .build(); - } - } catch (IOException ex) { - TransactionContext transactionContext = TransactionContext.newBuilder() + switch (omRequest.getCmdType()) { + case AllocateBlock: + return handleAllocateBlock(raftClientRequest, omRequest); + case CreateKey: + return handleCreateKeyRequest(raftClientRequest, omRequest); + case InitiateMultiPartUpload: + return handleInitiateMultipartUpload(raftClientRequest, omRequest); + default: + return TransactionContext.newBuilder() .setClientRequest(raftClientRequest) .setStateMachine(this) .setServerRole(RaftProtos.RaftPeerRole.LEADER) + .setLogData(raftClientRequest.getMessage().getContent()) .build(); - if (ex instanceof OMException) { - IOException ioException = - new IOException(ex.getMessage() + STATUS_CODE + - ((OMException) ex).getResult()); - transactionContext.setException(ioException); - } else { - transactionContext.setException(ex); - } - LOG.error("Exception in startTransaction for cmdType " + - omRequest.getCmdType(), ex); - return transactionContext; } - TransactionContext transactionContext = TransactionContext.newBuilder() - .setClientRequest(raftClientRequest) - .setStateMachine(this) - .setServerRole(RaftProtos.RaftPeerRole.LEADER) - .setLogData(OMRatisHelper.convertRequestToByteString(newOmRequest)) - .build(); - return transactionContext; } private TransactionContext handleInitiateMultipartUpload( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index 696c015db20ed..1fbecdc77534f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -17,11 +17,16 @@ package org.apache.hadoop.ozone.om.ratis.utils; +import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest; import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest; import org.apache.hadoop.ozone.om.request.bucket.OMBucketSetPropertyRequest; import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest; +import org.apache.hadoop.ozone.om.request.volume.OMVolumeDeleteRequest; +import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetOwnerRequest; +import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetQuotaRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; @@ -43,10 +48,27 @@ private OzoneManagerRatisUtils() { * @return OMClientRequest * @throws IOException */ - public static OMClientRequest createClientRequest(OMRequest omRequest) - throws IOException { + public static OMClientRequest createClientRequest(OMRequest omRequest) { Type cmdType = omRequest.getCmdType(); switch (cmdType) { + case CreateVolume: + return new OMVolumeCreateRequest(omRequest); + case SetVolumeProperty: + boolean hasQuota = omRequest.getSetVolumePropertyRequest() + .hasQuotaInBytes(); + boolean hasOwner = omRequest.getSetVolumePropertyRequest().hasOwnerName(); + Preconditions.checkState(hasOwner || hasQuota, "Either Quota or owner " + + "should be set in the SetVolumeProperty request"); + Preconditions.checkState(!(hasOwner && hasQuota), "Either Quota or " + + "owner should be set in the SetVolumeProperty request. 
Should not " + + "set both"); + if (hasQuota) { + return new OMVolumeSetQuotaRequest(omRequest); + } else { + return new OMVolumeSetOwnerRequest(omRequest); + } + case DeleteVolume: + return new OMVolumeDeleteRequest(omRequest); case CreateBucket: return new OMBucketCreateRequest(omRequest); case DeleteBucket: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index e0c17c4a4dbd8..7ddb1874bad89 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -50,7 +50,7 @@ */ public abstract class OMClientRequest implements RequestAuditor { - private final OMRequest omRequest; + private OMRequest omRequest; public OMClientRequest(OMRequest omRequest) { Preconditions.checkNotNull(omRequest); @@ -69,7 +69,8 @@ public OMClientRequest(OMRequest omRequest) { */ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - return getOmRequest().toBuilder().setUserInfo(getUserInfo()).build(); + omRequest = getOmRequest().toBuilder().setUserInfo(getUserInfo()).build(); + return omRequest; } /** @@ -210,4 +211,5 @@ public Map buildVolumeAuditMap(String volume) { auditMap.put(OzoneConsts.VOLUME, volume); return auditMap; } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 5970e5de5a77d..c4e72c6d67acd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -97,8 +97,9 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } newCreateBucketRequest.setBucketInfo(newBucketInfo.build()); + return getOmRequest().toBuilder().setUserInfo(getUserInfo()) - .setCreateBucketRequest(newCreateBucketRequest.build()).build(); + .setCreateBucketRequest(newCreateBucketRequest.build()).build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 4c7057cfe2014..fa8c939d2f952 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -73,20 +73,15 @@ public OMBucketSetPropertyRequest(OMRequest omRequest) { public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long transactionLogIndex) { + SetBucketPropertyRequest setBucketPropertyRequest = getOmRequest().getSetBucketPropertyRequest(); Preconditions.checkNotNull(setBucketPropertyRequest); - OMMetrics omMetrics = ozoneManager.getOmMetrics(); + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumBucketUpdates(); - // This will never be null, on a real Ozone cluster. For tests this might - // be null. using mockito, to set omMetrics object, but still getting - // null. For now added this not null check. 
- //TODO: Removed not null check from here, once tests got fixed. - if (omMetrics != null) { - omMetrics.incNumBucketUpdates(); - } OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); @@ -113,13 +108,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volumeName, bucketName, null); } } catch (IOException ex) { - if (omMetrics != null) { - omMetrics.incNumBucketUpdateFails(); - } - auditLog(auditLogger, buildAuditMessage(OMAction.UPDATE_BUCKET, - omBucketArgs.toAuditMap(), ex, userInfo)); LOG.error("Setting bucket property failed for bucket:{} in volume:{}", bucketName, volumeName, ex); + omMetrics.incNumBucketUpdateFails(); + auditLog(auditLogger, buildAuditMessage(OMAction.UPDATE_BUCKET, + omBucketArgs.toAuditMap(), ex, userInfo)); return new OMBucketSetPropertyResponse(omBucketInfo, createErrorOMResponse(omResponse, ex)); } @@ -204,11 +197,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, SetBucketPropertyResponse.newBuilder().build()); return new OMBucketSetPropertyResponse(omBucketInfo, omResponse.build()); } else { - if (omMetrics != null) { - omMetrics.incNumBucketUpdateFails(); - } LOG.error("Setting bucket property failed for bucket:{} in volume:{}", bucketName, volumeName, exception); + omMetrics.incNumBucketUpdateFails(); return new OMBucketSetPropertyResponse(omBucketInfo, createErrorOMResponse(omResponse, exception)); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java new file mode 100644 index 0000000000000..993cd9d88a011 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java @@ -0,0 +1,196 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.volume; + +import java.io.IOException; + +import com.google.common.base.Optional; +import com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateVolumeRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateVolumeResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .VolumeInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .VolumeList; +import org.apache.hadoop.util.Time; + +/** + * Handles volume create request. 
+ */ +public class OMVolumeCreateRequest extends OMClientRequest + implements OMVolumeRequest { + private static final Logger LOG = + LoggerFactory.getLogger(OMVolumeCreateRequest.class); + + public OMVolumeCreateRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + + VolumeInfo volumeInfo = + getOmRequest().getCreateVolumeRequest().getVolumeInfo(); + + // Set creation time + VolumeInfo updatedVolumeInfo = + volumeInfo.toBuilder().setCreationTime(Time.now()).build(); + + + return getOmRequest().toBuilder().setCreateVolumeRequest( + CreateVolumeRequest.newBuilder().setVolumeInfo(updatedVolumeInfo)) + .setUserInfo(getUserInfo()) + .build(); + + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long transactionLogIndex) { + + CreateVolumeRequest createVolumeRequest = + getOmRequest().getCreateVolumeRequest(); + Preconditions.checkNotNull(createVolumeRequest); + VolumeInfo volumeInfo = createVolumeRequest.getVolumeInfo(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumVolumeCreates(); + + String volume = volumeInfo.getVolume(); + String owner = volumeInfo.getOwnerName(); + + OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( + OzoneManagerProtocolProtos.Type.CreateVolume).setStatus( + OzoneManagerProtocolProtos.Status.OK).setSuccess(true); + + OmVolumeArgs omVolumeArgs = null; + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + + // Doing this here, so we can do protobuf conversion outside of lock. + try { + omVolumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo); + // check Acl + if (ozoneManager.getAclsEnabled()) { + checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, + OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.CREATE, volume, + null, null); + } + } catch (IOException ex) { + omMetrics.incNumVolumeCreateFails(); + auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_VOLUME, + buildVolumeAuditMap(volume), ex, userInfo)); + LOG.error("Volume creation failed for user:{} volume:{}", owner, volume, + ex); + return new OMVolumeCreateResponse(omVolumeArgs, null, + createErrorOMResponse(omResponse, ex)); + } + + + + String dbUserKey = omMetadataManager.getUserKey(owner); + String dbVolumeKey = omMetadataManager.getVolumeKey(volume); + VolumeList volumeList = null; + + // acquire lock. + omMetadataManager.getLock().acquireUserLock(owner); + omMetadataManager.getLock().acquireVolumeLock(volume); + + IOException exception = null; + try { + OmVolumeArgs dbVolumeArgs = + omMetadataManager.getVolumeTable().get(dbVolumeKey); + + // Validation: Check if volume already exists + if (dbVolumeArgs != null) { + LOG.debug("volume:{} already exists", omVolumeArgs.getVolume()); + throw new OMException("Volume already exists", + OMException.ResultCodes.VOLUME_ALREADY_EXISTS); + } + + volumeList = omMetadataManager.getUserTable().get(dbUserKey); + volumeList = addVolumeToOwnerList(volumeList, + volume, owner, ozoneManager.getMaxUserVolumeCount()); + + // Update cache: Update user and volume cache. 
+ omMetadataManager.getUserTable().addCacheEntry(new CacheKey<>(dbUserKey), + new CacheValue<>(Optional.of(volumeList), transactionLogIndex)); + + omMetadataManager.getVolumeTable().addCacheEntry( + new CacheKey<>(dbVolumeKey), + new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex)); + + } catch (IOException ex) { + exception = ex; + } finally { + omMetadataManager.getLock().releaseVolumeLock(volumeInfo.getVolume()); + omMetadataManager.getLock().releaseUserLock(dbUserKey); + } + + // Performing audit logging outside of the lock. + auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_VOLUME, + omVolumeArgs.toAuditMap(), exception, userInfo)); + + // return response after releasing lock. + if (exception == null) { + LOG.debug("created volume:{} for user:{}", omVolumeArgs.getVolume(), + owner); + omMetrics.incNumVolumes(); + omResponse.setCreateVolumeResponse(CreateVolumeResponse.newBuilder() + .build()); + return new OMVolumeCreateResponse(omVolumeArgs, volumeList, + omResponse.build()); + } else { + LOG.error("Volume creation failed for user:{} volume:{}", owner, + volumeInfo.getVolume(), exception); + omMetrics.incNumVolumeCreateFails(); + return new OMVolumeCreateResponse(omVolumeArgs, volumeList, + createErrorOMResponse(omResponse, exception)); + } + } + + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java new file mode 100644 index 0000000000000..65f7df51bf2b0 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java @@ -0,0 +1,207 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.volume; + +import java.io.IOException; + +import com.google.common.base.Optional; +import com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.volume.OMVolumeDeleteResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .DeleteVolumeRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .DeleteVolumeResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; + + +/** + * Handles volume delete request. 
+ */ +public class OMVolumeDeleteRequest extends OMClientRequest + implements OMVolumeRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMVolumeDeleteRequest.class); + + public OMVolumeDeleteRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long transactionLogIndex) { + + DeleteVolumeRequest deleteVolumeRequest = + getOmRequest().getDeleteVolumeRequest(); + Preconditions.checkNotNull(deleteVolumeRequest); + + String volume = deleteVolumeRequest.getVolumeName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumVolumeDeletes(); + + OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( + OzoneManagerProtocolProtos.Type.DeleteVolume).setStatus( + OzoneManagerProtocolProtos.Status.OK).setSuccess(true); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + + try { + // check Acl + if (ozoneManager.getAclsEnabled()) { + checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, + OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.DELETE, volume, + null, null); + } + } catch (IOException ex) { + LOG.error("Volume deletion failed for volume:{}", volume, ex); + omMetrics.incNumVolumeDeleteFails(); + auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_VOLUME, + buildVolumeAuditMap(volume), ex, userInfo)); + return new OMVolumeCreateResponse(null, null, + createErrorOMResponse(omResponse, ex)); + } + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + + OmVolumeArgs omVolumeArgs = null; + String owner = null; + + omMetadataManager.getLock().acquireVolumeLock(volume); + try { + owner = getVolumeInfo(omMetadataManager, volume).getOwnerName(); + } catch (IOException ex) { + LOG.error("Volume deletion failed for volume:{}", volume, ex); + omMetrics.incNumVolumeDeleteFails(); + auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_VOLUME, + buildVolumeAuditMap(volume), ex, userInfo)); + return new OMVolumeDeleteResponse(null, null, null, + createErrorOMResponse(omResponse, ex)); + } finally { + omMetadataManager.getLock().releaseVolumeLock(volume); + } + + // Release and reacquire lock for now it will not be a problem for now, as + // applyTransaction serializes the operation's. + // TODO: Revisit this logic once HDDS-1672 checks in. + + // We cannot acquire user lock holding volume lock, so released volume + // lock, and acquiring user and volume lock. 
+ + omMetadataManager.getLock().acquireUserLock(owner); + omMetadataManager.getLock().acquireVolumeLock(volume); + + String dbUserKey = omMetadataManager.getUserKey(owner); + String dbVolumeKey = omMetadataManager.getVolumeKey(volume); + + IOException exception = null; + OzoneManagerProtocolProtos.VolumeList newVolumeList = null; + try { + if (!omMetadataManager.isVolumeEmpty(volume)) { + LOG.debug("volume:{} is not empty", volume); + throw new OMException(OMException.ResultCodes.VOLUME_NOT_EMPTY); + } + + newVolumeList = omMetadataManager.getUserTable().get(owner); + + // delete the volume from the owner list + // as well as delete the volume entry + newVolumeList = delVolumeFromOwnerList(newVolumeList, volume, owner); + + omMetadataManager.getUserTable().addCacheEntry(new CacheKey<>(dbUserKey), + new CacheValue<>(Optional.of(newVolumeList), transactionLogIndex)); + + omMetadataManager.getVolumeTable().addCacheEntry( + new CacheKey<>(dbVolumeKey), new CacheValue<>(Optional.absent(), + transactionLogIndex)); + + } catch (IOException ex) { + exception = ex; + + } finally { + omMetadataManager.getLock().releaseVolumeLock(volume); + omMetadataManager.getLock().releaseUserLock(owner); + } + + // Performing audit logging outside of the lock. + auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_VOLUME, + buildVolumeAuditMap(volume), exception, userInfo)); + + // return response after releasing lock. + if (exception == null) { + LOG.debug("Volume deleted for user:{} volume:{}", owner, volume); + omMetrics.decNumVolumes(); + omResponse.setDeleteVolumeResponse( + DeleteVolumeResponse.newBuilder().build()); + return new OMVolumeDeleteResponse(volume, owner, newVolumeList, + omResponse.build()); + } else { + LOG.error("Volume deletion failed for user:{} volume:{}", + owner, volume, exception); + omMetrics.incNumVolumeDeleteFails(); + return new OMVolumeDeleteResponse(null, null, null, + createErrorOMResponse(omResponse, exception)); + } + + } + + /** + * Return volume info for the specified volume. This method should be + * called after acquiring volume lock. + * @param omMetadataManager + * @param volume + * @return OmVolumeArgs + * @throws IOException + */ + private OmVolumeArgs getVolumeInfo(OMMetadataManager omMetadataManager, + String volume) throws IOException { + + String dbVolumeKey = omMetadataManager.getVolumeKey(volume); + OmVolumeArgs volumeArgs = + omMetadataManager.getVolumeTable().get(dbVolumeKey); + if (volumeArgs == null) { + throw new OMException("Volume " + volume + " is not found", + OMException.ResultCodes.VOLUME_NOT_FOUND); + } + return volumeArgs; + + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java new file mode 100644 index 0000000000000..417156b93f2bf --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.volume; + +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .VolumeList; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * Defines common methods required for volume requests. + */ +public interface OMVolumeRequest { + + /** + * Delete volume from user volume list. This method should be called after + * acquiring user lock. + * @param volumeList - current volume list owned by user. + * @param volume - volume which needs to deleted from the volume list. + * @param owner + * @return VolumeList - updated volume list for the user. + * @throws IOException + */ + default VolumeList delVolumeFromOwnerList(VolumeList volumeList, + String volume, String owner) throws IOException { + + List prevVolList = new ArrayList<>(); + + if (volumeList != null) { + prevVolList.addAll(volumeList.getVolumeNamesList()); + } else { + // No Volumes for this user + throw new OMException("User not found: " + owner, + OMException.ResultCodes.USER_NOT_FOUND); + } + + // Remove the volume from the list + prevVolList.remove(volume); + VolumeList newVolList = VolumeList.newBuilder() + .addAllVolumeNames(prevVolList).build(); + return newVolList; + } + + + /** + * Add volume to user volume list. This method should be called after + * acquiring user lock. + * @param volumeList - current volume list owned by user. + * @param volume - volume which needs to be added to this list. + * @param owner + * @param maxUserVolumeCount + * @return VolumeList - which is updated volume list. + * @throws OMException - if user has volumes greater than + * maxUserVolumeCount, an exception is thrown. + */ + default VolumeList addVolumeToOwnerList( + VolumeList volumeList, String volume, String owner, + long maxUserVolumeCount) throws IOException { + + // Check the volume count + if (volumeList != null && + volumeList.getVolumeNamesList().size() >= maxUserVolumeCount) { + throw new OMException("Too many volumes for user:" + owner, + OMException.ResultCodes.USER_TOO_MANY_VOLUMES); + } + + List prevVolList = new ArrayList<>(); + if (volumeList != null) { + prevVolList.addAll(volumeList.getVolumeNamesList()); + } + + // Add the new volume to the list + prevVolList.add(volume); + VolumeList newVolList = VolumeList.newBuilder() + .addAllVolumeNames(prevVolList).build(); + + return newVolList; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java new file mode 100644 index 0000000000000..0de2124b54ed9 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java @@ -0,0 +1,212 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.volume; + +import java.io.IOException; +import java.util.Map; + + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import com.google.common.base.Optional; +import com.google.common.base.Preconditions; + +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse; +import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetOwnerResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .SetVolumePropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .SetVolumePropertyResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; + +/** + * Handle set owner request for volume. + */ +public class OMVolumeSetOwnerRequest extends OMClientRequest + implements OMVolumeRequest { + private static final Logger LOG = + LoggerFactory.getLogger(OMVolumeSetOwnerRequest.class); + + public OMVolumeSetOwnerRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long transactionLogIndex) { + + SetVolumePropertyRequest setVolumePropertyRequest = + getOmRequest().getSetVolumePropertyRequest(); + + Preconditions.checkNotNull(setVolumePropertyRequest); + + OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( + OzoneManagerProtocolProtos.Type.SetVolumeProperty).setStatus( + OzoneManagerProtocolProtos.Status.OK).setSuccess(true); + + // In production this will never happen, this request will be called only + // when we have ownerName in setVolumePropertyRequest. 
+ if (!setVolumePropertyRequest.hasOwnerName()) { + omResponse.setStatus(OzoneManagerProtocolProtos.Status.INVALID_REQUEST) + .setSuccess(false); + return new OMVolumeSetOwnerResponse(null, null, null, null, + omResponse.build()); + } + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumVolumeUpdates(); + String volume = setVolumePropertyRequest.getVolumeName(); + String newOwner = setVolumePropertyRequest.getOwnerName(); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + + Map auditMap = buildVolumeAuditMap(volume); + auditMap.put(OzoneConsts.OWNER, newOwner); + try { + // check Acl + if (ozoneManager.getAclsEnabled()) { + checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, + OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL, + volume, null, null); + } + } catch (IOException ex) { + LOG.error("Changing volume ownership failed for user:{} volume:{}", + newOwner, volume); + omMetrics.incNumVolumeUpdateFails(); + auditLog(auditLogger, buildAuditMessage(OMAction.SET_OWNER, auditMap, + ex, userInfo)); + return new OMVolumeCreateResponse(null, null, + createErrorOMResponse(omResponse, ex)); + } + + + long maxUserVolumeCount = ozoneManager.getMaxUserVolumeCount(); + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + String dbVolumeKey = omMetadataManager.getVolumeKey(volume); + String oldOwner = null; + OzoneManagerProtocolProtos.VolumeList oldOwnerVolumeList = null; + OzoneManagerProtocolProtos.VolumeList newOwnerVolumeList = null; + OmVolumeArgs omVolumeArgs = null; + IOException exception = null; + + omMetadataManager.getLock().acquireUserLock(newOwner); + omMetadataManager.getLock().acquireVolumeLock(volume); + + boolean needToreleaseOldOwnerLock = false; + try { + omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); + + if (omVolumeArgs == null) { + LOG.debug("Changing volume ownership failed for user:{} volume:{}", + newOwner, volume); + throw new OMException("Volume " + volume + " is not found", + OMException.ResultCodes.VOLUME_NOT_FOUND); + } + + oldOwner = omVolumeArgs.getOwnerName(); + + + // Release and reacquire lock for now it will not be a problem, as + // applyTransaction serializes the operation's. + // TODO: Revisit this logic once HDDS-1672 checks in. + + // releasing volume lock, as to acquire user lock we need to release + // volume lock. + omMetadataManager.getLock().releaseVolumeLock(volume); + omMetadataManager.getLock().acquireUserLock(oldOwner); + omMetadataManager.getLock().acquireVolumeLock(volume); + + needToreleaseOldOwnerLock = true; + oldOwnerVolumeList = + omMetadataManager.getUserTable().get(oldOwner); + + oldOwnerVolumeList = delVolumeFromOwnerList( + oldOwnerVolumeList, volume, oldOwner); + + + newOwnerVolumeList = omMetadataManager.getUserTable().get(newOwner); + newOwnerVolumeList = addVolumeToOwnerList( + newOwnerVolumeList, volume, newOwner, maxUserVolumeCount); + + // Set owner with new owner name. + omVolumeArgs.setOwnerName(newOwner); + + // Update cache. 
+ omMetadataManager.getUserTable().addCacheEntry( + new CacheKey<>(omMetadataManager.getUserKey(newOwner)), + new CacheValue<>(Optional.of(newOwnerVolumeList), + transactionLogIndex)); + omMetadataManager.getUserTable().addCacheEntry( + new CacheKey<>(omMetadataManager.getUserKey(oldOwner)), + new CacheValue<>(Optional.of(oldOwnerVolumeList), + transactionLogIndex)); + omMetadataManager.getVolumeTable().addCacheEntry( + new CacheKey<>(dbVolumeKey), + new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex)); + + } catch (IOException ex) { + exception = ex; + } finally { + omMetadataManager.getLock().releaseVolumeLock(volume); + omMetadataManager.getLock().releaseUserLock(newOwner); + if (needToreleaseOldOwnerLock) { + omMetadataManager.getLock().releaseUserLock(oldOwner); + } + } + + // Performing audit logging outside of the lock. + auditLog(auditLogger, buildAuditMessage(OMAction.SET_OWNER, auditMap, + exception, userInfo)); + + // return response after releasing lock. + if (exception == null) { + LOG.debug("Successfully changed Owner of Volume {} from {} -> {}", volume, + oldOwner, newOwner); + omResponse.setSetVolumePropertyResponse( + SetVolumePropertyResponse.newBuilder().build()); + return new OMVolumeSetOwnerResponse(oldOwner, oldOwnerVolumeList, + newOwnerVolumeList, omVolumeArgs, omResponse.build()); + } else { + LOG.error("Changing volume ownership failed for user:{} volume:{}", + newOwner, volume, exception); + omMetrics.incNumVolumeUpdateFails(); + return new OMVolumeSetOwnerResponse(null, null, null, null, + createErrorOMResponse(omResponse, exception)); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java new file mode 100644 index 0000000000000..1d7097fc297d9 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.volume; + +import java.io.IOException; +import java.util.Map; + +import com.google.common.base.Optional; +import com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetQuotaResponse; +import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .SetVolumePropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .SetVolumePropertyResponse; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; + + +/** + * Handles set Quota request for volume. + */ +public class OMVolumeSetQuotaRequest extends OMClientRequest { + private static final Logger LOG = + LoggerFactory.getLogger(OMVolumeSetQuotaRequest.class); + + public OMVolumeSetQuotaRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long transactionLogIndex) { + + SetVolumePropertyRequest setVolumePropertyRequest = + getOmRequest().getSetVolumePropertyRequest(); + + Preconditions.checkNotNull(setVolumePropertyRequest); + + OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( + OzoneManagerProtocolProtos.Type.SetVolumeProperty).setStatus( + OzoneManagerProtocolProtos.Status.OK).setSuccess(true); + + + + // In production this will never happen, this request will be called only + // when we have quota in bytes is set in setVolumePropertyRequest. 
+ if (!setVolumePropertyRequest.hasQuotaInBytes()) { + omResponse.setStatus(OzoneManagerProtocolProtos.Status.INVALID_REQUEST) + .setSuccess(false); + return new OMVolumeSetQuotaResponse(null, + omResponse.build()); + } + + String volume = setVolumePropertyRequest.getVolumeName(); + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumVolumeUpdates(); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + Map auditMap = buildVolumeAuditMap(volume); + auditMap.put(OzoneConsts.QUOTA, + String.valueOf(setVolumePropertyRequest.getQuotaInBytes())); + + try { + // check Acl + if (ozoneManager.getAclsEnabled()) { + checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, + OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE, volume, + null, null); + } + } catch (IOException ex) { + LOG.error("Changing volume quota failed for volume:{} quota:{}", volume, + setVolumePropertyRequest.getQuotaInBytes(), ex); + omMetrics.incNumVolumeUpdateFails(); + auditLog(auditLogger, buildAuditMessage(OMAction.SET_QUOTA, auditMap, + ex, userInfo)); + return new OMVolumeCreateResponse(null, null, + createErrorOMResponse(omResponse, ex)); + } + + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + + IOException exception = null; + OmVolumeArgs omVolumeArgs = null; + + omMetadataManager.getLock().acquireVolumeLock(volume); + try { + String dbVolumeKey = omMetadataManager.getVolumeKey(volume); + omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); + + if (omVolumeArgs == null) { + LOG.debug("volume:{} does not exist", volume); + throw new OMException(OMException.ResultCodes.VOLUME_NOT_FOUND); + } + + omVolumeArgs.setQuotaInBytes(setVolumePropertyRequest.getQuotaInBytes()); + + // update cache. + omMetadataManager.getVolumeTable().addCacheEntry( + new CacheKey<>(dbVolumeKey), + new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex)); + + } catch (IOException ex) { + exception = ex; + } finally { + omMetadataManager.getLock().releaseVolumeLock(volume); + } + + // Performing audit logging outside of the lock. + auditLog(auditLogger, buildAuditMessage(OMAction.SET_QUOTA, auditMap, + exception, userInfo)); + + // return response after releasing lock. + if (exception == null) { + omResponse.setSetVolumePropertyResponse( + SetVolumePropertyResponse.newBuilder().build()); + return new OMVolumeSetQuotaResponse(omVolumeArgs, omResponse.build()); + } else { + omMetrics.incNumVolumeUpdateFails(); + LOG.error("Changing volume quota failed for volume:{} quota:{}", volume, + setVolumePropertyRequest.getQuotaInBytes(), exception); + return new OMVolumeSetQuotaResponse(null, + createErrorOMResponse(omResponse, exception)); + } + } + + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java new file mode 100644 index 0000000000000..708f70833510a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Package contains classes related to volume requests. + */ +package org.apache.hadoop.ozone.om.request.volume; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java index 0ad14f961895d..271d2708e9b02 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java @@ -45,6 +45,8 @@ public OMBucketCreateResponse(OmBucketInfo omBucketInfo, public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { + // For OmResponse with failure, this should do nothing. This method is + // not called in failure scenario in OM code. if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { String dbBucketKey = omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java index 1ccce9e5627ff..f76d6296f5d25 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java @@ -44,6 +44,9 @@ public OMBucketDeleteResponse( @Override public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { + + // For OmResponse with failure, this should do nothing. This method is + // not called in failure scenario in OM code. if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { String dbBucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java index 26de757f34de3..3213b7b6ab70f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java @@ -42,6 +42,9 @@ public OMBucketSetPropertyResponse(OmBucketInfo omBucketInfo, @Override public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { + + // For OmResponse with failure, this should do nothing. This method is + // not called in failure scenario in OM code. 
if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { String dbBucketKey = omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java similarity index 67% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeCreateResponse.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java index 4f7c11586e60f..bfce500ddaa0a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java @@ -16,12 +16,15 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om.response; +package org.apache.hadoop.ozone.om.response.volume; import java.io.IOException; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -47,21 +50,22 @@ public OMVolumeCreateResponse(OmVolumeArgs omVolumeArgs, public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { - String dbVolumeKey = - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()); - String dbUserKey = - omMetadataManager.getUserKey(omVolumeArgs.getOwnerName()); + // For OmResponse with failure, this should do nothing. This method is + // not called in failure scenario in OM code. 
+ if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { + String dbVolumeKey = + omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()); + String dbUserKey = + omMetadataManager.getUserKey(omVolumeArgs.getOwnerName()); - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, dbVolumeKey, - omVolumeArgs); - omMetadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey, - volumeList); - } - - public VolumeList getVolumeList() { - return volumeList; + omMetadataManager.getVolumeTable().putWithBatch(batchOperation, + dbVolumeKey, omVolumeArgs); + omMetadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey, + volumeList); + } } + @VisibleForTesting public OmVolumeArgs getOmVolumeArgs() { return omVolumeArgs; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java similarity index 65% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeDeleteResponse.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java index 44963a36f48e8..1672915e1c217 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMVolumeDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java @@ -16,11 +16,13 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om.response; +package org.apache.hadoop.ozone.om.response.volume; import java.io.IOException; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -46,17 +48,22 @@ public OMVolumeDeleteResponse(String volume, String owner, @Override public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { - String dbUserKey = omMetadataManager.getUserKey(owner); - VolumeList volumeList = updatedVolumeList; - if (updatedVolumeList.getVolumeNamesList().size() == 0) { - omMetadataManager.getUserTable().deleteWithBatch(batchOperation, - dbUserKey); - } else { - omMetadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey, - volumeList); + + // For OmResponse with failure, this should do nothing. This method is + // not called in failure scenario in OM code. 
+ if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { + String dbUserKey = omMetadataManager.getUserKey(owner); + VolumeList volumeList = updatedVolumeList; + if (updatedVolumeList.getVolumeNamesList().size() == 0) { + omMetadataManager.getUserTable().deleteWithBatch(batchOperation, + dbUserKey); + } else { + omMetadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey, + volumeList); + } + omMetadataManager.getVolumeTable().deleteWithBatch(batchOperation, + omMetadataManager.getVolumeKey(volume)); } - omMetadataManager.getVolumeTable().deleteWithBatch(batchOperation, - omMetadataManager.getVolumeKey(volume)); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java new file mode 100644 index 0000000000000..1d3088e852973 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.volume; + +import java.io.IOException; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.response.OMClientResponse; + +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .VolumeList; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.utils.db.BatchOperation; + +/** + * Response for set owner request. + */ +public class OMVolumeSetOwnerResponse extends OMClientResponse { + + private String oldOwner; + private VolumeList oldOwnerVolumeList; + private VolumeList newOwnerVolumeList; + private OmVolumeArgs newOwnerVolumeArgs; + + public OMVolumeSetOwnerResponse(String oldOwner, + VolumeList oldOwnerVolumeList, VolumeList newOwnerVolumeList, + OmVolumeArgs newOwnerVolumeArgs, OMResponse omResponse) { + super(omResponse); + this.oldOwner = oldOwner; + this.oldOwnerVolumeList = oldOwnerVolumeList; + this.newOwnerVolumeList = newOwnerVolumeList; + this.newOwnerVolumeArgs = newOwnerVolumeArgs; + } + + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + + // For OmResponse with failure, this should do nothing. This method is + // not called in failure scenario in OM code. + if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { + String oldOwnerKey = omMetadataManager.getUserKey(oldOwner); + String newOwnerKey = + omMetadataManager.getUserKey(newOwnerVolumeArgs.getOwnerName()); + if (oldOwnerVolumeList.getVolumeNamesList().size() == 0) { + omMetadataManager.getUserTable().deleteWithBatch(batchOperation, + oldOwnerKey); + } else { + omMetadataManager.getUserTable().putWithBatch(batchOperation, + oldOwnerKey, oldOwnerVolumeList); + } + omMetadataManager.getUserTable().putWithBatch(batchOperation, newOwnerKey, + newOwnerVolumeList); + + String dbVolumeKey = + omMetadataManager.getVolumeKey(newOwnerVolumeArgs.getVolume()); + omMetadataManager.getVolumeTable().putWithBatch(batchOperation, + dbVolumeKey, newOwnerVolumeArgs); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java new file mode 100644 index 0000000000000..594075a94a633 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.volume; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.utils.db.BatchOperation; + +import java.io.IOException; + +/** + * Response for set quota request. + */ +public class OMVolumeSetQuotaResponse extends OMClientResponse { + private OmVolumeArgs omVolumeArgs; + + public OMVolumeSetQuotaResponse(OmVolumeArgs omVolumeArgs, + OMResponse omResponse) { + super(omResponse); + this.omVolumeArgs = omVolumeArgs; + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + + // For OmResponse with failure, this should do nothing. This method is + // not called in failure scenario in OM code. + if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { + omMetadataManager.getVolumeTable().putWithBatch(batchOperation, + omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), + omVolumeArgs); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java new file mode 100644 index 0000000000000..478a19de18b49 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Package contains classes related to volume requests. + */ +package org.apache.hadoop.ozone.om.response.volume; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java index 3a6d0df4ed150..0b37874a04d7a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java @@ -96,58 +96,36 @@ public OMResponse handleApplyTransaction(OMRequest omRequest, long transactionLogIndex) { LOG.debug("Received OMRequest: {}, ", omRequest); Type cmdType = omRequest.getCmdType(); - OMResponse.Builder responseBuilder = - OMResponse.newBuilder().setCmdType(cmdType) - .setStatus(Status.OK); - try { - switch (cmdType) { - case CreateVolume: - responseBuilder.setCreateVolumeResponse( - handleCreateVolumeApply(omRequest)); - break; - case SetVolumeProperty: - responseBuilder.setSetVolumePropertyResponse( - handleSetVolumePropertyApply(omRequest)); - break; - case DeleteVolume: - responseBuilder.setDeleteVolumeResponse( - handleDeleteVolumeApply(omRequest)); - break; - case CreateBucket: - case DeleteBucket: - case SetBucketProperty: - //TODO: We don't need to pass transactionID, this will be removed when - // complete write requests is changed to new model. And also we can - // return OMClientResponse, then adding to doubleBuffer can be taken - // care by stateMachine. And also integrate both HA and NON HA code - // paths. - OMClientRequest omClientRequest = - OzoneManagerRatisUtils.createClientRequest(omRequest); - OMClientResponse omClientResponse = - omClientRequest.validateAndUpdateCache(getOzoneManager(), - transactionLogIndex); - - // If any error we have got when validateAndUpdateCache, OMResponse - // Status is set with Error Code other than OK, in that case don't - // add this to double buffer. - if (omClientResponse.getOMResponse().getStatus() == Status.OK) { - ozoneManagerDoubleBuffer.add(omClientResponse, transactionLogIndex); - } - return omClientResponse.getOMResponse(); - default: - // As all request types are not changed so we need to call handle - // here. - return handle(omRequest); - } - responseBuilder.setSuccess(true); - } catch (IOException ex) { - responseBuilder.setSuccess(false); - responseBuilder.setStatus(exceptionToResponseStatus(ex)); - if (ex.getMessage() != null) { - responseBuilder.setMessage(ex.getMessage()); + switch (cmdType) { + case CreateVolume: + case SetVolumeProperty: + case DeleteVolume: + case CreateBucket: + case DeleteBucket: + case SetBucketProperty: + //TODO: We don't need to pass transactionID, this will be removed when + // complete write requests is changed to new model. And also we can + // return OMClientResponse, then adding to doubleBuffer can be taken + // care by stateMachine. And also integrate both HA and NON HA code + // paths. 
+ OMClientRequest omClientRequest = + OzoneManagerRatisUtils.createClientRequest(omRequest); + OMClientResponse omClientResponse = + omClientRequest.validateAndUpdateCache(getOzoneManager(), + transactionLogIndex); + + // If any error we have got when validateAndUpdateCache, OMResponse + // Status is set with Error Code other than OK, in that case don't + // add this to double buffer. + if (omClientResponse.getOMResponse().getStatus() == Status.OK) { + ozoneManagerDoubleBuffer.add(omClientResponse, transactionLogIndex); } + return omClientResponse.getOMResponse(); + default: + // As all request types are not changed so we need to call handle + // here. + return handle(omRequest); } - return responseBuilder.build(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index 4ff5411684126..3b544449ef3ea 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -37,7 +37,7 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.OMVolumeCreateResponse; +import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .CreateBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java index 42aa2073dae54..a78d112fa37e3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request; + import java.util.ArrayList; import java.util.List; import java.util.UUID; @@ -28,6 +29,10 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .SetVolumePropertyRequest; import org.apache.hadoop.util.Time; /** @@ -60,21 +65,34 @@ public static void addVolumeAndBucketToDB(String volumeName, } /** - * Add's volume creation entry to OM DB. + * Add volume creation entry to OM DB. * @param volumeName * @param omMetadataManager * @throws Exception */ public static void addVolumeToDB(String volumeName, OMMetadataManager omMetadataManager) throws Exception { + addVolumeToDB(volumeName, UUID.randomUUID().toString(), omMetadataManager); + } + + /** + * Add volume creation entry to OM DB. 
+ * @param volumeName + * @param ownerName + * @param omMetadataManager + * @throws Exception + */ + public static void addVolumeToDB(String volumeName, String ownerName, + OMMetadataManager omMetadataManager) throws Exception { OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder().setCreationTime(Time.now()) - .setVolume(volumeName).setAdminName(UUID.randomUUID().toString()) - .setOwnerName(UUID.randomUUID().toString()).build(); + .setVolume(volumeName).setAdminName(ownerName) + .setOwnerName(ownerName).build(); omMetadataManager.getVolumeTable().put( omMetadataManager.getVolumeKey(volumeName), omVolumeArgs); } + public static OzoneManagerProtocolProtos.OMRequest createBucketRequest( String bucketName, String volumeName, boolean isVersionEnabled, OzoneManagerProtocolProtos.StorageTypeProto storageTypeProto) { @@ -103,4 +121,56 @@ public static List< HddsProtos.KeyValue> getMetadataList() { return metadataList; } + + /** + * Add user to user table. + * @param volumeName + * @param ownerName + * @param omMetadataManager + * @throws Exception + */ + public static void addUserToDB(String volumeName, String ownerName, + OMMetadataManager omMetadataManager) throws Exception { + OzoneManagerProtocolProtos.VolumeList volumeList = + OzoneManagerProtocolProtos.VolumeList.newBuilder() + .addVolumeNames(volumeName).build(); + omMetadataManager.getUserTable().put( + omMetadataManager.getUserKey(ownerName), volumeList); + } + + /** + * Create OMRequest for set volume property request with owner set. + * @param volumeName + * @param newOwner + * @return OMRequest + */ + public static OMRequest createSetVolumePropertyRequest(String volumeName, + String newOwner) { + SetVolumePropertyRequest setVolumePropertyRequest = + SetVolumePropertyRequest.newBuilder().setVolumeName(volumeName) + .setOwnerName(newOwner).build(); + + return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) + .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) + .setSetVolumePropertyRequest(setVolumePropertyRequest).build(); + } + + + /** + * Create OMRequest for set volume property request with quota set. + * @param volumeName + * @param quota + * @return OMRequest + */ + public static OMRequest createSetVolumePropertyRequest(String volumeName, + long quota) { + SetVolumePropertyRequest setVolumePropertyRequest = + SetVolumePropertyRequest.newBuilder().setVolumeName(volumeName) + .setQuotaInBytes(quota).build(); + + return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) + .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) + .setSetVolumePropertyRequest(setVolumePropertyRequest).build(); + } + } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java new file mode 100644 index 0000000000000..246c32d228f26 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java @@ -0,0 +1,265 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.volume; + +import java.util.UUID; + +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.mockito.Mockito; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateVolumeRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .VolumeInfo; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +/** + * Tests create volume request. + */ +public class TestOMVolumeCreateRequest { + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OMMetadataManager omMetadataManager; + private AuditLogger auditLogger; + + + @Before + public void setup() throws Exception { + ozoneManager = Mockito.mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L); + auditLogger = Mockito.mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + } + + @After + public void stop() { + omMetrics.unRegister(); + Mockito.framework().clearInlineMocks(); + } + + @Test + public void testPreExecute() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String adminName = UUID.randomUUID().toString(); + String ownerName = UUID.randomUUID().toString(); + doPreExecute(volumeName, adminName, ownerName); + } + + + @Test + public void testValidateAndUpdateCacheWithZeroMaxUserVolumeCount() + throws Exception { + when(ozoneManager.getMaxUserVolumeCount()).thenReturn(0L); + String volumeName = UUID.randomUUID().toString(); + String adminName = "user1"; + String ownerName = "user1"; + + OMRequest originalRequest = createVolumeRequest(volumeName, adminName, + ownerName); + + OMVolumeCreateRequest omVolumeCreateRequest = + 
new OMVolumeCreateRequest(originalRequest); + + omVolumeCreateRequest.preExecute(ozoneManager); + + try { + OMClientResponse omClientResponse = + omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1); + } catch (IllegalArgumentException ex){ + GenericTestUtils.assertExceptionContains("should be greater than zero", + ex); + } + + } + + @Test + public void testValidateAndUpdateCacheSuccess() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String adminName = "user1"; + String ownerName = "user1"; + + OMRequest originalRequest = createVolumeRequest(volumeName, adminName, + ownerName); + + OMVolumeCreateRequest omVolumeCreateRequest = + new OMVolumeCreateRequest(originalRequest); + + omVolumeCreateRequest.preExecute(ozoneManager); + + String volumeKey = omMetadataManager.getVolumeKey(volumeName); + String ownerKey = omMetadataManager.getUserKey(ownerName); + + // As we have not still called validateAndUpdateCache, get() should + // return null. + + Assert.assertNull(omMetadataManager.getVolumeTable().get(volumeKey)); + Assert.assertNull(omMetadataManager.getUserTable().get(ownerKey)); + + OMClientResponse omClientResponse = + omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1); + + OzoneManagerProtocolProtos.OMResponse omResponse = + omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getCreateVolumeResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omResponse.getStatus()); + + + // Get volumeInfo from request. + VolumeInfo volumeInfo = omVolumeCreateRequest.getOmRequest() + .getCreateVolumeRequest().getVolumeInfo(); + + OmVolumeArgs omVolumeArgs = + omMetadataManager.getVolumeTable().get(volumeKey); + // As request is valid volume table should not have entry. + Assert.assertNotNull(omVolumeArgs); + + // Check data from table and request. + Assert.assertEquals(volumeInfo.getVolume(), omVolumeArgs.getVolume()); + Assert.assertEquals(volumeInfo.getOwnerName(), omVolumeArgs.getOwnerName()); + Assert.assertEquals(volumeInfo.getAdminName(), omVolumeArgs.getAdminName()); + Assert.assertEquals(volumeInfo.getCreationTime(), + omVolumeArgs.getCreationTime()); + + OzoneManagerProtocolProtos.VolumeList volumeList = omMetadataManager + .getUserTable().get(ownerKey); + Assert.assertNotNull(volumeList); + Assert.assertEquals(volumeName, volumeList.getVolumeNames(0)); + + } + + + @Test + public void testValidateAndUpdateCacheWithVolumeAlreadyExists() + throws Exception { + String volumeName = UUID.randomUUID().toString(); + String adminName = "user1"; + String ownerName = "user1"; + + TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + + OMRequest originalRequest = createVolumeRequest(volumeName, adminName, + ownerName); + + OMVolumeCreateRequest omVolumeCreateRequest = + new OMVolumeCreateRequest(originalRequest); + + omVolumeCreateRequest.preExecute(ozoneManager); + + OMClientResponse omClientResponse = + omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1); + + OzoneManagerProtocolProtos.OMResponse omResponse = + omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getCreateVolumeResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS, + omResponse.getStatus()); + + // Check really if we have a volume with the specified volume name. 
+ Assert.assertNotNull(omMetadataManager.getVolumeTable().get( + omMetadataManager.getVolumeKey(volumeName))); + + } + + + private void doPreExecute(String volumeName, + String adminName, String ownerName) throws Exception { + + OMRequest originalRequest = createVolumeRequest(volumeName, adminName, + ownerName); + + OMVolumeCreateRequest omVolumeCreateRequest = + new OMVolumeCreateRequest(originalRequest); + + OMRequest modifiedRequest = omVolumeCreateRequest.preExecute(ozoneManager); + verifyRequest(modifiedRequest, originalRequest); + } + + /** + * Verify modifiedOmRequest and originalRequest. + * @param modifiedRequest + * @param originalRequest + */ + private void verifyRequest(OMRequest modifiedRequest, + OMRequest originalRequest) { + VolumeInfo original = originalRequest.getCreateVolumeRequest() + .getVolumeInfo(); + VolumeInfo updated = modifiedRequest.getCreateVolumeRequest() + .getVolumeInfo(); + + Assert.assertEquals(original.getAdminName(), updated.getAdminName()); + Assert.assertEquals(original.getVolume(), updated.getVolume()); + Assert.assertEquals(original.getOwnerName(), + updated.getOwnerName()); + Assert.assertNotEquals(original.getCreationTime(), + updated.getCreationTime()); + } + + /** + * Create OMRequest for create volume. + * @param volumeName + * @param adminName + * @param ownerName + * @return OMRequest + */ + private OMRequest createVolumeRequest(String volumeName, String adminName, + String ownerName) { + VolumeInfo volumeInfo = VolumeInfo.newBuilder().setVolume(volumeName) + .setAdminName(adminName).setOwnerName(ownerName).build(); + CreateVolumeRequest createVolumeRequest = + CreateVolumeRequest.newBuilder().setVolumeInfo(volumeInfo).build(); + + return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) + .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) + .setCreateVolumeRequest(createVolumeRequest).build(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java new file mode 100644 index 0000000000000..95e098727f0fe --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java @@ -0,0 +1,222 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.volume; + +import java.util.UUID; + +import com.google.common.base.Optional; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.mockito.Mockito; + +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .DeleteVolumeRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests delete volume request. 
+ */ +public class TestOMVolumeDeleteRequest { + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OMMetadataManager omMetadataManager; + private AuditLogger auditLogger; + + + @Before + public void setup() throws Exception { + ozoneManager = mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + auditLogger = Mockito.mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + + } + + @After + public void stop() { + omMetrics.unRegister(); + Mockito.framework().clearInlineMocks(); + } + + @Test + public void testPreExecute() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String ownerName = UUID.randomUUID().toString(); + OMRequest originalRequest = deleteVolumeRequest(volumeName, ownerName); + + OMVolumeDeleteRequest omVolumeDeleteRequest = + new OMVolumeDeleteRequest(originalRequest); + + OMRequest modifiedRequest = omVolumeDeleteRequest.preExecute(ozoneManager); + Assert.assertNotEquals(originalRequest, modifiedRequest); + } + + + @Test + public void testValidateAndUpdateCacheSuccess() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String ownerName = "user1"; + + OMRequest originalRequest = deleteVolumeRequest(volumeName, ownerName); + + OMVolumeDeleteRequest omVolumeDeleteRequest = + new OMVolumeDeleteRequest(originalRequest); + + omVolumeDeleteRequest.preExecute(ozoneManager); + + // Add volume and user to DB + TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + + String volumeKey = omMetadataManager.getVolumeKey(volumeName); + String ownerKey = omMetadataManager.getUserKey(ownerName); + + + Assert.assertNotNull(omMetadataManager.getVolumeTable().get(volumeKey)); + Assert.assertNotNull(omMetadataManager.getUserTable().get(ownerKey)); + + OMClientResponse omClientResponse = + omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1); + + OzoneManagerProtocolProtos.OMResponse omResponse = + omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getCreateVolumeResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omResponse.getStatus()); + + + + Assert.assertTrue(omMetadataManager.getUserTable().get(ownerKey) + .getVolumeNamesList().size() == 0); + // As now volume is deleted, table should not have those entries. 
+ Assert.assertNull(omMetadataManager.getVolumeTable().get(volumeKey)); + + } + + + @Test + public void testValidateAndUpdateCacheWithVolumeNotFound() + throws Exception { + String volumeName = UUID.randomUUID().toString(); + String ownerName = "user1"; + + OMRequest originalRequest = deleteVolumeRequest(volumeName, ownerName); + + OMVolumeDeleteRequest omVolumeDeleteRequest = + new OMVolumeDeleteRequest(originalRequest); + + omVolumeDeleteRequest.preExecute(ozoneManager); + + OMClientResponse omClientResponse = + omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1); + + OzoneManagerProtocolProtos.OMResponse omResponse = + omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getCreateVolumeResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, + omResponse.getStatus()); + + } + + + @Test + public void testValidateAndUpdateCacheWithVolumeNotEmpty() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String ownerName = "user1"; + + OMRequest originalRequest = deleteVolumeRequest(volumeName, ownerName); + + OMVolumeDeleteRequest omVolumeDeleteRequest = + new OMVolumeDeleteRequest(originalRequest); + + omVolumeDeleteRequest.preExecute(ozoneManager); + + // Add some bucket to bucket table cache. + String bucketName = UUID.randomUUID().toString(); + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + + OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volumeName).setBucketName(bucketName).build(); + omMetadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), + new CacheValue<>(Optional.of(omBucketInfo), 1L)); + + // Add user and volume to DB. + TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + + OMClientResponse omClientResponse = + omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1L); + + OzoneManagerProtocolProtos.OMResponse omResponse = + omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getCreateVolumeResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_EMPTY, + omResponse.getStatus()); + } + + /** + * Create OMRequest for delete volume. + * @param volumeName + * @param ownerName + * @return OMRequest + */ + private OMRequest deleteVolumeRequest(String volumeName, + String ownerName) { + DeleteVolumeRequest deleteVolumeRequest = + DeleteVolumeRequest.newBuilder().setVolumeName(volumeName) + .setOwner(ownerName).build(); + + return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) + .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) + .setDeleteVolumeRequest(deleteVolumeRequest).build(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java new file mode 100644 index 0000000000000..bd464d893201b --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java @@ -0,0 +1,204 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.volume; + +import java.util.UUID; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.mockito.Mockito; + +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +/** + * Tests set volume property request. + */ +public class TestOMVolumeSetOwnerRequest { + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OMMetadataManager omMetadataManager; + private AuditLogger auditLogger; + + + @Before + public void setup() throws Exception { + ozoneManager = Mockito.mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L); + auditLogger = Mockito.mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + } + + @After + public void stop() { + omMetrics.unRegister(); + Mockito.framework().clearInlineMocks(); + } + + @Test + public void testPreExecute() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String newOwner = "user1"; + OMRequest originalRequest = + TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner); + + OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = + new OMVolumeSetQuotaRequest(originalRequest); + + OMRequest modifiedRequest = omVolumeSetQuotaRequest.preExecute( + ozoneManager); + Assert.assertNotEquals(modifiedRequest, originalRequest); + } + + + @Test + public void testValidateAndUpdateCacheSuccess() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String ownerName = "user1"; + + TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + + String newOwner = "user2"; + + OMRequest originalRequest = + TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner); + + 
OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = + new OMVolumeSetOwnerRequest(originalRequest); + + omVolumeSetOwnerRequest.preExecute(ozoneManager); + + String volumeKey = omMetadataManager.getVolumeKey(volumeName); + String ownerKey = omMetadataManager.getUserKey(ownerName); + String newOwnerKey = omMetadataManager.getUserKey(newOwner); + + + + OMClientResponse omClientResponse = + omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1); + + OzoneManagerProtocolProtos.OMResponse omResponse = + omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getSetVolumePropertyResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omResponse.getStatus()); + + + String fromDBOwner = omMetadataManager + .getVolumeTable().get(volumeKey).getOwnerName(); + Assert.assertEquals(newOwner, fromDBOwner); + + + OzoneManagerProtocolProtos.VolumeList newOwnerVolumeList = + omMetadataManager.getUserTable().get(newOwnerKey); + + Assert.assertNotNull(newOwnerVolumeList); + Assert.assertEquals(volumeName, + newOwnerVolumeList.getVolumeNamesList().get(0)); + + OzoneManagerProtocolProtos.VolumeList oldOwnerVolumeList = + omMetadataManager.getUserTable().get( + omMetadataManager.getUserKey(ownerKey)); + + Assert.assertNotNull(oldOwnerVolumeList); + Assert.assertTrue(oldOwnerVolumeList.getVolumeNamesList().size() == 0); + + } + + + @Test + public void testValidateAndUpdateCacheWithVolumeNotFound() + throws Exception { + String volumeName = UUID.randomUUID().toString(); + String ownerName = "user1"; + + OMRequest originalRequest = + TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + ownerName); + + OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = + new OMVolumeSetOwnerRequest(originalRequest); + + omVolumeSetOwnerRequest.preExecute(ozoneManager); + + OMClientResponse omClientResponse = + omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1); + + OzoneManagerProtocolProtos.OMResponse omResponse = + omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getCreateVolumeResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, + omResponse.getStatus()); + + } + + @Test + public void testInvalidRequest() throws Exception { + String volumeName = UUID.randomUUID().toString(); + + // create request with quota set. + OMRequest originalRequest = + TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + 100L); + + OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = + new OMVolumeSetOwnerRequest(originalRequest); + + omVolumeSetOwnerRequest.preExecute(ozoneManager); + + OMClientResponse omClientResponse = + omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1); + + OzoneManagerProtocolProtos.OMResponse omResponse = + omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getCreateVolumeResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_REQUEST, + omResponse.getStatus()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java new file mode 100644 index 0000000000000..9e9a1efc358b2 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.volume; + +import java.util.UUID; + +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.mockito.Mockito; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +/** + * Tests set volume property request. + */ +public class TestOMVolumeSetQuotaRequest { + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OMMetadataManager omMetadataManager; + private AuditLogger auditLogger; + + @Before + public void setup() throws Exception { + ozoneManager = Mockito.mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + auditLogger = Mockito.mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + } + + @After + public void stop() { + omMetrics.unRegister(); + Mockito.framework().clearInlineMocks(); + } + + @Test + public void testPreExecute() throws Exception { + String volumeName = UUID.randomUUID().toString(); + long quota = 100L; + OMRequest originalRequest = + TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quota); + + OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = + new OMVolumeSetQuotaRequest(originalRequest); + + OMRequest modifiedRequest = omVolumeSetQuotaRequest.preExecute( + ozoneManager); + Assert.assertNotEquals(modifiedRequest, originalRequest); + } + + + @Test + public void testValidateAndUpdateCacheSuccess() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String ownerName = "user1"; + long quotaSet = 100L; + + TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + + + OMRequest originalRequest = + TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quotaSet); + + OMVolumeSetQuotaRequest 
omVolumeSetQuotaRequest = + new OMVolumeSetQuotaRequest(originalRequest); + + omVolumeSetQuotaRequest.preExecute(ozoneManager); + + String volumeKey = omMetadataManager.getVolumeKey(volumeName); + + + // Get Quota before validateAndUpdateCache. + OmVolumeArgs omVolumeArgs = + omMetadataManager.getVolumeTable().get(volumeKey); + // As request is valid volume table should not have entry. + Assert.assertNotNull(omVolumeArgs); + long quotaBeforeSet = omVolumeArgs.getQuotaInBytes(); + + + OMClientResponse omClientResponse = + omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1); + + OzoneManagerProtocolProtos.OMResponse omResponse = + omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getSetVolumePropertyResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omResponse.getStatus()); + + + long quotaAfterSet = omMetadataManager + .getVolumeTable().get(volumeKey).getQuotaInBytes(); + Assert.assertEquals(quotaSet, quotaAfterSet); + Assert.assertNotEquals(quotaBeforeSet, quotaAfterSet); + + } + + + @Test + public void testValidateAndUpdateCacheWithVolumeNotFound() + throws Exception { + String volumeName = UUID.randomUUID().toString(); + String ownerName = "user1"; + long quota = 100L; + + OMRequest originalRequest = + TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quota); + + OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = + new OMVolumeSetQuotaRequest(originalRequest); + + omVolumeSetQuotaRequest.preExecute(ozoneManager); + + OMClientResponse omClientResponse = + omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1); + + OzoneManagerProtocolProtos.OMResponse omResponse = + omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getCreateVolumeResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, + omResponse.getStatus()); + + } + + @Test + public void testInvalidRequest() throws Exception { + String volumeName = UUID.randomUUID().toString(); + + // create request with owner set. + OMRequest originalRequest = + TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + "user1"); + + // Creating OMVolumeSetQuotaRequest with SetProperty request set with owner. + OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = + new OMVolumeSetQuotaRequest(originalRequest); + + omVolumeSetQuotaRequest.preExecute(ozoneManager); + + OMClientResponse omClientResponse = + omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1); + + OzoneManagerProtocolProtos.OMResponse omResponse = + omClientResponse.getOMResponse(); + Assert.assertNotNull(omResponse.getCreateVolumeResponse()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_REQUEST, + omResponse.getStatus()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java new file mode 100644 index 0000000000000..cbe3e2d3c7129 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Package contains test classes for volume requests. + */ +package org.apache.hadoop.ozone.om.request.volume; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java new file mode 100644 index 0000000000000..430afcdd04a90 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.volume; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateVolumeResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .VolumeList; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.utils.db.BatchOperation; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; +import java.util.UUID; + +import static org.junit.Assert.fail; + +/** + * This class tests OMVolumeCreateResponse. + */ +public class TestOMVolumeCreateResponse { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OMMetadataManager omMetadataManager; + private BatchOperation batchOperation; + + @Before + public void setup() throws Exception { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + } + + @Test + public void testAddToDBBatch() throws Exception { + + String volumeName = UUID.randomUUID().toString(); + String userName = "user1"; + VolumeList volumeList = VolumeList.newBuilder() + .addVolumeNames(volumeName).build(); + + OMResponse omResponse = OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setSuccess(true) + .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) + .build(); + + OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() + .setOwnerName(userName).setAdminName(userName) + .setVolume(volumeName).setCreationTime(Time.now()).build(); + OMVolumeCreateResponse omVolumeCreateResponse = + new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse); + + omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + Assert.assertEquals(omVolumeArgs, + omMetadataManager.getVolumeTable().get( + omMetadataManager.getVolumeKey(volumeName))); + + Assert.assertEquals(volumeList, + omMetadataManager.getUserTable().get( + omMetadataManager.getUserKey(userName))); + } + + @Test + public void testAddToDBBatchNoOp() throws Exception { + + OMResponse omResponse = OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) + .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS) + .setSuccess(false) + .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) + .build(); + + OMVolumeCreateResponse omVolumeCreateResponse = + new OMVolumeCreateResponse(null, null, omResponse); + + try { + omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); + Assert.assertTrue(omMetadataManager.countRowsInTable( + omMetadataManager.getVolumeTable()) == 0); + } catch (IOException ex) { + fail("testAddToDBBatchFailure failed"); + } + + } + + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java new file mode 100644 index 0000000000000..a47c224258691 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java @@ -0,0 +1,130 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.volume; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateVolumeResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .VolumeList; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.utils.db.BatchOperation; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; +import java.util.UUID; + +import static org.junit.Assert.fail; + +/** + * This class tests OMVolumeCreateResponse. + */ +public class TestOMVolumeDeleteResponse { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OMMetadataManager omMetadataManager; + private BatchOperation batchOperation; + + @Before + public void setup() throws Exception { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + } + + @Test + public void testAddToDBBatch() throws Exception { + + String volumeName = UUID.randomUUID().toString(); + String userName = "user1"; + VolumeList volumeList = VolumeList.newBuilder() + .addVolumeNames(volumeName).build(); + + OMResponse omResponse = OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setSuccess(true) + .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) + .build(); + + OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() + .setOwnerName(userName).setAdminName(userName) + .setVolume(volumeName).setCreationTime(Time.now()).build(); + OMVolumeCreateResponse omVolumeCreateResponse = + new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse); + + // As we are deleting updated volume list should be empty. + VolumeList updatedVolumeList = VolumeList.newBuilder().build(); + OMVolumeDeleteResponse omVolumeDeleteResponse = + new OMVolumeDeleteResponse(volumeName, userName, updatedVolumeList, + omResponse); + + omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); + omVolumeDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + Assert.assertNull(null, + omMetadataManager.getVolumeTable().get( + omMetadataManager.getVolumeKey(volumeName))); + + Assert.assertEquals(null, + omMetadataManager.getUserTable().get( + omMetadataManager.getUserKey(userName))); + } + + @Test + public void testAddToDBBatchNoOp() throws Exception { + + OMResponse omResponse = OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) + .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) + .setSuccess(false) + .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) + .build(); + + OMVolumeDeleteResponse omVolumeDeleteResponse = + new OMVolumeDeleteResponse(null, null, null, omResponse); + + try { + omVolumeDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); + } catch (IOException ex) { + fail("testAddToDBBatchFailure failed"); + } + + } + + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java new file mode 100644 index 0000000000000..877e3d7d0c3f5 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java @@ -0,0 +1,142 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.volume; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateVolumeResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .VolumeList; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.utils.db.BatchOperation; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; +import java.util.UUID; + +import static org.junit.Assert.fail; + +/** + * This class tests OMVolumeCreateResponse. + */ +public class TestOMVolumeSetOwnerResponse { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OMMetadataManager omMetadataManager; + private BatchOperation batchOperation; + + @Before + public void setup() throws Exception { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + } + + @Test + public void testAddToDBBatch() throws Exception { + + String volumeName = UUID.randomUUID().toString(); + String oldOwner = "user1"; + VolumeList volumeList = VolumeList.newBuilder() + .addVolumeNames(volumeName).build(); + + OMResponse omResponse = OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setSuccess(true) + .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) + .build(); + + OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() + .setOwnerName(oldOwner).setAdminName(oldOwner) + .setVolume(volumeName).setCreationTime(Time.now()).build(); + OMVolumeCreateResponse omVolumeCreateResponse = + new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse); + + + + String newOwner = "user2"; + VolumeList newOwnerVolumeList = VolumeList.newBuilder() + .addVolumeNames(volumeName).build(); + VolumeList oldOwnerVolumeList = VolumeList.newBuilder().build(); + OmVolumeArgs newOwnerVolumeArgs = OmVolumeArgs.newBuilder() + .setOwnerName(newOwner).setAdminName(newOwner) + .setVolume(volumeName).setCreationTime(omVolumeArgs.getCreationTime()) + .build(); + + OMVolumeSetOwnerResponse omVolumeSetOwnerResponse = + new OMVolumeSetOwnerResponse(oldOwner, oldOwnerVolumeList, + newOwnerVolumeList, newOwnerVolumeArgs, omResponse); + + omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); + omVolumeSetOwnerResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + + Assert.assertEquals(newOwnerVolumeArgs, + omMetadataManager.getVolumeTable().get( + omMetadataManager.getVolumeKey(volumeName))); + + Assert.assertEquals(volumeList, + omMetadataManager.getUserTable().get( + omMetadataManager.getUserKey(newOwner))); + } + + @Test + public void testAddToDBBatchNoOp() throws Exception { + + OMResponse omResponse = OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) + .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) + .setSuccess(false) + .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) + .build(); + + OMVolumeSetOwnerResponse omVolumeSetOwnerResponse = + new OMVolumeSetOwnerResponse(null, null, null, null, omResponse); + + try { + omVolumeSetOwnerResponse.addToDBBatch(omMetadataManager, batchOperation); + Assert.assertTrue(omMetadataManager.countRowsInTable( + omMetadataManager.getVolumeTable()) == 0); + } catch (IOException ex) { + fail("testAddToDBBatchFailure failed"); + } + + } + + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java new file mode 100644 index 0000000000000..30e48b2e1946b --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.volume; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateVolumeResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.utils.db.BatchOperation; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; +import java.util.UUID; + +import static org.junit.Assert.fail; + +/** + * This class tests OMVolumeCreateResponse. + */ +public class TestOMVolumeSetQuotaResponse { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OMMetadataManager omMetadataManager; + private BatchOperation batchOperation; + + @Before + public void setup() throws Exception { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.newFolder().getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + } + + @Test + public void testAddToDBBatch() throws Exception { + + String volumeName = UUID.randomUUID().toString(); + String userName = "user1"; + + OMResponse omResponse = OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setSuccess(true) + .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) + .build(); + + OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() + .setOwnerName(userName).setAdminName(userName) + .setVolume(volumeName).setCreationTime(Time.now()).build(); + OMVolumeSetQuotaResponse omVolumeSetQuotaResponse = + new OMVolumeSetQuotaResponse(omVolumeArgs, omResponse); + + omVolumeSetQuotaResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + Assert.assertEquals(omVolumeArgs, + omMetadataManager.getVolumeTable().get( + omMetadataManager.getVolumeKey(volumeName))); + + } + + @Test + public void testAddToDBBatchNoOp() throws Exception { + + OMResponse omResponse = OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) + .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) + .setSuccess(false) + .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) + .build(); + + OMVolumeSetQuotaResponse omVolumeSetQuotaResponse = + new OMVolumeSetQuotaResponse(null, omResponse); + + try { + omVolumeSetQuotaResponse.addToDBBatch(omMetadataManager, batchOperation); + Assert.assertTrue(omMetadataManager.countRowsInTable( + omMetadataManager.getVolumeTable()) == 0); + } catch (IOException ex) { + fail("testAddToDBBatchFailure failed"); + } + } + + +} From 970b0b0c02bb8fbe8ff227c78e2332df623d1aea Mon Sep 17 00:00:00 2001 From: Weiwei Yang Date: Thu, 13 Jun 2019 10:44:47 +0800 Subject: [PATCH 0181/1308] YARN-9578. Add limit/actions/summarize options for app activities REST API. Contributed by Tao Yang. --- .../activities/ActivitiesManager.java | 56 ++- .../scheduler/activities/AppAllocation.java | 11 +- .../resourcemanager/webapp/RMWSConsts.java | 13 +- .../webapp/RMWebServiceProtocol.java | 7 +- .../resourcemanager/webapp/RMWebServices.java | 73 +++- .../webapp/dao/AppAllocationInfo.java | 3 +- .../activities/TestActivitiesManager.java | 112 ++++- .../webapp/ActivitiesTestUtils.java | 15 + .../TestRMWebServicesSchedulerActivities.java | 407 ++++++++++++------ ...edulerActivitiesWithMultiNodesEnabled.java | 44 +- .../webapp/DefaultRequestInterceptorREST.java | 3 +- .../webapp/FederationInterceptorREST.java | 3 +- .../router/webapp/RouterWebServices.java | 11 +- .../webapp/BaseRouterWebServicesTest.java | 4 +- .../webapp/MockRESTRequestInterceptor.java | 3 +- .../PassThroughRESTRequestInterceptor.java | 6 +- 16 files changed, 570 insertions(+), 201 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java index 2c314727c9d55..4149ac1565d30 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; @@ -121,7 +122,8 @@ private void setupConfForCleanup(Configuration conf) { public AppActivitiesInfo getAppActivitiesInfo(ApplicationId applicationId, Set requestPriorities, Set allocationRequestIds, - RMWSConsts.ActivitiesGroupBy groupBy) { + RMWSConsts.ActivitiesGroupBy groupBy, int limit, boolean summarize, + 
double maxTimeInSeconds) { RMApp app = rmContext.getRMApps().get(applicationId); if (app != null && app.getFinalApplicationStatus() == FinalApplicationStatus.UNDEFINED) { @@ -140,6 +142,17 @@ public AppActivitiesInfo getAppActivitiesInfo(ApplicationId applicationId, allocations = new ArrayList(curAllocations); } } + if (summarize && allocations != null) { + AppAllocation summaryAppAllocation = + getSummarizedAppAllocation(allocations, maxTimeInSeconds); + if (summaryAppAllocation != null) { + allocations = Lists.newArrayList(summaryAppAllocation); + } + } + if (allocations != null && limit > 0 && limit < allocations.size()) { + allocations = + allocations.subList(allocations.size() - limit, allocations.size()); + } return new AppActivitiesInfo(allocations, applicationId, groupBy); } else { return new AppActivitiesInfo( @@ -148,6 +161,47 @@ public AppActivitiesInfo getAppActivitiesInfo(ApplicationId applicationId, } } + /** + * Get summarized app allocation from multiple allocations as follows: + * 1. Collect latest allocation attempts on nodes to construct an allocation + * summary on nodes from multiple app allocations which are recorded a few + * seconds before the last allocation. + * 2. Copy other fields from the last allocation. + */ + private AppAllocation getSummarizedAppAllocation( + List allocations, double maxTimeInSeconds) { + if (allocations == null || allocations.isEmpty()) { + return null; + } + long startTime = allocations.get(allocations.size() - 1).getTime() + - (long) (maxTimeInSeconds * 1000); + Map nodeActivities = new HashMap<>(); + for (int i = allocations.size() - 1; i >= 0; i--) { + AppAllocation appAllocation = allocations.get(i); + if (startTime > appAllocation.getTime()) { + break; + } + List activityNodes = appAllocation.getAllocationAttempts(); + for (ActivityNode an : activityNodes) { + if (an.getNodeId() != null) { + nodeActivities.putIfAbsent( + an.getRequestPriority() + "_" + an.getAllocationRequestId() + "_" + + an.getNodeId(), an); + } + } + } + AppAllocation lastAppAllocation = allocations.get(allocations.size() - 1); + AppAllocation summarizedAppAllocation = + new AppAllocation(lastAppAllocation.getPriority(), null, + lastAppAllocation.getQueueName()); + summarizedAppAllocation + .updateAppContainerStateAndTime(null, lastAppAllocation.getAppState(), + lastAppAllocation.getTime(), lastAppAllocation.getDiagnostic()); + summarizedAppAllocation + .setAllocationAttempts(new ArrayList<>(nodeActivities.values())); + return summarizedAppAllocation; + } + public ActivitiesInfo getActivitiesInfo(String nodeId, RMWSConsts.ActivitiesGroupBy groupBy) { List allocations; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/AppAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/AppAllocation.java index 69d6ccf218b31..e226b50fb778d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/AppAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/AppAllocation.java @@ -84,11 +84,8 @@ public ActivityState getAppState() { return appState; } - public String 
getPriority() { - if (priority == null) { - return null; - } - return priority.toString(); + public Priority getPriority() { + return priority; } public String getContainerId() { @@ -128,4 +125,8 @@ public AppAllocation filterAllocationAttempts(Set requestPriorities, .collect(Collectors.toList()); return appAllocation; } + + public void setAllocationAttempts(List allocationAttempts) { + this.allocationAttempts = allocationAttempts; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java index b7a60087e648f..f2d2b822cd2f0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java @@ -71,7 +71,7 @@ public final class RMWSConsts { /** Path for {@code RMWebServiceProtocol#getAppActivities}. */ public static final String SCHEDULER_APP_ACTIVITIES = - "/scheduler/app-activities"; + "/scheduler/app-activities/{appid}"; /** Path for {@code RMWebServiceProtocol#getAppStatistics}. */ public static final String APP_STATISTICS = "/appstatistics"; @@ -237,6 +237,8 @@ public final class RMWSConsts { public static final String GROUP_BY = "groupBy"; public static final String SIGNAL = "signal"; public static final String COMMAND = "command"; + public static final String ACTIONS = "actions"; + public static final String SUMMARIZE = "summarize"; private RMWSConsts() { // not called @@ -250,4 +252,13 @@ private RMWSConsts() { public enum ActivitiesGroupBy { DIAGNOSTIC } + + /** + * Defines the required action of app activities: + * REFRESH means to turn on activities recording for the required app, + * GET means the required app activities should be involved in response. + */ + public enum AppActivitiesRequiredAction { + REFRESH, GET + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java index 3aa2593c1c2a0..a5bd93bbbed55 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java @@ -227,11 +227,16 @@ ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId, * the activities. It is a QueryParam. * @param groupBy the groupBy type by which the activities should be * aggregated. It is a QueryParam. + * @param limit set a limit of the result. It is a QueryParam. + * @param actions the required actions of app activities. It is a QueryParam. + * @param summarize whether app activities in multiple scheduling processes + * need to be summarized. 
It is a QueryParam. * @return all the activities about a specific app for a specific time */ AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId, String time, Set requestPriorities, - Set allocationRequestIds, String groupBy); + Set allocationRequestIds, String groupBy, String limit, + Set actions, boolean summarize); /** * This method retrieves all the statistics for a specific app, and it is diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index 3f010350cb58d..762569fa6b53b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -236,6 +236,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol { public static final String DEFAULT_START_TIME = "0"; public static final String DEFAULT_END_TIME = "-1"; public static final String DEFAULT_INCLUDE_RESOURCE = "false"; + public static final String DEFAULT_SUMMARIZE = "false"; @VisibleForTesting boolean isCentralizedNodeLabelConfiguration = true; @@ -717,12 +718,16 @@ public ActivitiesInfo getActivities(@Context HttpServletRequest hsr, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) @Override public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr, - @QueryParam(RMWSConsts.APP_ID) String appId, + @PathParam(RMWSConsts.APPID) String appId, @QueryParam(RMWSConsts.MAX_TIME) String time, @QueryParam(RMWSConsts.REQUEST_PRIORITIES) Set requestPriorities, @QueryParam(RMWSConsts.ALLOCATION_REQUEST_IDS) Set allocationRequestIds, - @QueryParam(RMWSConsts.GROUP_BY) String groupBy) { + @QueryParam(RMWSConsts.GROUP_BY) String groupBy, + @QueryParam(RMWSConsts.LIMIT) String limit, + @QueryParam(RMWSConsts.ACTIONS) Set actions, + @QueryParam(RMWSConsts.SUMMARIZE) @DefaultValue(DEFAULT_SUMMARIZE) + boolean summarize) { initForReadableEndpoints(); YarnScheduler scheduler = rm.getRMContext().getScheduler(); @@ -749,6 +754,26 @@ public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr, return new AppActivitiesInfo(e.getMessage(), appId); } + Set requiredActions; + try { + requiredActions = parseAppActivitiesRequiredActions(actions); + } catch (IllegalArgumentException e) { + return new AppActivitiesInfo(e.getMessage(), appId); + } + + int limitNum = -1; + if (limit != null) { + try { + limitNum = Integer.parseInt(limit); + if (limitNum <= 0) { + return new AppActivitiesInfo( + "limit must be greater than 0!", appId); + } + } catch (NumberFormatException e) { + return new AppActivitiesInfo("limit must be integer!", appId); + } + } + double maxTime = 3.0; if (time != null) { @@ -762,12 +787,21 @@ public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr, ApplicationId applicationId; try { applicationId = ApplicationId.fromString(appId); - activitiesManager.turnOnAppActivitiesRecording(applicationId, maxTime); - AppActivitiesInfo appActivitiesInfo = - activitiesManager.getAppActivitiesInfo(applicationId, - requestPriorities, allocationRequestIds, 
activitiesGroupBy); - - return appActivitiesInfo; + if (requiredActions + .contains(RMWSConsts.AppActivitiesRequiredAction.REFRESH)) { + activitiesManager + .turnOnAppActivitiesRecording(applicationId, maxTime); + } + if (requiredActions + .contains(RMWSConsts.AppActivitiesRequiredAction.GET)) { + AppActivitiesInfo appActivitiesInfo = activitiesManager + .getAppActivitiesInfo(applicationId, requestPriorities, + allocationRequestIds, activitiesGroupBy, limitNum, + summarize, maxTime); + return appActivitiesInfo; + } + return new AppActivitiesInfo("Successfully notified actions: " + + StringUtils.join(',', actions), appId); } catch (Exception e) { String errMessage = "Cannot find application with given appId"; LOG.error(errMessage, e); @@ -778,6 +812,29 @@ public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr, return null; } + private Set + parseAppActivitiesRequiredActions(Set actions) { + Set requiredActions = + new HashSet<>(); + if (actions == null || actions.isEmpty()) { + requiredActions.add(RMWSConsts.AppActivitiesRequiredAction.REFRESH); + requiredActions.add(RMWSConsts.AppActivitiesRequiredAction.GET); + } else { + for (String action : actions) { + if (!EnumUtils.isValidEnum(RMWSConsts.AppActivitiesRequiredAction.class, + action.toUpperCase())) { + String errMesasge = + "Got invalid action: " + action + ", valid actions: " + Arrays + .asList(RMWSConsts.AppActivitiesRequiredAction.values()); + throw new IllegalArgumentException(errMesasge); + } + requiredActions.add(RMWSConsts.AppActivitiesRequiredAction + .valueOf(action.toUpperCase())); + } + } + return requiredActions; + } + private RMWSConsts.ActivitiesGroupBy parseActivitiesGroupBy(String groupBy) { if (groupBy != null) { if (!EnumUtils.isValidEnum(RMWSConsts.ActivitiesGroupBy.class, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java index 6b0d86ba92b31..6ae1f9a819baa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAllocationInfo.java @@ -54,7 +54,8 @@ public class AppAllocationInfo { this.requestAllocation = new ArrayList<>(); this.nodeId = allocation.getNodeId(); this.queueName = allocation.getQueueName(); - this.appPriority = allocation.getPriority(); + this.appPriority = allocation.getPriority() == null ? 
+ null : allocation.getPriority().toString(); this.timestamp = allocation.getTime(); this.dateTime = new Date(allocation.getTime()).toString(); this.allocationState = allocation.getAppState().name(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/TestActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/TestActivitiesManager.java index 495c7e248b069..2bf6b23ed70a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/TestActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/TestActivitiesManager.java @@ -29,6 +29,7 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -46,6 +47,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.util.SystemClock; @@ -286,18 +288,124 @@ public void testAppActivitiesTTL() throws Exception { ActivityDiagnosticConstant.SKIPPED_ALL_PRIORITIES); } AppActivitiesInfo appActivitiesInfo = newActivitiesManager - .getAppActivitiesInfo(app.getApplicationId(), null, null, null); + .getAppActivitiesInfo(app.getApplicationId(), null, null, null, -1, + false, 3); Assert.assertEquals(numActivities, appActivitiesInfo.getAllocations().size()); // sleep until all app activities expired Thread.sleep(cleanupIntervalMs + appActivitiesTTL); // there should be no remaining app activities appActivitiesInfo = newActivitiesManager - .getAppActivitiesInfo(app.getApplicationId(), null, null, null); + .getAppActivitiesInfo(app.getApplicationId(), null, null, null, -1, + false, 3); Assert.assertEquals(0, appActivitiesInfo.getAllocations().size()); } + @Test (timeout = 30000) + public void testAppActivitiesPerformance() { + // start recording activities for first app + SchedulerApplicationAttempt app = apps.get(0); + FiCaSchedulerNode node = (FiCaSchedulerNode) nodes.get(0); + activitiesManager.turnOnAppActivitiesRecording(app.getApplicationId(), 100); + int numActivities = 100; + int numNodes = 10000; + int testingTimes = 10; + for (int ano = 0; ano < numActivities; ano++) { + ActivitiesLogger.APP.startAppAllocationRecording(activitiesManager, node, + SystemClock.getInstance().getTime(), app); + for (int i = 0; i < numNodes; i++) { + NodeId nodeId = NodeId.newInstance("host" + i, 0); + activitiesManager + .addSchedulingActivityForApp(app.getApplicationId(), null, "0", + ActivityState.SKIPPED, + ActivityDiagnosticConstant.FAIL_TO_ALLOCATE, "container", + nodeId, "0"); + } + 
ActivitiesLogger.APP + .finishAllocatedAppAllocationRecording(activitiesManager, + app.getApplicationId(), null, ActivityState.SKIPPED, + ActivityDiagnosticConstant.SKIPPED_ALL_PRIORITIES); + } + + // It often take a longer time for the first query, ignore this distraction + activitiesManager + .getAppActivitiesInfo(app.getApplicationId(), null, null, null, -1, + true, 100); + + // Test getting normal app activities + Supplier normalSupplier = () -> { + AppActivitiesInfo appActivitiesInfo = activitiesManager + .getAppActivitiesInfo(app.getApplicationId(), null, null, null, -1, + false, 100); + Assert.assertEquals(numActivities, + appActivitiesInfo.getAllocations().size()); + Assert.assertEquals(1, + appActivitiesInfo.getAllocations().get(0).getRequestAllocation() + .size()); + Assert.assertEquals(numNodes, + appActivitiesInfo.getAllocations().get(0).getRequestAllocation() + .get(0).getAllocationAttempt().size()); + return null; + }; + testManyTimes("Getting normal app activities", normalSupplier, + testingTimes); + + // Test getting aggregated app activities + Supplier aggregatedSupplier = () -> { + AppActivitiesInfo appActivitiesInfo = activitiesManager + .getAppActivitiesInfo(app.getApplicationId(), null, null, + RMWSConsts.ActivitiesGroupBy.DIAGNOSTIC, -1, false, 100); + Assert.assertEquals(numActivities, + appActivitiesInfo.getAllocations().size()); + Assert.assertEquals(1, + appActivitiesInfo.getAllocations().get(0).getRequestAllocation() + .size()); + Assert.assertEquals(1, + appActivitiesInfo.getAllocations().get(0).getRequestAllocation() + .get(0).getAllocationAttempt().size()); + Assert.assertEquals(numNodes, + appActivitiesInfo.getAllocations().get(0).getRequestAllocation() + .get(0).getAllocationAttempt().get(0).getNodeIds().size()); + return null; + }; + testManyTimes("Getting aggregated app activities", aggregatedSupplier, + testingTimes); + + // Test getting summarized app activities + Supplier summarizedSupplier = () -> { + AppActivitiesInfo appActivitiesInfo = activitiesManager + .getAppActivitiesInfo(app.getApplicationId(), null, null, + RMWSConsts.ActivitiesGroupBy.DIAGNOSTIC, -1, true, 100); + Assert.assertEquals(1, appActivitiesInfo.getAllocations().size()); + Assert.assertEquals(1, + appActivitiesInfo.getAllocations().get(0).getRequestAllocation() + .size()); + Assert.assertEquals(1, + appActivitiesInfo.getAllocations().get(0).getRequestAllocation() + .get(0).getAllocationAttempt().size()); + Assert.assertEquals(numNodes, + appActivitiesInfo.getAllocations().get(0).getRequestAllocation() + .get(0).getAllocationAttempt().get(0).getNodeIds().size()); + return null; + }; + testManyTimes("Getting summarized app activities", summarizedSupplier, + testingTimes); + } + + private void testManyTimes(String testingName, + Supplier supplier, int testingTimes) { + long totalTime = 0; + for (int i = 0; i < testingTimes; i++) { + long startTime = System.currentTimeMillis(); + supplier.get(); + totalTime += System.currentTimeMillis() - startTime; + } + System.out.println("#" + testingName + ", testing times : " + testingTimes + + ", total cost time : " + totalTime + " ms, average cost time : " + + (float) totalTime / testingTimes + " ms."); + } + /** * Testing activities manager which can record all history information about * node allocations. 
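
For illustration only (not part of the patch itself): a minimal client-side sketch of how the reworked /scheduler/app-activities/{appid} endpoint changed above could be queried with the new actions, limit and summarize options. The RM web address, the application id and the Jersey 1.x client wiring are assumptions made purely for this sketch; the parameter names and response strings are taken from the hunks above.

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.WebResource;
import javax.ws.rs.core.MediaType;

public class AppActivitiesClientSketch {
  public static void main(String[] args) {
    // Assumed RM address and application id, for illustration only.
    String appId = args.length > 0 ? args[0] : "application_1560000000000_0001";
    WebResource base = Client.create()
        .resource("http://localhost:8088")
        .path("ws/v1/cluster/scheduler/app-activities")
        .path(appId);

    // actions=refresh only (re)starts recording for this app; no activities
    // are returned, matching the "Successfully notified actions" response.
    String ack = base.queryParam("actions", "refresh")
        .accept(MediaType.APPLICATION_JSON)
        .get(String.class);
    System.out.println(ack);

    // A later call with actions=get fetches what has been recorded, keeping
    // only the last 3 scheduling attempts and summarizing them across nodes.
    String activities = base.queryParam("actions", "get")
        .queryParam("limit", "3")
        .queryParam("summarize", "true")
        .accept(MediaType.APPLICATION_JSON)
        .get(String.class);
    System.out.println(activities);
  }
}

Splitting the two calls mirrors the flow the tests above rely on: recording stays active only for maxTime seconds after a refresh, so a get issued too late simply reports "waiting for display".
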
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java index da898627f94bb..666e5fe9a5dd4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java @@ -41,6 +41,8 @@ import java.util.HashSet; import java.util.List; import java.util.function.Predicate; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import static org.junit.Assert.assertEquals; @@ -209,4 +211,17 @@ public static JSONObject requestWebResource(WebResource webResource, response.getType().toString()); return response.getEntity(JSONObject.class); } + + /** + * Convert format using {name} (HTTP base) into %s (Java based). + * @param format Initial format using {}. + * @param args Arguments for the format. + * @return New format using %s. + */ + public static String format(String format, Object... args) { + Pattern p = Pattern.compile("\\{.*?}"); + Matcher m = p.matcher(format); + String newFormat = m.replaceAll("%s"); + return String.format(newFormat, args); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java index 1e08f05e13414..8bdecb769d0cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java @@ -437,25 +437,17 @@ public void testAppActivityJSON() throws Exception { RMApp app1 = rm.submitApp(10, "app1", "user1", null, "b1"); //Get JSON - WebResource r = resource(); + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add("appId", app1.getApplicationId().toString()); - ClientResponse response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - JSONObject json = response.getEntity(JSONObject.class); + ActivitiesTestUtils.requestWebResource(r, params); + nm.nodeHeartbeat(true); Thread.sleep(5000); //Get JSON - response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - 
assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); //Check app activities verifyNumberOfAllocations(json, 1); @@ -502,25 +494,17 @@ public void testAppAssignMultipleContainersPerNodeHeartbeat() 10)), null); //Get JSON - WebResource r = resource(); + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add("appId", app1.getApplicationId().toString()); - ClientResponse response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - JSONObject json = response.getEntity(JSONObject.class); + ActivitiesTestUtils.requestWebResource(r, params); + nm.nodeHeartbeat(true); Thread.sleep(5000); //Get JSON - response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); verifyNumberOfAllocations(json, 10); @@ -555,26 +539,17 @@ public void testAppAssignWithoutAvailableResource() throws Exception { 10)), null); //Get JSON - WebResource r = resource(); + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add("appId", app1.getApplicationId().toString()); - ClientResponse response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - JSONObject json = response.getEntity(JSONObject.class); + ActivitiesTestUtils.requestWebResource(r, params); + nm.nodeHeartbeat(true); Thread.sleep(5000); //Get JSON - response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); - + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); verifyNumberOfAllocations(json, 0); } finally { rm.stop(); @@ -590,24 +565,14 @@ public void testAppNoNM() throws Exception { RMApp app1 = rm.submitApp(1024, "app1", "user1", null, "b1"); //Get JSON - WebResource r = resource(); + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add("appId", app1.getApplicationId().toString()); - ClientResponse response = r.path("ws").path("v1").path("cluster").path( - 
"scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - JSONObject json = response.getEntity(JSONObject.class); + ActivitiesTestUtils.requestWebResource(r, params); //Get JSON - response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); - + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); verifyNumberOfAllocations(json, 0); } finally { rm.stop(); @@ -639,49 +604,23 @@ public void testAppReserveNewContainer() throws Exception { 10)), null); // Reserve new container - WebResource r = resource(); + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add("appId", app1.getApplicationId().toString()); - ClientResponse response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - JSONObject json = response.getEntity(JSONObject.class); + ActivitiesTestUtils.requestWebResource(r, params); nm2.nodeHeartbeat(true); Thread.sleep(1000); - response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); - + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); verifyNumberOfAllocations(json, 1); // Do a node heartbeat again without releasing container from app2 - r = resource(); - params = new MultivaluedMapImpl(); - params.add("appId", app1.getApplicationId().toString()); - response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); - nm2.nodeHeartbeat(true); Thread.sleep(1000); - response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); - + json = ActivitiesTestUtils.requestWebResource(r, params); verifyNumberOfAllocations(json, 2); // Finish application 2 @@ -693,26 +632,10 @@ public void testAppReserveNewContainer() throws Exception { RMContainerEventType.FINISHED); // Do a node heartbeat again - r = resource(); - params = new MultivaluedMapImpl(); - params.add("appId", app1.getApplicationId().toString()); - response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - 
MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); - nm2.nodeHeartbeat(true); Thread.sleep(1000); - response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); - + json = ActivitiesTestUtils.requestWebResource(r, params); verifyNumberOfAllocations(json, 3); } finally { rm.stop(); @@ -847,15 +770,11 @@ public void testAppInsufficientResourceDiagnostic() RMApp app1 = rm.submitApp(512, "app1", "user1", null, "b1"); MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); - WebResource r = resource(); + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add("appId", app1.getApplicationId().toString()); - ClientResponse response = r.path("ws").path("v1").path("cluster") - .path("scheduler/app-activities").queryParams(params) - .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - JSONObject json = response.getEntity(JSONObject.class); + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); assertEquals("waiting for display", json.getString("diagnostic")); @@ -867,14 +786,7 @@ public void testAppInsufficientResourceDiagnostic() cs.handle(new NodeUpdateSchedulerEvent( rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); - response = - r.path("ws").path("v1").path("cluster") - .path("scheduler/app-activities").queryParams(params) - .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); - + json = ActivitiesTestUtils.requestWebResource(r, params); verifyNumberOfAllocations(json, 1); JSONObject allocationObj = json.getJSONObject("allocations"); JSONObject requestAllocationObj = @@ -904,15 +816,11 @@ public void testAppPlacementConstraintDiagnostic() RMApp app1 = rm.submitApp(512, "app1", "user1", null, "b1"); MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); - WebResource r = resource(); + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add("appId", app1.getApplicationId().toString()); - ClientResponse response = r.path("ws").path("v1").path("cluster") - .path("scheduler/app-activities").queryParams(params) - .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - JSONObject json = response.getEntity(JSONObject.class); + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); assertEquals("waiting for display", json.getString("diagnostic")); @@ -930,14 +838,7 @@ public void testAppPlacementConstraintDiagnostic() cs.handle(new NodeUpdateSchedulerEvent( 
rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); - response = - r.path("ws").path("v1").path("cluster") - .path("scheduler/app-activities").queryParams(params) - .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); - + json = ActivitiesTestUtils.requestWebResource(r, params); verifyNumberOfAllocations(json, 1); JSONObject allocationObj = json.getJSONObject("allocations"); JSONObject requestAllocationObj = @@ -967,9 +868,9 @@ public void testAppFilterByRequestPrioritiesAndAllocationRequestIds() MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) - .path(RMWSConsts.SCHEDULER_APP_ACTIVITIES); + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add("appId", app1.getApplicationId().toString()); JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); assertEquals("waiting for display", json.getString("diagnostic")); @@ -1064,4 +965,228 @@ public void testAppFilterByRequestPrioritiesAndAllocationRequestIds() rm.stop(); } } + + @Test(timeout = 30000) + public void testAppLimit() throws Exception { + rm.start(); + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * 1024); + MockNM nm2 = rm.registerNode("127.0.0.2:1234", 8 * 1024); + try { + RMApp app1 = rm.submitApp(512, "app1", "user1", null, "b1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); + + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); + MultivaluedMapImpl params = new MultivaluedMapImpl(); + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); + assertEquals("waiting for display", + json.getString("diagnostic")); + + // am1 asks for 1 * 5GB container + am1.allocate("*", 5120, 1, new ArrayList<>()); + // trigger scheduling triple, there will be 3 app activities in cache + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + + // query all app activities without limit + json = ActivitiesTestUtils.requestWebResource(r, params); + verifyNumberOfAllocations(json, 3); + + // query all app activities with limit > 3 + params.putSingle(RMWSConsts.LIMIT, "10"); + json = ActivitiesTestUtils.requestWebResource(r, params); + verifyNumberOfAllocations(json, 3); + + // query app activities with limit = 2 + params.putSingle(RMWSConsts.LIMIT, "2"); + json = ActivitiesTestUtils.requestWebResource(r, params); + verifyNumberOfAllocations(json, 2); + + // query app activities with limit = 1 + params.putSingle(RMWSConsts.LIMIT, "1"); + json = ActivitiesTestUtils.requestWebResource(r, params); + verifyNumberOfAllocations(json, 1); + + // query all app activities with invalid limit + params.putSingle(RMWSConsts.LIMIT, "STRING"); + json = ActivitiesTestUtils.requestWebResource(r, params); + assertEquals("limit must be integer!", json.getString("diagnostic")); + + // query all app activities with limit = 0 + 
params.putSingle(RMWSConsts.LIMIT, "0"); + json = ActivitiesTestUtils.requestWebResource(r, params); + assertEquals("limit must be greater than 0!", + json.getString("diagnostic")); + + // query all app activities with limit < 0 + params.putSingle(RMWSConsts.LIMIT, "-3"); + json = ActivitiesTestUtils.requestWebResource(r, params); + assertEquals("limit must be greater than 0!", + json.getString("diagnostic")); + } finally { + rm.stop(); + } + } + + @Test(timeout = 30000) + public void testAppActions() throws Exception { + rm.start(); + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + MockNM nm1 = rm.registerNode("127.0.0.1:1234", 8 * 1024); + try { + RMApp app1 = rm.submitApp(512, "app1", "user1", null, "b1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); + // am1 asks for 10 * 512MB container + am1.allocate("*", 512, 10, new ArrayList<>()); + + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); + MultivaluedMapImpl params = new MultivaluedMapImpl(); + params.add("maxTime", 1); //only last for 1 second + + // testing invalid action + params.add(RMWSConsts.ACTIONS, "get"); + params.add(RMWSConsts.ACTIONS, "invalid-action"); + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); + assertTrue(json.getString("diagnostic").startsWith("Got invalid action")); + + /* + * testing get action + */ + params.putSingle(RMWSConsts.ACTIONS, "get"); + json = ActivitiesTestUtils.requestWebResource(r, params); + assertEquals("waiting for display", json.getString("diagnostic")); + + // trigger scheduling + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + + // app activities won't be recorded + params.putSingle(RMWSConsts.ACTIONS, "get"); + json = ActivitiesTestUtils.requestWebResource(r, params); + assertEquals("waiting for display", json.getString("diagnostic")); + + // trigger scheduling + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + + /* + * testing update action + */ + params.putSingle(RMWSConsts.ACTIONS, "refresh"); + json = ActivitiesTestUtils.requestWebResource(r, params); + assertEquals("Successfully notified actions: refresh", + json.getString("diagnostic")); + + // trigger scheduling + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + Thread.sleep(1000); + + // app activities should be recorded + params.putSingle(RMWSConsts.ACTIONS, "get"); + json = ActivitiesTestUtils.requestWebResource(r, params); + verifyNumberOfAllocations(json, 1); + + // trigger scheduling + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + Thread.sleep(1000); + + /* + * testing update and get actions + */ + params.remove(RMWSConsts.ACTIONS); + params.add(RMWSConsts.ACTIONS, "refresh"); + params.add(RMWSConsts.ACTIONS, "get"); + json = ActivitiesTestUtils.requestWebResource(r, params); + verifyNumberOfAllocations(json, 1); + + // trigger scheduling + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + Thread.sleep(1000); + + // more app activities should be recorded + json = ActivitiesTestUtils.requestWebResource(r, params); + verifyNumberOfAllocations(json, 2); + + // trigger scheduling + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + Thread.sleep(1000); + + 
// more app activities should be recorded + json = ActivitiesTestUtils.requestWebResource(r, params); + verifyNumberOfAllocations(json, 3); + } finally { + rm.stop(); + } + } + + @Test(timeout=30000) + public void testAppSummary() throws Exception { + rm.start(); + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + + MockNM nm1 = rm.registerNode("127.0.0.1:1234", 8 * 1024); + MockNM nm2 = rm.registerNode("127.0.0.2:1234", 4 * 1024); + MockNM nm3 = rm.registerNode("127.0.0.3:1234", 4 * 1024); + + try { + RMApp app1 = rm.submitApp(5120, "app1", "user1", null, "b1"); + + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); + MultivaluedMapImpl params = new MultivaluedMapImpl(); + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); + assertEquals("waiting for display", + json.getString("diagnostic")); + + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); + // am1 asks for 1 * 5GB container + am1.allocate(Arrays.asList(ResourceRequest + .newInstance(Priority.newInstance(0), "*", + Resources.createResource(5 * 1024), 1)), null); + // trigger scheduling + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm2.getNodeId()))); + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm3.getNodeId()))); + cs.handle(new NodeUpdateSchedulerEvent( + rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); + + params.add(RMWSConsts.SUMMARIZE, "true"); + params.add(RMWSConsts.GROUP_BY, RMWSConsts.ActivitiesGroupBy.DIAGNOSTIC); + json = ActivitiesTestUtils.requestWebResource(r, params); + + // verify that response contains an allocation summary for all nodes + verifyNumberOfAllocations(json, 1); + JSONObject allocation = json.getJSONObject("allocations"); + JSONObject reqestAllocation = + allocation.getJSONObject("requestAllocation"); + JSONArray attempts = reqestAllocation.getJSONArray("allocationAttempt"); + assertEquals(2, attempts.length()); + for (int i = 0; i < attempts.length(); i++) { + JSONObject attempt = attempts.getJSONObject(i); + if (attempt.getString("allocationState").equals("SKIPPED")) { + JSONArray nodeIds = attempt.optJSONArray("nodeIds"); + assertEquals(2, nodeIds.length()); + } else if (attempt.getString("allocationState").equals("RESERVED")) { + assertEquals(nm1.getNodeId().toString(), + attempt.getString("nodeIds")); + } + } + } finally { + rm.stop(); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java index 8383a0d28c617..8998221238a1b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivitiesWithMultiNodesEnabled.java @@ -249,15 +249,11 @@ public void testAppAssignContainer() throws Exception { 1)), null); //Trigger 
recording for this app - WebResource r = resource(); + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add(RMWSConsts.APP_ID, app1.getApplicationId().toString()); - ClientResponse response = r.path("ws").path("v1").path("cluster") - .path("scheduler/app-activities").queryParams(params) - .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - JSONObject json = response.getEntity(JSONObject.class); + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); assertEquals("waiting for display", json.getString("diagnostic")); //Trigger scheduling for this app @@ -267,12 +263,7 @@ public void testAppAssignContainer() throws Exception { //Check app activities, it should contain one allocation and // final allocation state is ALLOCATED - response = r.path("ws").path("v1").path("cluster") - .path("scheduler/app-activities").queryParams(params) - .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); + json = ActivitiesTestUtils.requestWebResource(r, params); verifyNumberOfAllocations(json, 1); @@ -382,16 +373,11 @@ public void testAppInsufficientResourceDiagnostic() throws Exception { RMApp app1 = rm.submitApp(3072, "app1", "user1", null, "b"); MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); - WebResource r = resource(); + WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add(RMWSConsts.APP_ID, app1.getApplicationId().toString()); - - ClientResponse response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - JSONObject json = response.getEntity(JSONObject.class); + JSONObject json = ActivitiesTestUtils.requestWebResource(r, params); assertEquals("waiting for display", json.getString("diagnostic")); //Request two containers with different priority for am1 @@ -409,14 +395,8 @@ public void testAppInsufficientResourceDiagnostic() throws Exception { cs.handle(new NodeUpdateSchedulerEvent( rm.getRMContext().getRMNodes().get(nm1.getNodeId()))); - response = r.path("ws").path("v1").path("cluster").path( - "scheduler/app-activities").queryParams(params).accept( - MediaType.APPLICATION_JSON).get(ClientResponse.class); - assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8, - response.getType().toString()); - json = response.getEntity(JSONObject.class); - //Check app activities + json = ActivitiesTestUtils.requestWebResource(r, params); verifyNumberOfAllocations(json, 2); JSONArray allocationArray = json.getJSONArray("allocations"); //Check first activity is for second allocation with RESERVED state @@ -539,9 +519,9 @@ public void testAppGroupByDiagnostics() throws Exception { MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); WebResource r = resource().path(RMWSConsts.RM_WEB_SERVICE_PATH) - 
.path(RMWSConsts.SCHEDULER_APP_ACTIVITIES); + .path(ActivitiesTestUtils.format(RMWSConsts.SCHEDULER_APP_ACTIVITIES, + app1.getApplicationId().toString())); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add(RMWSConsts.APP_ID, app1.getApplicationId().toString()); /* * test non-exist groupBy diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java index 7e6f306252186..bf0dee6c872b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java @@ -192,7 +192,8 @@ public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId, @Override public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId, String time, Set requestPriorities, - Set allocationRequestIds, String groupBy) { + Set allocationRequestIds, String groupBy, String limit, + Set actions, boolean summarize) { // time and appId are specified inside hsr return RouterWebServiceUtil.genericForward(webAppAddress, hsr, AppActivitiesInfo.class, HTTPMethods.GET, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java index 1c8b7a85f2976..1ed5f5929d934 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java @@ -1146,7 +1146,8 @@ public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId, @Override public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId, String time, Set requestPriorities, - Set allocationRequestIds, String groupBy) { + Set allocationRequestIds, String groupBy, String limit, + Set actions, boolean summarize) { throw new NotImplementedException("Code is not implemented"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java index 9327c6f688d16..9327547655595 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java @@ -95,6 +95,8 @@ import com.google.inject.Inject; import com.google.inject.Singleton; +import static 
org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices.DEFAULT_SUMMARIZE; + /** * RouterWebServices is a service that runs on each router that can be used to * intercept and inspect {@link RMWebServiceProtocol} messages from client to @@ -465,11 +467,16 @@ public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr, @QueryParam(RMWSConsts.REQUEST_PRIORITIES) Set requestPriorities, @QueryParam(RMWSConsts.ALLOCATION_REQUEST_IDS) Set allocationRequestIds, - @QueryParam(RMWSConsts.GROUP_BY) String groupBy) { + @QueryParam(RMWSConsts.GROUP_BY) String groupBy, + @QueryParam(RMWSConsts.LIMIT) String limit, + @QueryParam(RMWSConsts.ACTIONS) Set actions, + @QueryParam(RMWSConsts.SUMMARIZE) @DefaultValue(DEFAULT_SUMMARIZE) + boolean summarize) { init(); RequestInterceptorChainWrapper pipeline = getInterceptorChain(hsr); return pipeline.getRootInterceptor().getAppActivities(hsr, appId, time, - requestPriorities, allocationRequestIds, groupBy); + requestPriorities, allocationRequestIds, groupBy, limit, actions, + summarize); } @GET diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/BaseRouterWebServicesTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/BaseRouterWebServicesTest.java index 535c579a85d99..78aab5a961bba 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/BaseRouterWebServicesTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/BaseRouterWebServicesTest.java @@ -180,8 +180,8 @@ protected ActivitiesInfo getActivities(String user) protected AppActivitiesInfo getAppActivities(String user) throws IOException, InterruptedException { - return routerWebService.getAppActivities( - createHttpServletRequest(user), null, null, null, null, null); + return routerWebService.getAppActivities(createHttpServletRequest(user), + null, null, null, null, null, null, null, false); } protected ApplicationStatisticsInfo getAppStatistics(String user) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java index f93b397e386a9..50200ed2b7148 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java @@ -141,7 +141,8 @@ public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId, @Override public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId, String time, Set requestPriorities, - Set allocationRequestIds, String groupBy) { + Set allocationRequestIds, String groupBy, String limit, + Set actions, boolean summarize) { return new AppActivitiesInfo(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java index 126610cc475c4..eb7222f9f3d56 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java @@ -169,9 +169,11 @@ public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId, @Override public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId, String time, Set requestPriorities, - Set allocationRequestIds, String groupBy) { + Set allocationRequestIds, String groupBy, String limit, + Set actions, boolean summarize) { return getNextInterceptor().getAppActivities(hsr, appId, time, - requestPriorities, allocationRequestIds, groupBy); + requestPriorities, allocationRequestIds, groupBy, limit, + actions, summarize); } @Override From 940bcf01cd8a524170bdaf6966dd0ff814c323c6 Mon Sep 17 00:00:00 2001 From: "Chen, Junjie" Date: Thu, 13 Jun 2019 11:08:15 +0800 Subject: [PATCH 0182/1308] HDDS-1587. Support dynamically adding delegated classes from to isolated class loader. Contributed by Junjie Chen. (#942) --- .../common/src/main/bin/ozone-config.sh | 5 ++ .../hadoop/fs/ozone/FilteredClassLoader.java | 6 ++ .../fs/ozone/TestFilteredClassLoader.java | 63 +++++++++++++++++++ 3 files changed, 74 insertions(+) create mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java diff --git a/hadoop-ozone/common/src/main/bin/ozone-config.sh b/hadoop-ozone/common/src/main/bin/ozone-config.sh index d179a331ae990..5ccb646f72083 100755 --- a/hadoop-ozone/common/src/main/bin/ozone-config.sh +++ b/hadoop-ozone/common/src/main/bin/ozone-config.sh @@ -49,3 +49,8 @@ else exit 1 fi +# HADOOP_OZONE_DELEGATED_CLASSES defines a list of classes which will be loaded by default +# class loader of application instead of isolated class loader. With this way we can solve +# incompatible problem when using hadoop3.x + ozone with older hadoop version. +#export HADOOP_OZONE_DELEGATED_CLASSES= + diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java index e9e51de1bd057..2643cab41b8a9 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java @@ -22,6 +22,8 @@ import java.util.HashSet; import java.util.Set; +import org.apache.hadoop.util.StringUtils; + /** * Class loader which delegates the loading only for the selected class. 
* @@ -57,6 +59,10 @@ public FilteredClassLoader(URL[] urls, ClassLoader parent) { delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneFSStorageStatistics"); delegatedClasses.add("org.apache.hadoop.fs.ozone.Statistic"); delegatedClasses.add("org.apache.hadoop.fs.Seekable"); + + delegatedClasses.addAll(StringUtils.getTrimmedStringCollection( + System.getenv("HADOOP_OZONE_DELEGATED_CLASSES"))); + this.delegate = parent; systemClassLoader = getSystemClassLoader(); diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java new file mode 100644 index 0000000000000..26a77eb2e1c84 --- /dev/null +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ozone; + +import java.net.URL; +import java.util.ArrayList; +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.when; + +/** + * FilteredClassLoader test using mocks. + */ +@RunWith(PowerMockRunner.class) +@PrepareForTest({ FilteredClassLoader.class, OzoneFSInputStream.class}) +public class TestFilteredClassLoader { + @Test + public void testFilteredClassLoader() { + PowerMockito.mockStatic(System.class); + when(System.getenv("HADOOP_OZONE_DELEGATED_CLASSES")) + .thenReturn("org.apache.hadoop.fs.ozone.OzoneFSInputStream"); + + ClassLoader currentClassLoader = + TestFilteredClassLoader.class.getClassLoader(); + + List urls = new ArrayList<>(); + ClassLoader classLoader = new FilteredClassLoader( + urls.toArray(new URL[0]), currentClassLoader); + + try { + classLoader.loadClass( + "org.apache.hadoop.fs.ozone.OzoneFSInputStream"); + ClassLoader expectedClassLoader = + OzoneFSInputStream.class.getClassLoader(); + assertEquals(expectedClassLoader, currentClassLoader); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } + } +} From bcfd22833633e24881891208503971c8ef59d63c Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Thu, 13 Jun 2019 11:08:35 -0700 Subject: [PATCH 0183/1308] YARN-9599. TestContainerSchedulerQueuing#testQueueShedding fails intermittently. Contributed by Abhishek Modi. 
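The fix below swaps a hand-rolled sleep-and-retry loop for GenericTestUtils.waitFor, which polls a boolean condition at a fixed interval and throws a TimeoutException if it never becomes true. A minimal sketch of that idiom, assuming a JUnit test method declared with throws Exception and a ContainerScheduler reference named containerScheduler (illustrative only, not the patch text itself):

    import org.apache.hadoop.test.GenericTestUtils;

    // Poll every 100 ms; fail with a TimeoutException if the queue has not
    // reached the expected size within 3 seconds. No fixed Thread.sleep needed.
    GenericTestUtils.waitFor(
        () -> containerScheduler.getNumQueuedContainers() == 6,  // condition supplier
        100,    // check interval, milliseconds
        3000);  // overall timeout, milliseconds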
--- .../scheduler/TestContainerSchedulerQueuing.java | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java index 3e7a735a418c7..05360aba2d42b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java @@ -766,11 +766,9 @@ public void testQueueShedding() throws Exception { ContainerScheduler containerScheduler = containerManager.getContainerScheduler(); // Ensure all containers are properly queued. - int numTries = 30; - while ((containerScheduler.getNumQueuedContainers() < 6) && - (numTries-- > 0)) { - Thread.sleep(100); - } + GenericTestUtils.waitFor( + () -> containerScheduler.getNumQueuedContainers() == 6 + && metrics.getQueuedOpportunisticContainers() == 6, 100, 3000); Assert.assertEquals(6, containerScheduler.getNumQueuedContainers()); Assert.assertEquals(6, metrics.getQueuedOpportunisticContainers()); Assert.assertEquals(0, metrics.getQueuedGuaranteedContainers()); @@ -779,11 +777,8 @@ public void testQueueShedding() throws Exception { .newInstance(); containerQueuingLimit.setMaxQueueLength(2); containerScheduler.updateQueuingLimit(containerQueuingLimit); - numTries = 30; - while ((containerScheduler.getNumQueuedContainers() > 2) && - (numTries-- > 0)) { - Thread.sleep(100); - } + GenericTestUtils.waitFor( + () -> containerScheduler.getNumQueuedContainers() == 2, 100, 3000); Assert.assertEquals(2, containerScheduler.getNumQueuedContainers()); List statList = new ArrayList(); From 89acbbc0b993eaa4edd5f898fa7baa13ba367fb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Fri, 14 Jun 2019 01:04:14 +0200 Subject: [PATCH 0184/1308] HDDS-1677. 
Auditparser robot test shold use a world writable working directory (#958) --- .../main/smoketest/auditparser/auditparser.robot | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot b/hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot index 30790ec5e43e8..1caae755694dd 100644 --- a/hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot +++ b/hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot @@ -20,8 +20,9 @@ Library BuiltIn Resource ../commonlib.robot *** Variables *** -${user} hadoop -${count} 4 +${user} hadoop +${count} 4 +${auditworkdir} /tmp/ *** Keywords *** Set username @@ -38,15 +39,15 @@ Initiating freon to generate data Testing audit parser ${logdir} = Get Environment Variable HADOOP_LOG_DIR /var/log/hadoop ${logfile} = Execute ls -t "${logdir}" | grep om-audit | head -1 - Execute ozone auditparser /opt/hadoop/audit.db load "${logdir}/${logfile}" - ${result} = Execute ozone auditparser /opt/hadoop/audit.db template top5cmds + Execute ozone auditparser "${auditworkdir}/audit.db" load "${logdir}/${logfile}" + ${result} = Execute ozone auditparser "${auditworkdir}/audit.db" template top5cmds Should Contain ${result} ALLOCATE_KEY - ${result} = Execute ozone auditparser /opt/hadoop/audit.db template top5users + ${result} = Execute ozone auditparser "${auditworkdir}/audit.db" template top5users Run Keyword If '${SECURITY_ENABLED}' == 'true' Set username Should Contain ${result} ${user} - ${result} = Execute ozone auditparser /opt/hadoop/audit.db query "select count(*) from audit where op='CREATE_VOLUME' and RESULT='SUCCESS'" + ${result} = Execute ozone auditparser "${auditworkdir}/audit.db" query "select count(*) from audit where op='CREATE_VOLUME' and RESULT='SUCCESS'" ${result} = Convert To Number ${result} Should be true ${result}>${count} - ${result} = Execute ozone auditparser /opt/hadoop/audit.db query "select count(*) from audit where op='CREATE_BUCKET' and RESULT='SUCCESS'" + ${result} = Execute ozone auditparser "${auditworkdir}/audit.db" query "select count(*) from audit where op='CREATE_BUCKET' and RESULT='SUCCESS'" ${result} = Convert To Number ${result} Should be true ${result}>${count} From e094b3b4cd15fe9cd309884bc27a973bf9e4eb6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Fri, 14 Jun 2019 01:18:15 +0200 Subject: [PATCH 0185/1308] HDDS-1680. Create missing parent directories during the creation of HddsVolume dirs (#961) --- .../apache/hadoop/ozone/container/common/volume/HddsVolume.java | 2 +- .../hadoop/ozone/container/common/volume/TestVolumeSet.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 4eb16c166c5fd..3e89f9031389c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -204,7 +204,7 @@ private void initialize() throws IOException { switch (intialVolumeState) { case NON_EXISTENT: // Root directory does not exist. Create it. 
- if (!hddsRootDir.mkdir()) { + if (!hddsRootDir.mkdirs()) { throw new IOException("Cannot create directory " + hddsRootDir); } setState(VolumeState.NOT_FORMATTED); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 79eeb61495ae5..fa280ddb73084 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -230,7 +230,7 @@ public void testFailVolumes() throws Exception{ ozoneConfig.set(HDDS_DATANODE_DIR_KEY, readOnlyVolumePath.getAbsolutePath() + "," + volumePath.getAbsolutePath()); volSet = new VolumeSet(UUID.randomUUID().toString(), ozoneConfig); - assertTrue(volSet.getFailedVolumesList().size() == 1); + assertEquals(1, volSet.getFailedVolumesList().size()); assertEquals(readOnlyVolumePath, volSet.getFailedVolumesList().get(0) .getHddsRootDir()); From 54f9f75a443d7d167a7aa7d04a87e3f5af049887 Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Fri, 14 Jun 2019 10:17:25 +0900 Subject: [PATCH 0186/1308] HADOOP-16369. Fix zstandard shortname misspelled as zts. Contributed by Jonathan Eagles. --- hadoop-common-project/hadoop-common/pom.xml | 4 ++-- .../org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 5b600538d6ce0..b507b8b155869 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -741,7 +741,7 @@ - false + false true @@ -844,7 +844,7 @@ /p:CustomZstdPrefix=${zstd.prefix} /p:CustomZstdLib=${zstd.lib} /p:CustomZstdInclude=${zstd.include} - /p:RequireZstd=${require.ztsd} + /p:RequireZstd=${require.zstd} /p:CustomOpensslPrefix=${openssl.prefix} /p:CustomOpensslLib=${openssl.lib} /p:CustomOpensslInclude=${openssl.include} diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c index 289554b4cf140..41eb9e2c85a10 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c @@ -78,7 +78,7 @@ static __dlsym_ZSTD_isError dlsym_ZSTD_isError; static __dlsym_ZSTD_getErrorName dlsym_ZSTD_getErrorName; #endif -// Load the libztsd.so from disk +// Load the libzstd.so from disk JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_initIDs (JNIEnv *env, jclass clazz) { #ifdef UNIX // Load libzstd.so From 4f455290b15902e7e44c4b1a762bf915414b2bb6 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Thu, 13 Jun 2019 18:26:53 -0700 Subject: [PATCH 0187/1308] HDFS-14560. Allow block replication parameters to be refreshable. Contributed by Stephen O'Donnell. 
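The change below makes three replication work parameters reconfigurable at runtime, typically triggered with: hdfs dfsadmin -reconfig namenode <host:ipc_port> start. Each update is validated and then applied to the BlockManager while holding the namesystem write lock. A simplified sketch of that pattern follows; names such as oldVal stand in for values the real code reads from the configuration, and this is not the verbatim patch code:

    // Simplified sketch for one property; bm is the BlockManager.
    namesystem.writeLock();
    try {
      int parsed = Integer.parseInt(newVal);   // NumberFormatException for non-numeric input
      bm.setMaxReplicationStreams(parsed);     // rejects values <= 0 via Preconditions.checkArgument
      LOG.info("RECONFIGURE* changed {} to {}", property, parsed);
      return String.valueOf(parsed);
    } catch (IllegalArgumentException e) {     // also covers NumberFormatException
      throw new ReconfigurationException(property, newVal, oldVal, e);
    } finally {
      namesystem.writeUnlock();
    }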
--- .../server/blockmanagement/BlockManager.java | 71 ++++++++- .../hadoop/hdfs/server/namenode/NameNode.java | 64 +++++++- .../TestRefreshNamenodeReplicationConfig.java | 143 ++++++++++++++++++ .../hadoop/hdfs/tools/TestDFSAdmin.java | 2 +- 4 files changed, 276 insertions(+), 4 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index bc2141d986aef..2947b72730524 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -383,7 +383,7 @@ public long getTotalECBlockGroups() { final int maxCorruptFilesReturned; final float blocksInvalidateWorkPct; - final int blocksReplWorkMultiplier; + private int blocksReplWorkMultiplier; // whether or not to issue block encryption keys. final boolean encryptDataTransfer; @@ -897,11 +897,78 @@ private void dumpBlockMeta(Block block, PrintWriter out) { out.println(""); } - /** @return maxReplicationStreams */ + /** Returns the current setting for maxReplicationStreams, which is set by + * {@code DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY}. + * + * @return maxReplicationStreams + */ public int getMaxReplicationStreams() { return maxReplicationStreams; } + static private void ensurePositiveInt(int val, String key) { + Preconditions.checkArgument( + (val > 0), + key + " = '" + val + "' is invalid. " + + "It should be a positive, non-zero integer value."); + } + + /** + * Updates the value used for maxReplicationStreams, which is set by + * {@code DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY} initially. + * + * @param newVal - Must be a positive non-zero integer. + */ + public void setMaxReplicationStreams(int newVal) { + ensurePositiveInt(newVal, + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY); + maxReplicationStreams = newVal; + } + + /** Returns the current setting for maxReplicationStreamsHardLimit, set by + * {@code DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY}. + * + * @return maxReplicationStreamsHardLimit + */ + public int getReplicationStreamsHardLimit() { + return replicationStreamsHardLimit; + } + + /** + * Updates the value used for replicationStreamsHardLimit, which is set by + * {@code DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY} + * initially. + * + * @param newVal - Must be a positive non-zero integer. + */ + public void setReplicationStreamsHardLimit(int newVal) { + ensurePositiveInt(newVal, + DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY); + replicationStreamsHardLimit = newVal; + } + + /** Returns the current setting for blocksReplWorkMultiplier, set by + * {@code DFSConfigKeys. + * DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION}. + * + * @return maxReplicationStreamsHardLimit + */ + public int getBlocksReplWorkMultiplier() { + return blocksReplWorkMultiplier; + } + + /** + * Updates the value used for blocksReplWorkMultiplier, set by + * {@code DFSConfigKeys. + * DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION} initially. + * @param newVal - Must be a positive non-zero integer. 
+ */ + public void setBlocksReplWorkMultiplier(int newVal) { + ensurePositiveInt(newVal, + DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION); + blocksReplWorkMultiplier = newVal; + } + public int getDefaultStorageNum(BlockInfo block) { switch (block.getBlockType()) { case STRIPED: return ((BlockInfoStriped) block).getRealTotalBlockNum(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 1b4f7704d3783..e4c88563732df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap; import org.apache.hadoop.hdfs.server.aliasmap.InMemoryLevelDBAliasMapServer; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; @@ -165,6 +166,13 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.FS_PROTECTED_DIRECTORIES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT; + import static org.apache.hadoop.util.ExitUtil.terminate; import static org.apache.hadoop.util.ToolRunner.confirmPrompt; import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE; @@ -299,7 +307,10 @@ public enum OperationCategory { DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, FS_PROTECTED_DIRECTORIES, HADOOP_CALLER_CONTEXT_ENABLED_KEY, - DFS_STORAGE_POLICY_SATISFIER_MODE_KEY)); + DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, + DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, + DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, + DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION)); private static final String USAGE = "Usage: hdfs namenode [" + StartupOption.BACKUP.getName() + "] | \n\t[" @@ -2125,12 +2136,63 @@ protected String reconfigurePropertyImpl(String property, String newVal) return reconfigureIPCBackoffEnabled(newVal); } else if (property.equals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY)) { return reconfigureSPSModeEvent(newVal, property); + } else if (property.equals(DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY) + || property.equals(DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY) + || property.equals( + DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION)) { + return reconfReplicationParameters(newVal, property); } else { throw new 
ReconfigurationException(property, newVal, getConf().get( property)); } } + private String reconfReplicationParameters(final String newVal, + final String property) throws ReconfigurationException { + BlockManager bm = namesystem.getBlockManager(); + int newSetting; + namesystem.writeLock(); + try { + if (property.equals(DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY)) { + bm.setMaxReplicationStreams( + adjustNewVal(DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT, newVal)); + newSetting = bm.getMaxReplicationStreams(); + } else if (property.equals( + DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY)) { + bm.setReplicationStreamsHardLimit( + adjustNewVal(DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT, + newVal)); + newSetting = bm.getReplicationStreamsHardLimit(); + } else if ( + property.equals( + DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION)) { + bm.setBlocksReplWorkMultiplier( + adjustNewVal( + DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT, + newVal)); + newSetting = bm.getBlocksReplWorkMultiplier(); + } else { + throw new IllegalArgumentException("Unexpected property " + + property + "in reconfReplicationParameters"); + } + LOG.info("RECONFIGURE* changed {} to {}", property, newSetting); + return String.valueOf(newSetting); + } catch (IllegalArgumentException e) { + throw new ReconfigurationException(property, newVal, getConf().get( + property), e); + } finally { + namesystem.writeUnlock(); + } + } + + private int adjustNewVal(int defaultVal, String newVal) { + if (newVal == null) { + return defaultVal; + } else { + return Integer.parseInt(newVal); + } + } + private String reconfHeartbeatInterval(final DatanodeManager datanodeManager, final String property, final String newVal) throws ReconfigurationException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java new file mode 100644 index 0000000000000..8dc81f8c1a21d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.ReconfigurationException; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import java.io.IOException; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * This class tests the replication related parameters in the namenode can + * be refreshed dynamically, without a namenode restart. + */ +public class TestRefreshNamenodeReplicationConfig { + private MiniDFSCluster cluster = null; + private BlockManager bm; + + @Before + public void setup() throws IOException { + Configuration config = new Configuration(); + config.setInt( + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 8); + config.setInt( + DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, 10); + config.setInt( + DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, + 12); + + cluster = new MiniDFSCluster.Builder(config) + .nnTopology(MiniDFSNNTopology.simpleSingleNN(0, 0)) + .numDataNodes(0).build(); + cluster.waitActive(); + bm = cluster.getNameNode().getNamesystem().getBlockManager(); + } + + @After + public void teardown() throws IOException { + cluster.shutdown(); + } + + /** + * Tests to ensure each of the block replication parameters can be passed + * updated successfully. + */ + @Test(timeout = 90000) + public void testParamsCanBeReconfigured() throws ReconfigurationException { + + assertEquals(8, bm.getMaxReplicationStreams()); + assertEquals(10, bm.getReplicationStreamsHardLimit()); + assertEquals(12, bm.getBlocksReplWorkMultiplier()); + + cluster.getNameNode().reconfigurePropertyImpl( + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, "20"); + cluster.getNameNode().reconfigurePropertyImpl( + DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, + "22"); + cluster.getNameNode().reconfigurePropertyImpl( + DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, + "24"); + + assertEquals(20, bm.getMaxReplicationStreams()); + assertEquals(22, bm.getReplicationStreamsHardLimit()); + assertEquals(24, bm.getBlocksReplWorkMultiplier()); + } + + /** + * Tests to ensure reconfiguration fails with a negative, zero or string value + * value for each parameter. + */ + @Test(timeout = 90000) + public void testReconfigureFailsWithInvalidValues() throws Exception { + String[] keys = new String[]{ + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, + DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, + DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION + }; + + // Ensure we cannot set any of the parameters negative + for (String key : keys) { + ReconfigurationException e = + LambdaTestUtils.intercept(ReconfigurationException.class, + () -> cluster.getNameNode().reconfigurePropertyImpl(key, "-20")); + assertTrue(e.getCause() instanceof IllegalArgumentException); + assertEquals(key+" = '-20' is invalid. 
It should be a " + +"positive, non-zero integer value.", e.getCause().getMessage()); + } + // Ensure none of the values were updated from the defaults + assertEquals(8, bm.getMaxReplicationStreams()); + assertEquals(10, bm.getReplicationStreamsHardLimit()); + assertEquals(12, bm.getBlocksReplWorkMultiplier()); + + for (String key : keys) { + ReconfigurationException e = + LambdaTestUtils.intercept(ReconfigurationException.class, + () -> cluster.getNameNode().reconfigurePropertyImpl(key, "0")); + assertTrue(e.getCause() instanceof IllegalArgumentException); + assertEquals(key+" = '0' is invalid. It should be a " + +"positive, non-zero integer value.", e.getCause().getMessage()); + } + + // Ensure none of the values were updated from the defaults + assertEquals(8, bm.getMaxReplicationStreams()); + assertEquals(10, bm.getReplicationStreamsHardLimit()); + assertEquals(12, bm.getBlocksReplWorkMultiplier()); + + // Ensure none of the parameters can be set to a string value + for (String key : keys) { + ReconfigurationException e = + LambdaTestUtils.intercept(ReconfigurationException.class, + () -> cluster.getNameNode().reconfigurePropertyImpl(key, "str")); + assertTrue(e.getCause() instanceof NumberFormatException); + } + + // Ensure none of the values were updated from the defaults + assertEquals(8, bm.getMaxReplicationStreams()); + assertEquals(10, bm.getReplicationStreamsHardLimit()); + assertEquals(12, bm.getBlocksReplWorkMultiplier()); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index 063217bf9258e..90d0761a8e0e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -394,7 +394,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException { final List outs = Lists.newArrayList(); final List errs = Lists.newArrayList(); getReconfigurableProperties("namenode", address, outs, errs); - assertEquals(7, outs.size()); + assertEquals(10, outs.size()); assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(1)); assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(2)); assertEquals(errs.size(), 0); From 9ebbda342f2adbbce30820a6f8374d310e361ff8 Mon Sep 17 00:00:00 2001 From: dineshchitlangia Date: Fri, 14 Jun 2019 16:09:37 +0200 Subject: [PATCH 0188/1308] HADOOP-16372. 
Fix typo in DFSUtil getHttpPolicy method Closes #967 --- .../src/main/java/org/apache/hadoop/hdfs/DFSUtil.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 3cdf66d4f318a..b1af697a4e6eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -1476,7 +1476,7 @@ public static HttpConfig.Policy getHttpPolicy(Configuration conf) { DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT); HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr); if (policy == null) { - throw new HadoopIllegalArgumentException("Unregonized value '" + throw new HadoopIllegalArgumentException("Unrecognized value '" + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY); } From 3ba090f4360c81c9dfb575efa13b8161c7a5255b Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Fri, 14 Jun 2019 12:54:16 -0400 Subject: [PATCH 0189/1308] HADOOP-16366. Fixed ProxyUserAuthenticationFilterInitializer for timeline server. Contributed by Prabhu Joseph --- .../server/timelineservice/reader/TimelineReaderServer.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java index 49c1d4b1e6b05..10265c6586805 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java @@ -165,10 +165,10 @@ protected void addFilters(Configuration conf) { TimelineReaderAuthenticationFilterInitializer.class.getName())) { defaultInitializers.add( TimelineReaderAuthenticationFilterInitializer.class.getName()); - } else { - defaultInitializers.add( - ProxyUserAuthenticationFilterInitializer.class.getName()); } + } else { + defaultInitializers.add( + ProxyUserAuthenticationFilterInitializer.class.getName()); } defaultInitializers.add( From ae4143a529d74d94f205ca627c31360abfa11bfa Mon Sep 17 00:00:00 2001 From: Santosh Marella Date: Fri, 14 Jun 2019 10:35:33 -0700 Subject: [PATCH 0190/1308] HDFS-12914. Block report leases cause missing blocks until next report. Contributed by Santosh Marella, He Xiaoqiao. 
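The core of this fix is to take the block report lease decision once per blockReport RPC instead of once per storage, so a lease that expires mid-report can no longer leave part of the storages unprocessed. A condensed sketch of the reordered flow in NameNodeRpcServer, simplified from the diff that follows:

    // One lease check per RPC; only then walk every storage report.
    if (bm.checkBlockReportLease(context, nodeReg)) {
      for (StorageBlockReport report : reports) {
        noStaleStorages = bm.runBlockOp(() ->
            bm.processReport(nodeReg, report.getStorage(), report.getBlocks(), context));
      }
    }
    // A datanode that reports before registering is sent RegisterCommand.REGISTER
    // instead of having the whole report rejected.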
Signed-off-by: Wei-Chiu Chuang Co-authored-by: He Xiaoqiao --- .../server/blockmanagement/BlockManager.java | 21 ++++++++---- .../server/namenode/NameNodeRpcServer.java | 34 +++++++++++-------- 2 files changed, 34 insertions(+), 21 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 2947b72730524..8b9788a6fc236 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -2572,6 +2572,21 @@ private static class BlockInfoToAdd { } } + /** + * Check block report lease. + * @return true if lease exist and not expire + */ + public boolean checkBlockReportLease(BlockReportContext context, + final DatanodeID nodeID) throws UnregisteredNodeException { + if (context == null) { + return true; + } + DatanodeDescriptor node = datanodeManager.getDatanode(nodeID); + final long startTime = Time.monotonicNow(); + return blockReportLeaseManager.checkLease(node, startTime, + context.getLeaseId()); + } + /** * The given storage is reporting all its blocks. * Update the (storage{@literal -->}block list) and @@ -2619,12 +2634,6 @@ public boolean processReport(final DatanodeID nodeID, blockReportLeaseManager.removeLease(node); return !node.hasStaleStorages(); } - if (context != null) { - if (!blockReportLeaseManager.checkLease(node, startTime, - context.getLeaseId())) { - return false; - } - } if (storageInfo.getBlockReportCount() == 0) { // The first block report can be processed a lot more efficiently than diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 7a2a81cdf3c9e..31a5eb0b41ab1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -45,7 +45,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.Callable; import com.google.common.collect.Lists; @@ -175,6 +174,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.NodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports; @@ -1591,21 +1591,25 @@ public DatanodeCommand blockReport(final DatanodeRegistration nodeReg, } final BlockManager bm = namesystem.getBlockManager(); boolean noStaleStorages = false; - for (int r = 0; r < reports.length; r++) { - final BlockListAsLongs blocks = reports[r].getBlocks(); - // - // BlockManager.processReport accumulates information of prior calls - // for the same node and storage, so the value returned by the last - // call of this loop is the final updated value for noStaleStorage. 
- // - final int index = r; - noStaleStorages = bm.runBlockOp(new Callable() { - @Override - public Boolean call() throws IOException { - return bm.processReport(nodeReg, reports[index].getStorage(), - blocks, context); + try { + if (bm.checkBlockReportLease(context, nodeReg)) { + for (int r = 0; r < reports.length; r++) { + final BlockListAsLongs blocks = reports[r].getBlocks(); + // + // BlockManager.processReport accumulates information of prior calls + // for the same node and storage, so the value returned by the last + // call of this loop is the final updated value for noStaleStorage. + // + final int index = r; + noStaleStorages = bm.runBlockOp(() -> + bm.processReport(nodeReg, reports[index].getStorage(), + blocks, context)); } - }); + } + } catch (UnregisteredNodeException une) { + LOG.debug("Datanode {} is attempting to report but not register yet.", + nodeReg); + return RegisterCommand.REGISTER; } bm.removeBRLeaseIfNeeded(nodeReg, context); From cdc5de6448e429d6cb523b8a61bed8b1cb2fc263 Mon Sep 17 00:00:00 2001 From: Santosh Marella Date: Fri, 14 Jun 2019 13:00:56 -0700 Subject: [PATCH 0191/1308] HDFS-12914. Addendum patch. Block report leases cause missing blocks until next report. Contributed by Santosh Marella, He Xiaoqiao. Signed-off-by: Wei-Chiu Chuang Co-authored-by: He Xiaoqiao --- .../blockmanagement/TestBlockReportLease.java | 156 ++++++++++++++++++ 1 file changed, 156 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java new file mode 100644 index 0000000000000..3e60aa6278224 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java @@ -0,0 +1,156 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.blockmanagement; + +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; +import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; +import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; +import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; +import org.apache.hadoop.hdfs.server.protocol.StorageReport; +import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; + +/** + * Tests that BlockReportLease in BlockManager. + */ +public class TestBlockReportLease { + + /** + * Test check lease about one BlockReport with many StorageBlockReport. + * Before HDFS-12914, when batch storage report to NameNode, it will check + * less for one storage by one, So it could part storage report can + * be process normally, however, the rest storage report can not be process + * since check lease failed. + * After HDFS-12914, NameNode check lease once for every blockreport request, + * So this issue will not exist anymore. 
+ */ + @Test + public void testCheckBlockReportLease() throws Exception { + HdfsConfiguration conf = new HdfsConfiguration(); + Random rand = new Random(); + + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(1).build()) { + cluster.waitActive(); + + FSNamesystem fsn = cluster.getNamesystem(); + BlockManager blockManager = fsn.getBlockManager(); + BlockManager spyBlockManager = spy(blockManager); + fsn.setBlockManagerForTesting(spyBlockManager); + String poolId = cluster.getNamesystem().getBlockPoolId(); + + NamenodeProtocols rpcServer = cluster.getNameNodeRpc(); + + // Test based on one DataNode report to Namenode + DataNode dn = cluster.getDataNodes().get(0); + DatanodeDescriptor datanodeDescriptor = spyBlockManager + .getDatanodeManager().getDatanode(dn.getDatanodeId()); + + DatanodeRegistration dnRegistration = dn.getDNRegistrationForBP(poolId); + StorageReport[] storages = dn.getFSDataset().getStorageReports(poolId); + + // Send heartbeat and request full block report lease + HeartbeatResponse hbResponse = rpcServer.sendHeartbeat( + dnRegistration, storages, 0, 0, 0, 0, 0, null, true, null, null); + + DelayAnswer delayer = new DelayAnswer(BlockManager.LOG); + doAnswer(delayer).when(spyBlockManager).processReport( + any(DatanodeStorageInfo.class), + any(BlockListAsLongs.class), + any(BlockReportContext.class)); + + ExecutorService pool = Executors.newFixedThreadPool(1); + + // Trigger sendBlockReport + BlockReportContext brContext = new BlockReportContext(1, 0, + rand.nextLong(), hbResponse.getFullBlockReportLeaseId(), true); + Future sendBRfuturea = pool.submit(() -> { + // Build every storage with 100 blocks for sending report + DatanodeStorage[] datanodeStorages + = new DatanodeStorage[storages.length]; + for (int i = 0; i < storages.length; i++) { + datanodeStorages[i] = storages[i].getStorage(); + } + StorageBlockReport[] reports = createReports(datanodeStorages, 100); + + // Send blockReport + return rpcServer.blockReport(dnRegistration, poolId, reports, + brContext); + }); + + // Wait until BlockManager calls processReport + delayer.waitForCall(); + + // Remove full block report lease about dn + spyBlockManager.getBlockReportLeaseManager() + .removeLease(datanodeDescriptor); + + // Allow blockreport to proceed + delayer.proceed(); + + // Get result, it will not null if process successfully + DatanodeCommand datanodeCommand = sendBRfuturea.get(); + assertTrue(datanodeCommand instanceof FinalizeCommand); + assertEquals(poolId, ((FinalizeCommand)datanodeCommand) + .getBlockPoolId()); + } + } + + private StorageBlockReport[] createReports(DatanodeStorage[] dnStorages, + int numBlocks) { + int longsPerBlock = 3; + int blockListSize = 2 + numBlocks * longsPerBlock; + int numStorages = dnStorages.length; + StorageBlockReport[] storageBlockReports + = new StorageBlockReport[numStorages]; + for (int i = 0; i < numStorages; i++) { + List longs = new ArrayList(blockListSize); + longs.add(Long.valueOf(numBlocks)); + longs.add(0L); + for (int j = 0; j < blockListSize; ++j) { + longs.add(Long.valueOf(j)); + } + BlockListAsLongs blockList = BlockListAsLongs.decodeLongs(longs); + storageBlockReports[i] = new StorageBlockReport(dnStorages[i], blockList); + } + return storageBlockReports; + } +} From b24efa11ea2b3ecbae6578058aea89b6823d18d8 Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Fri, 14 Jun 2019 13:37:23 -0700 Subject: [PATCH 0192/1308] HDFS-14549. EditLogTailer shouldn't output full stack trace when interrupted. Contributed by Inigo Goiri. 
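Note on HDFS-14549 (the one-line diff below): the tailer sleeps between tail cycles, and an interrupt of that sleep is typically just a shutdown or state-transition signal rather than an error, so the patch logs only e.getMessage() at warn level instead of the whole stack trace. A minimal self-contained sketch of the resulting pattern, assuming SLF4J (which the {} placeholder implies); the class name and the extra debug-level line are illustrative additions, not part of the patch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class InterruptLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(InterruptLoggingSketch.class);

      void sleepQuietly(long sleepTimeMs) {
        try {
          Thread.sleep(sleepTimeMs);
        } catch (InterruptedException e) {
          // Expected during shutdown: record why the sleep ended, no stack trace.
          LOG.warn("Edit log tailer interrupted: {}", e.getMessage());
          // Hypothetical extra line, not in the patch: keep the full trace at
          // debug level for anyone who still needs it.
          LOG.debug("Edit log tailer interrupted", e);
        }
      }
    }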
--- .../apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index 9d0602e025221..11e05a218b0cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -483,7 +483,7 @@ private void doWork() { try { Thread.sleep(sleepTimeMs); } catch (InterruptedException e) { - LOG.warn("Edit log tailer interrupted", e); + LOG.warn("Edit log tailer interrupted: {}", e.getMessage()); } } } From c7554ffd5c5ea45aac434c44d543ac4d966eca43 Mon Sep 17 00:00:00 2001 From: dineshchitlangia Date: Fri, 14 Jun 2019 17:36:04 -0400 Subject: [PATCH 0193/1308] HADOOP-16373. Fix typo in FileSystemShell#test documentation (#968) --- .../hadoop-common/src/site/markdown/FileSystemShell.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md index 44927f2464489..93385b42d4f08 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md @@ -737,7 +737,7 @@ Usage: `hadoop fs -test -[defsz] URI` Options: -* -d: f the path is a directory, return 0. +* -d: if the path is a directory, return 0. * -e: if the path exists, return 0. * -f: if the path is a file, return 0. * -s: if the path is not empty, return 0. From cda9f3374573f0cb5ae4f26ba3fbc77aae45ec58 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Fri, 14 Jun 2019 18:59:14 -0400 Subject: [PATCH 0194/1308] YARN-8499 ATSv2 Generalize TimelineStorageMonitor. 
Contributed by Prabhu Joseph --- .../storage/TestTimelineReaderHBaseDown.java | 4 +- .../storage/HBaseStorageMonitor.java | 90 +++++++++++++++ .../storage/HBaseTimelineReaderImpl.java | 90 ++------------- .../storage/TimelineStorageMonitor.java | 106 ++++++++++++++++++ 4 files changed, 206 insertions(+), 84 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseStorageMonitor.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineStorageMonitor.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java index 786f529a7aa95..e738d3971d999 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java @@ -34,8 +34,8 @@ import java.util.concurrent.TimeoutException; import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS; -import static org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl.DATA_TO_RETRIEVE; -import static org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl.MONITOR_FILTERS; +import static org.apache.hadoop.yarn.server.timelineservice.storage.HBaseStorageMonitor.DATA_TO_RETRIEVE; +import static org.apache.hadoop.yarn.server.timelineservice.storage.HBaseStorageMonitor.MONITOR_FILTERS; public class TestTimelineReaderHBaseDown { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseStorageMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseStorageMonitor.java new file mode 100644 index 0000000000000..c433aa66becb2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseStorageMonitor.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.timelineservice.storage; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve; +import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters; +import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext; +import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils; +import org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineEntityReader; +import org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineEntityReaderFactory; + +/** + * HBase based implementation for {@link TimelineStorageMonitor}. + */ +public class HBaseStorageMonitor extends TimelineStorageMonitor { + + protected static final TimelineEntityFilters MONITOR_FILTERS = + new TimelineEntityFilters.Builder().entityLimit(1L).build(); + protected static final TimelineDataToRetrieve DATA_TO_RETRIEVE = + new TimelineDataToRetrieve(null, null, null, null, null, null); + + private Configuration monitorHBaseConf; + private Connection monitorConn; + private TimelineEntityReader reader; + + public HBaseStorageMonitor(Configuration conf) throws Exception { + super(conf, Storage.HBase); + this.initialize(conf); + } + + private void initialize(Configuration conf) throws Exception { + monitorHBaseConf = HBaseTimelineStorageUtils. 
+ getTimelineServiceHBaseConf(conf); + monitorHBaseConf.setInt("hbase.client.retries.number", 3); + monitorHBaseConf.setLong("hbase.client.pause", 1000); + long monitorInterval = conf.getLong( + YarnConfiguration.TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS, + YarnConfiguration.DEFAULT_TIMELINE_SERVICE_STORAGE_MONITOR_INTERVAL_MS + ); + monitorHBaseConf.setLong("hbase.rpc.timeout", monitorInterval); + monitorHBaseConf.setLong("hbase.client.scanner.timeout.period", + monitorInterval); + monitorHBaseConf.setInt("zookeeper.recovery.retry", 1); + monitorConn = ConnectionFactory.createConnection(monitorHBaseConf); + + String clusterId = conf.get(YarnConfiguration.RM_CLUSTER_ID, + YarnConfiguration.DEFAULT_RM_CLUSTER_ID); + TimelineReaderContext monitorContext = + new TimelineReaderContext(clusterId, null, null, null, null, + TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null, null); + reader = TimelineEntityReaderFactory.createMultipleEntitiesReader( + monitorContext, MONITOR_FILTERS, DATA_TO_RETRIEVE); + } + + @Override + public void healthCheck() throws Exception { + reader.readEntities(monitorHBaseConf, monitorConn); + } + + @Override + public void start() { + super.start(); + } + + @Override + public void stop() throws Exception { + super.stop(); + monitorConn.close(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java index 653126e10080d..4c71fd6b49eea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java @@ -20,10 +20,6 @@ import java.io.IOException; import java.util.Set; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Connection; @@ -31,8 +27,6 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.timeline.TimelineHealth; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; -import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType; -import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext; @@ -54,12 +48,7 @@ public class HBaseTimelineReaderImpl private Configuration hbaseConf = null; private Connection conn; - private Configuration monitorHBaseConf = null; - private Connection monitorConn; - private ScheduledExecutorService monitorExecutorService; - private 
TimelineReaderContext monitorContext; - private long monitorInterval; - private AtomicBoolean hbaseDown = new AtomicBoolean(); + private TimelineStorageMonitor storageMonitor; public HBaseTimelineReaderImpl() { super(HBaseTimelineReaderImpl.class.getName()); @@ -68,39 +57,15 @@ public HBaseTimelineReaderImpl() { @Override public void serviceInit(Configuration conf) throws Exception { super.serviceInit(conf); - - String clusterId = conf.get( - YarnConfiguration.RM_CLUSTER_ID, - YarnConfiguration.DEFAULT_RM_CLUSTER_ID); - monitorContext = - new TimelineReaderContext(clusterId, null, null, null, null, - TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null, null); - monitorInterval = conf.getLong( - YarnConfiguration.TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS, - YarnConfiguration.DEFAULT_TIMELINE_SERVICE_STORAGE_MONITOR_INTERVAL_MS); - - monitorHBaseConf = HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf); - monitorHBaseConf.setInt("hbase.client.retries.number", 3); - monitorHBaseConf.setLong("hbase.client.pause", 1000); - monitorHBaseConf.setLong("hbase.rpc.timeout", monitorInterval); - monitorHBaseConf.setLong("hbase.client.scanner.timeout.period", - monitorInterval); - monitorHBaseConf.setInt("zookeeper.recovery.retry", 1); - monitorConn = ConnectionFactory.createConnection(monitorHBaseConf); - - monitorExecutorService = Executors.newScheduledThreadPool(1); - hbaseConf = HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf); conn = ConnectionFactory.createConnection(hbaseConf); + storageMonitor = new HBaseStorageMonitor(conf); } @Override protected void serviceStart() throws Exception { super.serviceStart(); - LOG.info("Scheduling HBase liveness monitor at interval {}", - monitorInterval); - monitorExecutorService.scheduleAtFixedRate(new HBaseMonitor(), 0, - monitorInterval, TimeUnit.MILLISECONDS); + storageMonitor.start(); } @Override @@ -109,31 +74,18 @@ protected void serviceStop() throws Exception { LOG.info("closing the hbase Connection"); conn.close(); } - if (monitorExecutorService != null) { - monitorExecutorService.shutdownNow(); - if (!monitorExecutorService.awaitTermination(30, TimeUnit.SECONDS)) { - LOG.warn("failed to stop the monitir task in time. 
" + - "will still proceed to close the monitor."); - } - } - monitorConn.close(); + storageMonitor.stop(); super.serviceStop(); } - private void checkHBaseDown() throws IOException { - if (hbaseDown.get()) { - throw new IOException("HBase is down"); - } - } - public boolean isHBaseDown() { - return hbaseDown.get(); + return storageMonitor.isStorageDown(); } @Override public TimelineEntity getEntity(TimelineReaderContext context, TimelineDataToRetrieve dataToRetrieve) throws IOException { - checkHBaseDown(); + storageMonitor.checkStorageIsUp(); TimelineEntityReader reader = TimelineEntityReaderFactory.createSingleEntityReader(context, dataToRetrieve); @@ -144,7 +96,7 @@ public TimelineEntity getEntity(TimelineReaderContext context, public Set getEntities(TimelineReaderContext context, TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve) throws IOException { - checkHBaseDown(); + storageMonitor.checkStorageIsUp(); TimelineEntityReader reader = TimelineEntityReaderFactory.createMultipleEntitiesReader(context, filters, dataToRetrieve); @@ -154,7 +106,7 @@ public Set getEntities(TimelineReaderContext context, @Override public Set getEntityTypes(TimelineReaderContext context) throws IOException { - checkHBaseDown(); + storageMonitor.checkStorageIsUp(); EntityTypeReader reader = new EntityTypeReader(context); return reader.readEntityTypes(hbaseConf, conn); } @@ -171,30 +123,4 @@ public TimelineHealth getHealthStatus() { } } - protected static final TimelineEntityFilters MONITOR_FILTERS = - new TimelineEntityFilters.Builder().entityLimit(1L).build(); - protected static final TimelineDataToRetrieve DATA_TO_RETRIEVE = - new TimelineDataToRetrieve(null, null, null, null, null, null); - - private class HBaseMonitor implements Runnable { - @Override - public void run() { - try { - LOG.debug("Running HBase liveness monitor"); - TimelineEntityReader reader = - TimelineEntityReaderFactory.createMultipleEntitiesReader( - monitorContext, MONITOR_FILTERS, DATA_TO_RETRIEVE); - reader.readEntities(monitorHBaseConf, monitorConn); - - // on success, reset hbase down flag - if (hbaseDown.getAndSet(false)) { - LOG.debug("HBase request succeeded, assuming HBase up"); - } - } catch (Exception e) { - LOG.warn("Got failure attempting to read from timeline storage, " + - "assuming HBase down", e); - hbaseDown.getAndSet(true); - } - } - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineStorageMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineStorageMonitor.java new file mode 100644 index 0000000000000..fc96f19d75ebe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineStorageMonitor.java @@ -0,0 +1,106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.timelineservice.storage; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; + +/** + * This abstract class is for monitoring Health of Timeline Storage. + */ +public abstract class TimelineStorageMonitor { + private static final Logger LOG = LoggerFactory + .getLogger(TimelineStorageMonitor.class); + + /** Different Storages supported by ATSV2. */ + public enum Storage { + HBase + } + + private ScheduledExecutorService monitorExecutorService; + private long monitorInterval; + private Storage storage; + private AtomicBoolean storageDown = new AtomicBoolean(); + + public TimelineStorageMonitor(Configuration conf, Storage storage) { + this.storage = storage; + this.monitorInterval = conf.getLong( + YarnConfiguration.TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS, + YarnConfiguration.DEFAULT_TIMELINE_SERVICE_STORAGE_MONITOR_INTERVAL_MS + ); + } + + public void start() { + LOG.info("Scheduling {} storage monitor at interval {}", + this.storage, monitorInterval); + monitorExecutorService = Executors.newScheduledThreadPool(1); + monitorExecutorService.scheduleAtFixedRate(new MonitorThread(), 0, + monitorInterval, TimeUnit.MILLISECONDS); + } + + public void stop() throws Exception { + if (monitorExecutorService != null) { + monitorExecutorService.shutdownNow(); + if (!monitorExecutorService.awaitTermination(30, TimeUnit.SECONDS)) { + LOG.warn("Failed to stop the monitor task in time. " + + "will still proceed to close the monitor."); + } + } + } + + abstract public void healthCheck() throws Exception; + + public void checkStorageIsUp() throws IOException { + if (storageDown.get()) { + throw new IOException(storage + " is down"); + } + } + + public boolean isStorageDown() { + return storageDown.get(); + } + + private class MonitorThread implements Runnable { + @Override + public void run() { + try { + LOG.debug("Running Timeline Storage monitor"); + healthCheck(); + if (storageDown.getAndSet(false)) { + LOG.debug("{} health check succeeded, " + + "assuming storage is up", storage); + } + } catch (Exception e) { + LOG.warn(String.format("Got failure attempting to read from %s, " + + "assuming Storage is down", storage), e); + storageDown.getAndSet(true); + } + } + } + +} From 076618677d3524187e5be4b5401e25a9ca154230 Mon Sep 17 00:00:00 2001 From: cxorm Date: Sat, 15 Jun 2019 11:05:20 +0800 Subject: [PATCH 0195/1308] HADOOP-16336. finish variable is unused in ZStandardCompressor. Contributed by cxorm. 
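Note on YARN-8499 (above): TimelineStorageMonitor now owns the scheduling, the storageDown flag and the failure logging, so a reader backend only has to supply its probe by overriding healthCheck(). A rough sketch of what a non-HBase monitor could look like; the DummyStorageMonitor name, the probe callable and the Storage.DUMMY constant (which would have to be added to the Storage enum next to HBase) are hypothetical, not part of the patch:

    import java.util.concurrent.Callable;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineStorageMonitor;

    /** Hypothetical monitor for an imaginary storage backend. */
    public class DummyStorageMonitor extends TimelineStorageMonitor {

      // Any cheap read against the backend; throwing marks the storage down.
      private final Callable<Void> probe;

      public DummyStorageMonitor(Configuration conf, Callable<Void> probe) {
        super(conf, Storage.DUMMY); // hypothetical enum constant
        this.probe = probe;
      }

      @Override
      public void healthCheck() throws Exception {
        // A failure here makes the monitor thread set the down flag; the next
        // successful run clears it, as HBaseStorageMonitor does with its
        // flow-activity read.
        probe.call();
      }
    }

Everything else (start(), stop(), checkStorageIsUp() and isStorageDown()) is inherited unchanged, which is what lets the HBaseTimelineReaderImpl hunk above shrink.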
--- .../apache/hadoop/io/compress/zstd/ZStandardCompressor.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java index 7445502c989d8..716a19886f85b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java @@ -49,7 +49,7 @@ public class ZStandardCompressor implements Compressor { private int uncompressedDirectBufOff = 0, uncompressedDirectBufLen = 0; private boolean keepUncompressedBuf = false; private ByteBuffer compressedDirectBuf = null; - private boolean finish, finished; + private boolean finished; private long bytesRead = 0; private long bytesWritten = 0; @@ -180,7 +180,6 @@ public boolean needsInput() { @Override public void finish() { - finish = true; } @Override @@ -266,7 +265,6 @@ public long getBytesRead() { public void reset() { checkStream(); init(level, stream); - finish = false; finished = false; bytesRead = 0; bytesWritten = 0; From 8370a0ae1681f2836fa0c1c63e334a3fdafafd7b Mon Sep 17 00:00:00 2001 From: Adam Antal Date: Sat, 15 Jun 2019 06:45:26 -0700 Subject: [PATCH 0196/1308] HDFS-14203. Refactor OIV Delimited output entry building mechanism. Contributed by Adam Antal. Signed-off-by: Wei-Chiu Chuang --- .../PBImageDelimitedTextWriter.java | 155 ++++++++++-------- .../TestOfflineImageViewer.java | 113 ++++++++++++- 2 files changed, 203 insertions(+), 65 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java index 7b484511eb63b..29799e2728be1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java @@ -27,7 +27,6 @@ import java.io.IOException; import java.io.PrintStream; import java.text.SimpleDateFormat; -import java.util.Date; /** * A PBImageDelimitedTextWriter generates a text representation of the PB fsimage, @@ -44,80 +43,108 @@ * constructor. 
*/ public class PBImageDelimitedTextWriter extends PBImageTextWriter { - private static final String DATE_FORMAT="yyyy-MM-dd HH:mm"; - private final SimpleDateFormat dateFormatter = - new SimpleDateFormat(DATE_FORMAT); + private static final String DATE_FORMAT = "yyyy-MM-dd HH:mm"; + + static class OutputEntryBuilder { + private final SimpleDateFormat dateFormatter = + new SimpleDateFormat(DATE_FORMAT); + + private PBImageDelimitedTextWriter writer; + private Path path; + private int replication = 0; + private long modificationTime; + private long accessTime = 0; + private long preferredBlockSize = 0; + private int blocksCount = 0; + private long fileSize = 0; + private long nsQuota = 0; + private long dsQuota = 0; + + private String dirPermission = "-"; + private PermissionStatus permissionStatus; + private String aclPermission = ""; + + OutputEntryBuilder(PBImageDelimitedTextWriter writer, INode inode) { + this.writer = writer; + switch (inode.getType()) { + case FILE: + INodeFile file = inode.getFile(); + replication = file.getReplication(); + modificationTime = file.getModificationTime(); + accessTime = file.getAccessTime(); + preferredBlockSize = file.getPreferredBlockSize(); + blocksCount = file.getBlocksCount(); + fileSize = FSImageLoader.getFileSize(file); + permissionStatus = writer.getPermission(file.getPermission()); + if (file.hasAcl() && file.getAcl().getEntriesCount() > 0){ + aclPermission = "+"; + } + break; + case DIRECTORY: + INodeDirectory dir = inode.getDirectory(); + modificationTime = dir.getModificationTime(); + nsQuota = dir.getNsQuota(); + dsQuota = dir.getDsQuota(); + dirPermission = "d"; + permissionStatus = writer.getPermission(dir.getPermission()); + if (dir.hasAcl() && dir.getAcl().getEntriesCount() > 0){ + aclPermission = "+"; + } + break; + case SYMLINK: + INodeSymlink s = inode.getSymlink(); + modificationTime = s.getModificationTime(); + accessTime = s.getAccessTime(); + permissionStatus = writer.getPermission(s.getPermission()); + break; + default: + break; + } + } + + void setPath(Path path) { + this.path = path; + } + + public String build() { + assert permissionStatus != null : "The PermissionStatus is null!"; + assert permissionStatus.getUserName() != null : "User name is null!"; + assert permissionStatus.getGroupName() != null : "Group name is null!"; + + StringBuffer buffer = new StringBuffer(); + writer.append(buffer, path.toString()); + writer.append(buffer, replication); + writer.append(buffer, dateFormatter.format(modificationTime)); + writer.append(buffer, dateFormatter.format(accessTime)); + writer.append(buffer, preferredBlockSize); + writer.append(buffer, blocksCount); + writer.append(buffer, fileSize); + writer.append(buffer, nsQuota); + writer.append(buffer, dsQuota); + writer.append(buffer, dirPermission + + permissionStatus.getPermission().toString() + aclPermission); + writer.append(buffer, permissionStatus.getUserName()); + writer.append(buffer, permissionStatus.getGroupName()); + return buffer.substring(1); + } + } PBImageDelimitedTextWriter(PrintStream out, String delimiter, String tempPath) throws IOException { super(out, delimiter, tempPath); } - private String formatDate(long date) { - return dateFormatter.format(new Date(date)); - } - @Override public String getEntry(String parent, INode inode) { - StringBuffer buffer = new StringBuffer(); + OutputEntryBuilder entryBuilder = + new OutputEntryBuilder(this, inode); + String inodeName = inode.getName().toStringUtf8(); Path path = new Path(parent.isEmpty() ? 
"/" : parent, inodeName.isEmpty() ? "/" : inodeName); - append(buffer, path.toString()); - PermissionStatus p = null; - boolean isDir = false; - boolean hasAcl = false; - - switch (inode.getType()) { - case FILE: - INodeFile file = inode.getFile(); - p = getPermission(file.getPermission()); - hasAcl = file.hasAcl() && file.getAcl().getEntriesCount() > 0; - append(buffer, file.getReplication()); - append(buffer, formatDate(file.getModificationTime())); - append(buffer, formatDate(file.getAccessTime())); - append(buffer, file.getPreferredBlockSize()); - append(buffer, file.getBlocksCount()); - append(buffer, FSImageLoader.getFileSize(file)); - append(buffer, 0); // NS_QUOTA - append(buffer, 0); // DS_QUOTA - break; - case DIRECTORY: - INodeDirectory dir = inode.getDirectory(); - p = getPermission(dir.getPermission()); - hasAcl = dir.hasAcl() && dir.getAcl().getEntriesCount() > 0; - append(buffer, 0); // Replication - append(buffer, formatDate(dir.getModificationTime())); - append(buffer, formatDate(0)); // Access time. - append(buffer, 0); // Block size. - append(buffer, 0); // Num blocks. - append(buffer, 0); // Num bytes. - append(buffer, dir.getNsQuota()); - append(buffer, dir.getDsQuota()); - isDir = true; - break; - case SYMLINK: - INodeSymlink s = inode.getSymlink(); - p = getPermission(s.getPermission()); - append(buffer, 0); // Replication - append(buffer, formatDate(s.getModificationTime())); - append(buffer, formatDate(s.getAccessTime())); - append(buffer, 0); // Block size. - append(buffer, 0); // Num blocks. - append(buffer, 0); // Num bytes. - append(buffer, 0); // NS_QUOTA - append(buffer, 0); // DS_QUOTA - break; - default: - break; - } - assert p != null; - String dirString = isDir ? "d" : "-"; - String aclString = hasAcl ? "+" : ""; - append(buffer, dirString + p.getPermission().toString() + aclString); - append(buffer, p.getUserName()); - append(buffer, p.getGroupName()); - return buffer.substring(1); + entryBuilder.setPath(path); + + return entryBuilder.build(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java index 1895ada79d10a..4be26d385114f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java @@ -40,6 +40,9 @@ import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA; import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA_CODEC_NAME; import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA_OPTION; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; import static org.junit.Assert.assertEquals; @@ -98,6 +101,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import 
org.apache.hadoop.hdfs.DistributedFileSystem; @@ -105,7 +109,9 @@ import org.apache.hadoop.hdfs.protocol.BlockType; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; +import org.apache.hadoop.hdfs.server.namenode.FsImageProto; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; @@ -132,6 +138,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; +import com.google.protobuf.ByteString; public class TestOfflineImageViewer { private static final Logger LOG = @@ -146,6 +153,7 @@ public class TestOfflineImageViewer { private static final long FILE_NODE_ID_2 = 16389; private static final long FILE_NODE_ID_3 = 16394; private static final long DIR_NODE_ID = 16391; + private static final long SAMPLE_TIMESTAMP = 946684800000L; // namespace as written to dfs, to be compared with viewer's output final static HashMap writtenFiles = Maps.newHashMap(); @@ -659,6 +667,109 @@ public void testWebImageViewerSecureMode() throws Exception { } } + private FsImageProto.INodeSection.INode createSampleFileInode() { + HdfsProtos.BlockProto.Builder block = + HdfsProtos.BlockProto.newBuilder() + .setNumBytes(1024) + .setBlockId(8) + .setGenStamp(SAMPLE_TIMESTAMP); + FsImageProto.INodeSection.AclFeatureProto.Builder acl = + FsImageProto.INodeSection.AclFeatureProto.newBuilder() + .addEntries(2); + FsImageProto.INodeSection.INodeFile.Builder file = + FsImageProto.INodeSection.INodeFile.newBuilder() + .setReplication(5) + .setModificationTime(SAMPLE_TIMESTAMP) + .setAccessTime(SAMPLE_TIMESTAMP) + .setPreferredBlockSize(1024) + .addBlocks(block) + .addBlocks(block) + .addBlocks(block) + .setAcl(acl); + + return FsImageProto.INodeSection.INode.newBuilder() + .setType(FsImageProto.INodeSection.INode.Type.FILE) + .setFile(file) + .setName(ByteString.copyFromUtf8("file")) + .setId(3) + .build(); + } + + private FsImageProto.INodeSection.INode createSampleDirInode() { + FsImageProto.INodeSection.AclFeatureProto.Builder acl = + FsImageProto.INodeSection.AclFeatureProto.newBuilder() + .addEntries(2); + FsImageProto.INodeSection.INodeDirectory.Builder directory = + FsImageProto.INodeSection.INodeDirectory.newBuilder() + .setDsQuota(1000) + .setNsQuota(700) + .setModificationTime(SAMPLE_TIMESTAMP) + .setAcl(acl); + + return FsImageProto.INodeSection.INode.newBuilder() + .setType(FsImageProto.INodeSection.INode.Type.DIRECTORY) + .setDirectory(directory) + .setName(ByteString.copyFromUtf8("dir")) + .setId(3) + .build(); + } + + private FsImageProto.INodeSection.INode createSampleSymlink() { + FsImageProto.INodeSection.INodeSymlink.Builder symlink = + FsImageProto.INodeSection.INodeSymlink.newBuilder() + .setModificationTime(SAMPLE_TIMESTAMP) + .setAccessTime(SAMPLE_TIMESTAMP); + + return FsImageProto.INodeSection.INode.newBuilder() + .setType(FsImageProto.INodeSection.INode.Type.SYMLINK) + .setSymlink(symlink) + .setName(ByteString.copyFromUtf8("sym")) + .setId(5) + .build(); + } + + private PBImageDelimitedTextWriter createDelimitedWriterSpy() + throws IOException { + FsPermission fsPermission = new FsPermission( + FsAction.ALL, + FsAction.WRITE_EXECUTE, + FsAction.WRITE); + PermissionStatus permStatus = new PermissionStatus( + "user_1", + "group_1", + 
fsPermission); + + PBImageDelimitedTextWriter writer = new + PBImageDelimitedTextWriter(null, ",", ""); + PBImageDelimitedTextWriter writerSpy = spy(writer); + when(writerSpy.getPermission(anyLong())).thenReturn(permStatus); + return writerSpy; + } + + @Test + public void testWriterOutputEntryBuilderForFile() throws IOException { + assertEquals("/path/file,5,2000-01-01 00:00,2000-01-01 00:00," + + "1024,3,3072,0,0,-rwx-wx-w-+,user_1,group_1", + createDelimitedWriterSpy().getEntry("/path/", + createSampleFileInode())); + } + + @Test + public void testWriterOutputEntryBuilderForDirectory() throws IOException { + assertEquals("/path/dir,0,2000-01-01 00:00,1970-01-01 00:00" + + ",0,0,0,700,1000,drwx-wx-w-+,user_1,group_1", + createDelimitedWriterSpy().getEntry("/path/", + createSampleDirInode())); + } + + @Test + public void testWriterOutputEntryBuilderForSymlink() throws IOException { + assertEquals("/path/sym,0,2000-01-01 00:00,2000-01-01 00:00" + + ",0,0,0,0,0,-rwx-wx-w-,user_1,group_1", + createDelimitedWriterSpy().getEntry("/path/", + createSampleSymlink())); + } + @Test public void testPBDelimitedWriter() throws IOException, InterruptedException { testPBDelimitedWriter(""); // Test in memory db. @@ -667,7 +778,7 @@ public void testPBDelimitedWriter() throws IOException, InterruptedException { } @Test - public void testOutputEntryBuilder() throws IOException { + public void testCorruptionOutputEntryBuilder() throws IOException { PBImageCorruptionDetector corrDetector = new PBImageCorruptionDetector(null, ",", ""); PBImageCorruption c1 = new PBImageCorruption(342, true, false, 3); From e70aeb4d7e46c2b049409f524bc6f3fac290ab86 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Sat, 15 Jun 2019 13:47:07 -0700 Subject: [PATCH 0197/1308] HDDS-1601. Implement updating lastAppliedIndex after buffer flush to OM DB. (#972) --- .../om/ratis/OzoneManagerDoubleBuffer.java | 15 +++++++-- .../om/ratis/OzoneManagerRatisSnapshot.java | 32 +++++++++++++++++++ .../om/ratis/OzoneManagerStateMachine.java | 8 ++++- ...eManagerDoubleBufferWithDummyResponse.java | 12 ++++++- ...zoneManagerDoubleBufferWithOMResponse.java | 19 ++++++++++- 5 files changed, 81 insertions(+), 5 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisSnapshot.java diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index 810311583c514..8c25347449c64 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -64,18 +64,23 @@ public class OzoneManagerDoubleBuffer { private final AtomicLong flushIterations = new AtomicLong(0); private volatile boolean isRunning; + private final OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot; - public OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager) { + public OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager, + OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot) { this.currentBuffer = new ConcurrentLinkedQueue<>(); this.readyBuffer = new ConcurrentLinkedQueue<>(); this.omMetadataManager = omMetadataManager; + this.ozoneManagerRatisSnapShot = ozoneManagerRatisSnapShot; + isRunning = true; // Daemon thread which runs in back ground and flushes transactions to DB. 
daemon = new Daemon(this::flushTransactions); daemon.setName("OMDoubleBufferFlushThread"); daemon.start(); + } /** @@ -117,7 +122,13 @@ private void flushTransactions() { readyBuffer.clear(); // cleanup cache. cleanupCache(lastRatisTransactionIndex); - // TODO: update the last updated index in OzoneManagerStateMachine. + + // TODO: Need to revisit this logic, once we have multiple + // executors for volume/bucket request handling. As for now + // transactions are serialized this should be fine. + // update the last updated index in OzoneManagerStateMachine. + ozoneManagerRatisSnapShot.updateLastAppliedIndex( + lastRatisTransactionIndex); } } catch (InterruptedException ex) { Thread.currentThread().interrupt(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisSnapshot.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisSnapshot.java new file mode 100644 index 0000000000000..518026184a905 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisSnapshot.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.ratis; + +/** + * Functional interface for OM RatisSnapshot. + */ + +public interface OzoneManagerRatisSnapshot { + + /** + * Update lastAppliedIndex with the specified value in OzoneManager + * StateMachine. + * @param lastAppliedIndex + */ + void updateLastAppliedIndex(long lastAppliedIndex); +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 2577cb5dbebdb..718967a905f16 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -75,7 +75,8 @@ public OzoneManagerStateMachine(OzoneManagerRatisServer ratisServer) { this.omRatisServer = ratisServer; this.ozoneManager = omRatisServer.getOzoneManager(); this.ozoneManagerDoubleBuffer = - new OzoneManagerDoubleBuffer(ozoneManager.getMetadataManager()); + new OzoneManagerDoubleBuffer(ozoneManager.getMetadataManager(), + this::updateLastAppliedIndex); this.handler = new OzoneManagerHARequestHandlerImpl(ozoneManager, ozoneManagerDoubleBuffer); } @@ -375,6 +376,11 @@ private Message runCommand(OMRequest request, long trxLogIndex) { return OMRatisHelper.convertResponseToMessage(response); } + @SuppressWarnings("HiddenField") + public void updateLastAppliedIndex(long lastAppliedIndex) { + this.lastAppliedIndex = lastAppliedIndex; + } + /** * Submits read request to OM and returns the response Message. 
* @param request OMRequest diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index c616a28d4335a..116595500c0b9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -55,6 +55,9 @@ public class TestOzoneManagerDoubleBufferWithDummyResponse { private OMMetadataManager omMetadataManager; private OzoneManagerDoubleBuffer doubleBuffer; private AtomicLong trxId = new AtomicLong(0); + private OzoneManagerRatisSnapshot ozoneManagerRatisSnapshot; + private long lastAppliedIndex; + @Rule public TemporaryFolder folder = new TemporaryFolder(); @@ -66,7 +69,11 @@ public void setup() throws IOException { folder.newFolder().getAbsolutePath()); omMetadataManager = new OmMetadataManagerImpl(configuration); - doubleBuffer = new OzoneManagerDoubleBuffer(omMetadataManager); + ozoneManagerRatisSnapshot = index -> { + lastAppliedIndex = index; + }; + doubleBuffer = new OzoneManagerDoubleBuffer(omMetadataManager, + ozoneManagerRatisSnapshot); } @After @@ -94,6 +101,9 @@ public void testDoubleBufferWithDummyResponse() throws Exception { Assert.assertTrue(omMetadataManager.countRowsInTable( omMetadataManager.getBucketTable()) == (bucketCount)); Assert.assertTrue(doubleBuffer.getFlushIterations() > 0); + + // Check lastAppliedIndex is updated correctly or not. + Assert.assertEquals(bucketCount, lastAppliedIndex); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index 3b544449ef3ea..6a0bcb6a057f2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -65,6 +65,8 @@ public class TestOzoneManagerDoubleBufferWithOMResponse { private OMMetadataManager omMetadataManager; private OzoneManagerDoubleBuffer doubleBuffer; private AtomicLong trxId = new AtomicLong(0); + private OzoneManagerRatisSnapshot ozoneManagerRatisSnapshot; + private long lastAppliedIndex; @Rule public TemporaryFolder folder = new TemporaryFolder(); @@ -76,7 +78,11 @@ public void setup() throws IOException { folder.newFolder().getAbsolutePath()); omMetadataManager = new OmMetadataManagerImpl(configuration); - doubleBuffer = new OzoneManagerDoubleBuffer(omMetadataManager); + ozoneManagerRatisSnapshot = index -> { + lastAppliedIndex = index; + }; + doubleBuffer = new OzoneManagerDoubleBuffer(omMetadataManager, + ozoneManagerRatisSnapshot); } @After @@ -146,6 +152,9 @@ public void testDoubleBufferWithMixOfTransactions() throws Exception { checkCreateBuckets(bucketQueue); checkDeletedBuckets(deleteBucketQueue); + + // Check lastAppliedIndex is updated correctly or not. 
+ Assert.assertEquals(bucketCount + deleteCount + 1, lastAppliedIndex); } /** @@ -208,6 +217,9 @@ public void testDoubleBufferWithMixOfTransactionsParallel() throws Exception { checkCreateBuckets(bucketQueue); checkDeletedBuckets(deleteBucketQueue); + + // Check lastAppliedIndex is updated correctly or not. + Assert.assertEquals(bucketCount + deleteCount + 2, lastAppliedIndex); } /** @@ -321,6 +333,8 @@ private void checkDeletedBuckets(Queue public void testDoubleBuffer(int iterations, int bucketCount) throws Exception { try { + // Reset transaction id. + trxId.set(0); // Calling setup and stop here because this method is called from a // single test multiple times. setup(); @@ -343,6 +357,9 @@ public void testDoubleBuffer(int iterations, int bucketCount) omMetadataManager.getBucketTable()) == (bucketCount) * iterations); Assert.assertTrue(doubleBuffer.getFlushIterations() > 0); + + // Check lastAppliedIndex is updated correctly or not. + Assert.assertEquals((bucketCount + 1) * iterations, lastAppliedIndex); } finally { stop(); } From f9cc9e162175444efe9d5b07ecb9a795f750ca3c Mon Sep 17 00:00:00 2001 From: Gabor Bota Date: Sun, 16 Jun 2019 17:05:01 +0100 Subject: [PATCH 0198/1308] HADOOP-16279. S3Guard: Implement time-based (TTL) expiry for entries (and tombstones). Contributed by Gabor Bota. Change-Id: I73a2d2861901dedfe7a0e783b310fbb95e7c1af9 --- .../src/main/resources/core-default.xml | 8 +- .../org/apache/hadoop/fs/s3a/Constants.java | 12 +- .../org/apache/hadoop/fs/s3a/Listing.java | 2 +- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 39 +- .../fs/s3a/s3guard/DynamoDBMetadataStore.java | 99 +++-- .../fs/s3a/s3guard/ITtlTimeProvider.java | 34 ++ .../fs/s3a/s3guard/LocalMetadataStore.java | 111 ++++-- .../hadoop/fs/s3a/s3guard/MetadataStore.java | 87 ++++- .../fs/s3a/s3guard/NullMetadataStore.java | 13 +- .../apache/hadoop/fs/s3a/s3guard/S3Guard.java | 137 +++++-- .../hadoop/fs/s3a/s3guard/S3GuardTool.java | 9 +- .../site/markdown/tools/hadoop-aws/s3guard.md | 4 +- .../s3a/ITestS3GuardOutOfBandOperations.java | 349 +++++++++++++++++- .../apache/hadoop/fs/s3a/ITestS3GuardTtl.java | 194 +++++++++- .../s3guard/AbstractS3GuardToolTestBase.java | 2 +- .../s3guard/ITestDynamoDBMetadataStore.java | 10 +- .../ITestDynamoDBMetadataStoreScale.java | 5 +- .../fs/s3a/s3guard/MetadataStoreTestBase.java | 127 ++++++- .../s3a/s3guard/TestLocalMetadataStore.java | 7 +- .../fs/s3a/s3guard/TestNullMetadataStore.java | 5 + .../hadoop/fs/s3a/s3guard/TestS3Guard.java | 195 +++++++++- .../AbstractITestS3AMetadataStoreScale.java | 14 +- 22 files changed, 1287 insertions(+), 176 deletions(-) create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ITtlTimeProvider.java diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index b5056d1d23c4f..7ffc2adb461a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1502,12 +1502,10 @@ - fs.s3a.metadatastore.authoritative.dir.ttl - 3600000 + fs.s3a.metadatastore.metadata.ttl + 15m - This value sets how long a directory listing in the MS is considered as - authoritative. The value is in milliseconds. - MetadataStore should be authoritative to use this configuration knob. + This value sets how long an entry in a MetadataStore is valid. 
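Note on the core-default.xml change above: fs.s3a.metadatastore.metadata.ttl replaces fs.s3a.metadatastore.authoritative.dir.ttl and, as the S3AFileSystem hunk further below shows, it is read via Configuration.getTimeDuration, so it takes suffixed durations and bounds the lifetime of every MetadataStore entry, tombstones included. A minimal client-side sketch of overriding it; the 5m value, the bucket name and the class name are illustrative, not from the patch:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class S3GuardTtlExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Treat S3Guard MetadataStore entries older than five minutes as
        // expired; any unit understood by Configuration.getTimeDuration
        // (e.g. ms, s, m, h) is accepted.
        conf.set("fs.s3a.metadatastore.metadata.ttl", "5m");
        try (FileSystem fs =
            FileSystem.get(URI.create("s3a://example-bucket/"), conf)) {
          fs.getFileStatus(new Path("/"));
        }
      }
    }

Leaving the property unset keeps the 15m default that this patch ships in core-default.xml.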
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index a8dc161e5ec76..7334506367a1e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -353,10 +353,14 @@ private Constants() { /** * How long a directory listing in the MS is considered as authoritative. */ - public static final String METADATASTORE_AUTHORITATIVE_DIR_TTL = - "fs.s3a.metadatastore.authoritative.dir.ttl"; - public static final long DEFAULT_METADATASTORE_AUTHORITATIVE_DIR_TTL = - TimeUnit.MINUTES.toMillis(60); + public static final String METADATASTORE_METADATA_TTL = + "fs.s3a.metadatastore.metadata.ttl"; + + /** + * Default TTL in milliseconds: 15 minutes. + */ + public static final long DEFAULT_METADATASTORE_METADATA_TTL = + TimeUnit.MINUTES.toMillis(15); /** read ahead buffer size to prevent connection re-establishments. */ public static final String READAHEAD_RANGE = "fs.s3a.readahead.range"; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java index 0f8c52be186a1..b62c4569b6e62 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java @@ -464,7 +464,7 @@ private boolean buildNextStatusBatch(S3ListResult objects) { if (acceptor.accept(keyPath, summary) && filter.accept(keyPath)) { S3AFileStatus status = createFileStatus(keyPath, summary, owner.getDefaultBlockSize(keyPath), owner.getUsername(), - null, null); + summary.getETag(), null); LOG.debug("Adding: {}", status); stats.add(status); added++; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index e6850e9e7c5f5..4bd58d5136860 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -126,6 +126,7 @@ import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.fs.s3a.s3guard.PathMetadata; import org.apache.hadoop.fs.s3a.s3guard.S3Guard; +import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; import org.apache.hadoop.fs.s3native.S3xLoginHelper; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.fs.store.EtagChecksum; @@ -244,7 +245,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities, private AWSCredentialProviderList credentials; - private S3Guard.ITtlTimeProvider ttlTimeProvider; + private ITtlTimeProvider ttlTimeProvider; /** Add any deprecated keys. 
*/ @SuppressWarnings("deprecation") @@ -388,9 +389,11 @@ public void initialize(URI name, Configuration originalConf) getMetadataStore(), allowAuthoritative); } initMultipartUploads(conf); - long authDirTtl = conf.getLong(METADATASTORE_AUTHORITATIVE_DIR_TTL, - DEFAULT_METADATASTORE_AUTHORITATIVE_DIR_TTL); - ttlTimeProvider = new S3Guard.TtlTimeProvider(authDirTtl); + if (hasMetadataStore()) { + long authDirTtl = conf.getTimeDuration(METADATASTORE_METADATA_TTL, + DEFAULT_METADATASTORE_METADATA_TTL, TimeUnit.MILLISECONDS); + ttlTimeProvider = new S3Guard.TtlTimeProvider(authDirTtl); + } } catch (AmazonClientException e) { throw translateException("initializing ", new Path(name), e); } @@ -1341,7 +1344,7 @@ childDst, length, getDefaultBlockSize(childDst), username, } } - metadataStore.move(srcPaths, dstMetas); + metadataStore.move(srcPaths, dstMetas, ttlTimeProvider); if (!src.getParent().equals(dst.getParent())) { LOG.debug("source & dest parents are different; fix up dir markers"); @@ -1722,7 +1725,7 @@ void deleteObjectAtPath(Path f, String key, boolean isFile) instrumentation.directoryDeleted(); } deleteObject(key); - metadataStore.delete(f); + metadataStore.delete(f, ttlTimeProvider); } /** @@ -2143,7 +2146,7 @@ private boolean innerDelete(S3AFileStatus status, boolean recursive) } } } - metadataStore.deleteSubtree(f); + metadataStore.deleteSubtree(f, ttlTimeProvider); } else { LOG.debug("delete: Path is a file"); deleteObjectAtPath(f, key, true); @@ -2466,7 +2469,10 @@ S3AFileStatus innerGetFileStatus(final Path f, LOG.debug("Getting path status for {} ({})", path, key); // Check MetadataStore, if any. - PathMetadata pm = metadataStore.get(path, needEmptyDirectoryFlag); + PathMetadata pm = null; + if (hasMetadataStore()) { + pm = S3Guard.getWithTtl(metadataStore, path, ttlTimeProvider); + } Set tombstones = Collections.emptySet(); if (pm != null) { if (pm.isDeleted()) { @@ -2501,7 +2507,7 @@ S3AFileStatus innerGetFileStatus(final Path f, LOG.debug("S3Guard metadata for {} is outdated, updating it", path); return S3Guard.putAndReturn(metadataStore, s3AFileStatus, - instrumentation); + instrumentation, ttlTimeProvider); } } } @@ -2534,12 +2540,14 @@ S3AFileStatus innerGetFileStatus(final Path f, null, null); } // entry was found, save in S3Guard - return S3Guard.putAndReturn(metadataStore, s3FileStatus, instrumentation); + return S3Guard.putAndReturn(metadataStore, s3FileStatus, + instrumentation, ttlTimeProvider); } else { // there was no entry in S3Guard // retrieve the data and update the metadata store in the process. 
return S3Guard.putAndReturn(metadataStore, - s3GetFileStatus(path, key, tombstones), instrumentation); + s3GetFileStatus(path, key, tombstones), instrumentation, + ttlTimeProvider); } } @@ -3191,11 +3199,12 @@ void finishedWrite(String key, long length, String eTag, String versionId) // See note about failure semantics in S3Guard documentation try { if (hasMetadataStore()) { - S3Guard.addAncestors(metadataStore, p, username); + S3Guard.addAncestors(metadataStore, p, username, ttlTimeProvider); S3AFileStatus status = createUploadFileStatus(p, S3AUtils.objectRepresentsDirectory(key, length), length, getDefaultBlockSize(p), username, eTag, versionId); - S3Guard.putAndReturn(metadataStore, status, instrumentation); + S3Guard.putAndReturn(metadataStore, status, instrumentation, + ttlTimeProvider); } } catch (IOException e) { if (failOnMetadataWriteError) { @@ -3860,12 +3869,12 @@ public AWSCredentialProviderList shareCredentials(final String purpose) { } @VisibleForTesting - protected S3Guard.ITtlTimeProvider getTtlTimeProvider() { + public ITtlTimeProvider getTtlTimeProvider() { return ttlTimeProvider; } @VisibleForTesting - protected void setTtlTimeProvider(S3Guard.ITtlTimeProvider ttlTimeProvider) { + protected void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) { this.ttlTimeProvider = ttlTimeProvider; } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java index fa1a203fc7213..f668c6affdc92 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java @@ -189,8 +189,10 @@ * directory helps prevent unnecessary queries during traversal of an entire * sub-tree. * - * Some mutating operations, notably {@link #deleteSubtree(Path)} and - * {@link #move(Collection, Collection)}, are less efficient with this schema. + * Some mutating operations, notably + * {@link MetadataStore#deleteSubtree(Path, ITtlTimeProvider)} and + * {@link MetadataStore#move(Collection, Collection, ITtlTimeProvider)}, + * are less efficient with this schema. * They require mutating multiple items in the DynamoDB table. * * By default, DynamoDB access is performed within the same AWS region as @@ -471,14 +473,15 @@ private void initDataAccessRetries(Configuration config) { @Override @Retries.RetryTranslated - public void delete(Path path) throws IOException { - innerDelete(path, true); + public void delete(Path path, ITtlTimeProvider ttlTimeProvider) + throws IOException { + innerDelete(path, true, ttlTimeProvider); } @Override @Retries.RetryTranslated public void forgetMetadata(Path path) throws IOException { - innerDelete(path, false); + innerDelete(path, false, null); } /** @@ -487,10 +490,13 @@ public void forgetMetadata(Path path) throws IOException { * There is no check as to whether the entry exists in the table first. * @param path path to delete * @param tombstone flag to create a tombstone marker + * @param ttlTimeProvider The time provider to set last_updated. Must not + * be null if tombstone is true. * @throws IOException I/O error. 
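As an aside, the tombstone-stamping pattern that both store implementations repeat in this patch can be read in isolation as the sketch below; the helper class and method name are invented for illustration only.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider;
import org.apache.hadoop.fs.s3a.s3guard.PathMetadata;

// Illustrative helper (not part of the patch): a tombstone entry is stamped
// with the provider's "now" so later TTL checks and tombstone pruning can
// reason about its age.
final class TombstoneExample {
  static PathMetadata tombstoneWithTimestamp(Path path,
      ITtlTimeProvider ttlTimeProvider) {
    final PathMetadata pmTombstone = PathMetadata.tombstone(path);
    pmTombstone.setLastUpdated(ttlTimeProvider.getNow());
    return pmTombstone;
  }
}
```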
*/ @Retries.RetryTranslated - private void innerDelete(final Path path, boolean tombstone) + private void innerDelete(final Path path, boolean tombstone, + ITtlTimeProvider ttlTimeProvider) throws IOException { checkPath(path); LOG.debug("Deleting from table {} in region {}: {}", @@ -505,8 +511,13 @@ private void innerDelete(final Path path, boolean tombstone) // on that of S3A itself boolean idempotent = S3AFileSystem.DELETE_CONSIDERED_IDEMPOTENT; if (tombstone) { + Preconditions.checkArgument(ttlTimeProvider != null, "ttlTimeProvider " + + "must not be null"); + final PathMetadata pmTombstone = PathMetadata.tombstone(path); + // update the last updated field of record when putting a tombstone + pmTombstone.setLastUpdated(ttlTimeProvider.getNow()); Item item = PathMetadataDynamoDBTranslation.pathMetadataToItem( - new DDBPathMetadata(PathMetadata.tombstone(path))); + new DDBPathMetadata(pmTombstone)); writeOp.retry( "Put tombstone", path.toString(), @@ -524,7 +535,8 @@ private void innerDelete(final Path path, boolean tombstone) @Override @Retries.RetryTranslated - public void deleteSubtree(Path path) throws IOException { + public void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider) + throws IOException { checkPath(path); LOG.debug("Deleting subtree from table {} in region {}: {}", tableName, region, path); @@ -537,7 +549,7 @@ public void deleteSubtree(Path path) throws IOException { for (DescendantsIterator desc = new DescendantsIterator(this, meta); desc.hasNext();) { - innerDelete(desc.next().getPath(), true); + innerDelete(desc.next().getPath(), true, ttlTimeProvider); } } @@ -731,7 +743,8 @@ Collection completeAncestry( @Override @Retries.RetryTranslated public void move(Collection pathsToDelete, - Collection pathsToCreate) throws IOException { + Collection pathsToCreate, ITtlTimeProvider ttlTimeProvider) + throws IOException { if (pathsToDelete == null && pathsToCreate == null) { return; } @@ -754,7 +767,11 @@ public void move(Collection pathsToDelete, } if (pathsToDelete != null) { for (Path meta : pathsToDelete) { - newItems.add(new DDBPathMetadata(PathMetadata.tombstone(meta))); + Preconditions.checkArgument(ttlTimeProvider != null, "ttlTimeProvider" + + " must not be null"); + final PathMetadata pmTombstone = PathMetadata.tombstone(meta); + pmTombstone.setLastUpdated(ttlTimeProvider.getNow()); + newItems.add(new DDBPathMetadata(pmTombstone)); } } @@ -1024,14 +1041,37 @@ public void destroy() throws IOException { } @Retries.RetryTranslated - private ItemCollection expiredFiles(long modTime, - String keyPrefix) throws IOException { - String filterExpression = - "mod_time < :mod_time and begins_with(parent, :parent)"; - String projectionExpression = "parent,child"; - ValueMap map = new ValueMap() - .withLong(":mod_time", modTime) - .withString(":parent", keyPrefix); + private ItemCollection expiredFiles(PruneMode pruneMode, + long cutoff, String keyPrefix) throws IOException { + + String filterExpression; + String projectionExpression; + ValueMap map; + + switch (pruneMode) { + case ALL_BY_MODTIME: + filterExpression = + "mod_time < :mod_time and begins_with(parent, :parent)"; + projectionExpression = "parent,child"; + map = new ValueMap() + .withLong(":mod_time", cutoff) + .withString(":parent", keyPrefix); + break; + case TOMBSTONES_BY_LASTUPDATED: + filterExpression = + "last_updated < :last_updated and begins_with(parent, :parent) " + + "and is_deleted = :is_deleted"; + projectionExpression = "parent,child"; + map = new ValueMap() + .withLong(":last_updated", cutoff) 
+ .withString(":parent", keyPrefix) + .withBoolean(":is_deleted", true); + break; + default: + throw new UnsupportedOperationException("Unsupported prune mode: " + + pruneMode); + } + return readOp.retry( "scan", keyPrefix, @@ -1041,20 +1081,31 @@ private ItemCollection expiredFiles(long modTime, @Override @Retries.RetryTranslated - public void prune(long modTime) throws IOException { - prune(modTime, "/"); + public void prune(PruneMode pruneMode, long cutoff) throws IOException { + prune(pruneMode, cutoff, "/"); } /** * Prune files, in batches. There's a sleep between each batch. - * @param modTime Oldest modification time to allow + * + * @param pruneMode The mode of operation for the prune For details see + * {@link MetadataStore#prune(PruneMode, long)} + * @param cutoff Oldest modification time to allow * @param keyPrefix The prefix for the keys that should be removed * @throws IOException Any IO/DDB failure. * @throws InterruptedIOException if the prune was interrupted */ @Override @Retries.RetryTranslated - public void prune(long modTime, String keyPrefix) throws IOException { + public void prune(PruneMode pruneMode, long cutoff, String keyPrefix) + throws IOException { + final ItemCollection items = + expiredFiles(pruneMode, cutoff, keyPrefix); + innerPrune(items); + } + + private void innerPrune(ItemCollection items) + throws IOException { int itemCount = 0; try { Collection deletionBatch = @@ -1064,7 +1115,7 @@ public void prune(long modTime, String keyPrefix) throws IOException { S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_DEFAULT, TimeUnit.MILLISECONDS); Set parentPathSet = new HashSet<>(); - for (Item item : expiredFiles(modTime, keyPrefix)) { + for (Item item : items) { DDBPathMetadata md = PathMetadataDynamoDBTranslation .itemToPathMetadata(item, username); Path path = md.getFileStatus().getPath(); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ITtlTimeProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ITtlTimeProvider.java new file mode 100644 index 0000000000000..daee6211b41d9 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ITtlTimeProvider.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.s3guard; + +/** + * This interface is defined for handling TTL expiry of metadata in S3Guard. + * + * TTL can be tested by implementing this interface and setting is as + * {@code S3Guard.ttlTimeProvider}. By doing this, getNow() can return any + * value preferred and flaky tests could be avoided. By default getNow() + * returns the EPOCH in runtime. 
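For context, a minimal sketch of how a test could plug a deterministic clock in through this interface, similar in spirit to the mocked providers used in the tests later in this patch; the class name and the `advance()` helper are illustrative, not part of the patch.

```java
import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider;

// Hypothetical test-only provider: the clock is advanced manually, so TTL
// expiry can be exercised without real waiting or flakiness.
class ManualClockTtlProvider implements ITtlTimeProvider {
  private long now = 0L;          // fake "current time" in milliseconds
  private final long ttlMillis;   // metadata TTL to report

  ManualClockTtlProvider(long ttlMillis) {
    this.ttlMillis = ttlMillis;
  }

  @Override
  public long getNow() {
    return now;
  }

  @Override
  public long getMetadataTtl() {
    return ttlMillis;
  }

  // move the fake clock forward, e.g. past the TTL to force expiry
  void advance(long millis) {
    now += millis;
  }
}
```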
+ * + * Time is measured in milliseconds, + */ +public interface ITtlTimeProvider { + long getNow(); + long getMetadataTtl(); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java index 9276388679866..6c13cd151d5da 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java @@ -112,32 +112,34 @@ public String toString() { } @Override - public void delete(Path p) throws IOException { - doDelete(p, false, true); + public void delete(Path p, ITtlTimeProvider ttlTimeProvider) + throws IOException { + doDelete(p, false, true, ttlTimeProvider); } @Override public void forgetMetadata(Path p) throws IOException { - doDelete(p, false, false); + doDelete(p, false, false, null); } @Override - public void deleteSubtree(Path path) throws IOException { - doDelete(path, true, true); + public void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider) + throws IOException { + doDelete(path, true, true, ttlTimeProvider); } - private synchronized void doDelete(Path p, boolean recursive, boolean - tombstone) { + private synchronized void doDelete(Path p, boolean recursive, + boolean tombstone, ITtlTimeProvider ttlTimeProvider) { Path path = standardize(p); // Delete entry from file cache, then from cached parent directory, if any - deleteCacheEntries(path, tombstone); + deleteCacheEntries(path, tombstone, ttlTimeProvider); if (recursive) { // Remove all entries that have this dir as path prefix. - deleteEntryByAncestor(path, localCache, tombstone); + deleteEntryByAncestor(path, localCache, tombstone, ttlTimeProvider); } } @@ -191,7 +193,8 @@ public synchronized DirListingMetadata listChildren(Path p) throws @Override public void move(Collection pathsToDelete, - Collection pathsToCreate) throws IOException { + Collection pathsToCreate, + ITtlTimeProvider ttlTimeProvider) throws IOException { LOG.info("Move {} to {}", pathsToDelete, pathsToCreate); Preconditions.checkNotNull(pathsToDelete, "pathsToDelete is null"); @@ -205,7 +208,7 @@ public void move(Collection pathsToDelete, // 1. Delete pathsToDelete for (Path meta : pathsToDelete) { LOG.debug("move: deleting metadata {}", meta); - delete(meta); + delete(meta, ttlTimeProvider); } // 2. 
Create new destination path metadata @@ -332,18 +335,19 @@ public void destroy() throws IOException { } @Override - public void prune(long modTime) throws IOException{ - prune(modTime, ""); + public void prune(PruneMode pruneMode, long cutoff) throws IOException{ + prune(pruneMode, cutoff, ""); } @Override - public synchronized void prune(long modTime, String keyPrefix) { + public synchronized void prune(PruneMode pruneMode, long cutoff, + String keyPrefix) { // prune files // filter path_metadata (files), filter expired, remove expired localCache.asMap().entrySet().stream() .filter(entry -> entry.getValue().hasPathMeta()) - .filter(entry -> expired( - entry.getValue().getFileMeta().getFileStatus(), modTime, keyPrefix)) + .filter(entry -> expired(pruneMode, + entry.getValue().getFileMeta(), cutoff, keyPrefix)) .forEach(entry -> localCache.invalidate(entry.getKey())); @@ -358,28 +362,37 @@ public synchronized void prune(long modTime, String keyPrefix) { Collection newChildren = new LinkedList<>(); for (PathMetadata child : oldChildren) { - FileStatus status = child.getFileStatus(); - if (!expired(status, modTime, keyPrefix)) { + if (!expired(pruneMode, child, cutoff, keyPrefix)) { newChildren.add(child); } } - if (newChildren.size() != oldChildren.size()) { - DirListingMetadata dlm = - new DirListingMetadata(path, newChildren, false); - localCache.put(path, new LocalMetadataEntry(dlm)); - if (!path.isRoot()) { - DirListingMetadata parent = getDirListingMeta(path.getParent()); - if (parent != null) { - parent.setAuthoritative(false); - } - } - } + removeAuthoritativeFromParent(path, oldChildren, newChildren); }); } - private boolean expired(FileStatus status, long expiry, String keyPrefix) { + private void removeAuthoritativeFromParent(Path path, + Collection oldChildren, + Collection newChildren) { + if (newChildren.size() != oldChildren.size()) { + DirListingMetadata dlm = + new DirListingMetadata(path, newChildren, false); + localCache.put(path, new LocalMetadataEntry(dlm)); + if (!path.isRoot()) { + DirListingMetadata parent = getDirListingMeta(path.getParent()); + if (parent != null) { + parent.setAuthoritative(false); + } + } + } + } + + private boolean expired(PruneMode pruneMode, PathMetadata metadata, + long cutoff, String keyPrefix) { + final S3AFileStatus status = metadata.getFileStatus(); + final URI statusUri = status.getPath().toUri(); + // remove the protocol from path string to be able to compare - String bucket = status.getPath().toUri().getHost(); + String bucket = statusUri.getHost(); String statusTranslatedPath = ""; if(bucket != null && !bucket.isEmpty()){ // if there's a bucket, (well defined host in Uri) the pathToParentKey @@ -389,18 +402,33 @@ private boolean expired(FileStatus status, long expiry, String keyPrefix) { } else { // if there's no bucket in the path the pathToParentKey will fail, so // this is the fallback to get the path from status - statusTranslatedPath = status.getPath().toUri().getPath(); + statusTranslatedPath = statusUri.getPath(); + } + + boolean expired; + switch (pruneMode) { + case ALL_BY_MODTIME: + // Note: S3 doesn't track modification time on directories, so for + // consistency with the DynamoDB implementation we ignore that here + expired = status.getModificationTime() < cutoff && !status.isDirectory() + && statusTranslatedPath.startsWith(keyPrefix); + break; + case TOMBSTONES_BY_LASTUPDATED: + expired = metadata.getLastUpdated() < cutoff && metadata.isDeleted() + && statusTranslatedPath.startsWith(keyPrefix); + break; + default: + throw 
new UnsupportedOperationException("Unsupported prune mode: " + + pruneMode); } - // Note: S3 doesn't track modification time on directories, so for - // consistency with the DynamoDB implementation we ignore that here - return status.getModificationTime() < expiry && !status.isDirectory() - && statusTranslatedPath.startsWith(keyPrefix); + return expired; } @VisibleForTesting static void deleteEntryByAncestor(Path ancestor, - Cache cache, boolean tombstone) { + Cache cache, boolean tombstone, + ITtlTimeProvider ttlTimeProvider) { cache.asMap().entrySet().stream() .filter(entry -> isAncestorOf(ancestor, entry.getKey())) @@ -410,7 +438,9 @@ static void deleteEntryByAncestor(Path ancestor, if(meta.hasDirMeta()){ cache.invalidate(path); } else if(tombstone && meta.hasPathMeta()){ - meta.setPathMetadata(PathMetadata.tombstone(path)); + final PathMetadata pmTombstone = PathMetadata.tombstone(path); + pmTombstone.setLastUpdated(ttlTimeProvider.getNow()); + meta.setPathMetadata(pmTombstone); } else { cache.invalidate(path); } @@ -434,7 +464,8 @@ private static boolean isAncestorOf(Path ancestor, Path f) { * Update fileCache and dirCache to reflect deletion of file 'f'. Call with * lock held. */ - private void deleteCacheEntries(Path path, boolean tombstone) { + private void deleteCacheEntries(Path path, boolean tombstone, + ITtlTimeProvider ttlTimeProvider) { LocalMetadataEntry entry = localCache.getIfPresent(path); // If there's no entry, delete should silently succeed // (based on MetadataStoreTestBase#testDeleteNonExisting) @@ -448,6 +479,7 @@ private void deleteCacheEntries(Path path, boolean tombstone) { if(entry.hasPathMeta()){ if (tombstone) { PathMetadata pmd = PathMetadata.tombstone(path); + pmd.setLastUpdated(ttlTimeProvider.getNow()); entry.setPathMetadata(pmd); } else { entry.setPathMetadata(null); @@ -474,6 +506,7 @@ private void deleteCacheEntries(Path path, boolean tombstone) { LOG.debug("removing parent's entry for {} ", path); if (tombstone) { dir.markDeleted(path); + dir.setLastUpdated(ttlTimeProvider.getNow()); } else { dir.remove(path); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java index 746fd82950b27..7875d43d1e6bb 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java @@ -63,16 +63,23 @@ public interface MetadataStore extends Closeable { * Deletes exactly one path, leaving a tombstone to prevent lingering, * inconsistent copies of it from being listed. * + * Deleting an entry with a tombstone needs a + * {@link org.apache.hadoop.fs.s3a.s3guard.S3Guard.TtlTimeProvider} because + * the lastUpdated field of the record has to be updated to

<pre>now</pre>
    . + * * @param path the path to delete + * @param ttlTimeProvider the time provider to set last_updated. Must not + * be null. * @throws IOException if there is an error */ - void delete(Path path) throws IOException; + void delete(Path path, ITtlTimeProvider ttlTimeProvider) + throws IOException; /** * Removes the record of exactly one path. Does not leave a tombstone (see - * {@link MetadataStore#delete(Path)}. It is currently intended for testing - * only, and a need to use it as part of normal FileSystem usage is not - * anticipated. + * {@link MetadataStore#delete(Path, ITtlTimeProvider)}. It is currently + * intended for testing only, and a need to use it as part of normal + * FileSystem usage is not anticipated. * * @param path the path to delete * @throws IOException if there is an error @@ -88,10 +95,17 @@ public interface MetadataStore extends Closeable { * implementations must also update any stored {@code DirListingMetadata} * objects which track the parent of this file. * + * Deleting a subtree with a tombstone needs a + * {@link org.apache.hadoop.fs.s3a.s3guard.S3Guard.TtlTimeProvider} because + * the lastUpdated field of all records have to be updated to
<pre>now</pre>
    . + * * @param path the root of the sub-tree to delete + * @param ttlTimeProvider the time provider to set last_updated. Must not + * be null. * @throws IOException if there is an error */ - void deleteSubtree(Path path) throws IOException; + void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider) + throws IOException; /** * Gets metadata for a path. @@ -151,10 +165,13 @@ PathMetadata get(Path path, boolean wantEmptyDirectoryFlag) * @param pathsToCreate Collection of all PathMetadata for the new paths * that were created at the destination of the rename * (). + * @param ttlTimeProvider the time provider to set last_updated. Must not + * be null. * @throws IOException if there is an error */ void move(Collection pathsToDelete, - Collection pathsToCreate) throws IOException; + Collection pathsToCreate, + ITtlTimeProvider ttlTimeProvider) throws IOException; /** * Saves metadata for exactly one path. @@ -212,29 +229,54 @@ void move(Collection pathsToDelete, void destroy() throws IOException; /** - * Clear any metadata older than a specified time from the repository. - * Implementations MUST clear file metadata, and MAY clear directory metadata - * (s3a itself does not track modification time for directories). - * Implementations may also choose to throw UnsupportedOperationException - * istead. Note that modification times should be in UTC, as returned by - * System.currentTimeMillis at the time of modification. + * Prune method with two modes of operation: + *
<ul>
+ * <li>
+ * {@link PruneMode#ALL_BY_MODTIME}
+ * Clear any metadata older than a specified mod_time from the store.
+ * Note that this modification time is the S3 modification time from the
+ * object's metadata - from the object store.
+ * Implementations MUST clear file metadata, and MAY clear directory
+ * metadata (s3a itself does not track modification time for directories).
+ * Implementations may also choose to throw UnsupportedOperationException
+ * instead. Note that modification times must be in UTC, as returned by
+ * System.currentTimeMillis at the time of modification.
+ * </li>
- * @param modTime Oldest modification time to allow
+ * <li>
+ * {@link PruneMode#TOMBSTONES_BY_LASTUPDATED}
+ * Clear any tombstone updated earlier than a specified time from the
+ * store. Note that this last_updated is the time when the metadata
+ * entry was last updated and maintained by the metadata store.
+ * Implementations MUST clear file metadata, and MAY clear directory
+ * metadata (s3a itself does not track modification time for directories).
+ * Implementations may also choose to throw UnsupportedOperationException
+ * instead. Note that last_updated must be in UTC, as returned by
+ * System.currentTimeMillis at the time of modification.
+ * </li>
+ * </ul>
    + * + * @param pruneMode + * @param cutoff Oldest time to allow (UTC) * @throws IOException if there is an error * @throws UnsupportedOperationException if not implemented */ - void prune(long modTime) throws IOException, UnsupportedOperationException; + void prune(PruneMode pruneMode, long cutoff) throws IOException, + UnsupportedOperationException; /** - * Same as {@link MetadataStore#prune(long)}, but with an additional - * keyPrefix parameter to filter the pruned keys with a prefix. + * Same as {@link MetadataStore#prune(PruneMode, long)}, but with an + * additional keyPrefix parameter to filter the pruned keys with a prefix. * - * @param modTime Oldest modification time to allow + * @param pruneMode + * @param cutoff Oldest time to allow (UTC) * @param keyPrefix The prefix for the keys that should be removed * @throws IOException if there is an error * @throws UnsupportedOperationException if not implemented */ - void prune(long modTime, String keyPrefix) + void prune(PruneMode pruneMode, long cutoff, String keyPrefix) throws IOException, UnsupportedOperationException; /** @@ -252,4 +294,13 @@ void prune(long modTime, String keyPrefix) * @throws IOException if there is an error */ void updateParameters(Map parameters) throws IOException; + + /** + * Modes of operation for prune. + * For details see {@link MetadataStore#prune(PruneMode, long)} + */ + enum PruneMode { + ALL_BY_MODTIME, + TOMBSTONES_BY_LASTUPDATED + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java index 04704e7ea73d7..1472ef1a2219f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java @@ -47,7 +47,8 @@ public void close() throws IOException { } @Override - public void delete(Path path) throws IOException { + public void delete(Path path, ITtlTimeProvider ttlTimeProvider) + throws IOException { } @Override @@ -55,7 +56,8 @@ public void forgetMetadata(Path path) throws IOException { } @Override - public void deleteSubtree(Path path) throws IOException { + public void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider) + throws IOException { } @Override @@ -76,7 +78,8 @@ public DirListingMetadata listChildren(Path path) throws IOException { @Override public void move(Collection pathsToDelete, - Collection pathsToCreate) throws IOException { + Collection pathsToCreate, + ITtlTimeProvider ttlTimeProvider) throws IOException { } @Override @@ -96,11 +99,11 @@ public void destroy() throws IOException { } @Override - public void prune(long modTime) { + public void prune(PruneMode pruneMode, long cutoff) { } @Override - public void prune(long modTime, String keyPrefix) { + public void prune(PruneMode pruneMode, long cutoff, String keyPrefix) { } @Override diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java index 26c75e82133ce..933a01ced5f4c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java @@ -25,9 +25,13 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; +import 
java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import javax.annotation.Nullable; + import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.slf4j.Logger; @@ -46,6 +50,8 @@ import org.apache.hadoop.fs.s3a.Tristate; import org.apache.hadoop.util.ReflectionUtils; +import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_METADATASTORE_METADATA_TTL; +import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_METADATA_TTL; import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL; import static org.apache.hadoop.fs.s3a.Statistic.S3GUARD_METADATASTORE_PUT_PATH_LATENCY; import static org.apache.hadoop.fs.s3a.Statistic.S3GUARD_METADATASTORE_PUT_PATH_REQUEST; @@ -142,15 +148,17 @@ static Class getMetadataStoreClass( * @param ms MetadataStore to {@code put()} into. * @param status status to store * @param instrumentation instrumentation of the s3a file system + * @param timeProvider Time provider to use when writing entries * @return The same status as passed in * @throws IOException if metadata store update failed */ @RetryTranslated public static S3AFileStatus putAndReturn(MetadataStore ms, S3AFileStatus status, - S3AInstrumentation instrumentation) throws IOException { + S3AInstrumentation instrumentation, + ITtlTimeProvider timeProvider) throws IOException { long startTimeNano = System.nanoTime(); - ms.put(new PathMetadata(status)); + S3Guard.putWithTtl(ms, new PathMetadata(status), timeProvider); instrumentation.addValueToQuantiles(S3GUARD_METADATASTORE_PUT_PATH_LATENCY, (System.nanoTime() - startTimeNano)); instrumentation.incrementCounter(S3GUARD_METADATASTORE_PUT_PATH_REQUEST, 1); @@ -196,7 +204,7 @@ public static S3AFileStatus[] dirMetaToStatuses(DirListingMetadata dirMeta) { * @param backingStatuses Directory listing from the backing store. * @param dirMeta Directory listing from MetadataStore. May be null. * @param isAuthoritative State of authoritative mode - * @param timeProvider Time provider for testing. + * @param timeProvider Time provider to use when updating entries * @return Final result of directory listing. 
* @throws IOException if metadata store update failed */ @@ -242,7 +250,7 @@ public static FileStatus[] dirListingUnion(MetadataStore ms, Path path, if (status != null && s.getModificationTime() > status.getModificationTime()) { LOG.debug("Update ms with newer metadata of: {}", status); - ms.put(new PathMetadata(s)); + S3Guard.putWithTtl(ms, new PathMetadata(s), timeProvider); } } @@ -357,7 +365,7 @@ public static void makeDirsOrdered(MetadataStore ms, List dirs, } // Batched put - ms.put(pathMetas); + S3Guard.putWithTtl(ms, pathMetas, timeProvider); } catch (IOException ioe) { LOG.error("MetadataStore#put() failure:", ioe); } @@ -462,7 +470,8 @@ public static void addMoveAncestors(MetadataStore ms, } public static void addAncestors(MetadataStore metadataStore, - Path qualifiedPath, String username) throws IOException { + Path qualifiedPath, String username, ITtlTimeProvider timeProvider) + throws IOException { Collection newDirs = new ArrayList<>(); Path parent = qualifiedPath.getParent(); while (!parent.isRoot()) { @@ -476,7 +485,7 @@ public static void addAncestors(MetadataStore metadataStore, } parent = parent.getParent(); } - metadataStore.put(newDirs); + S3Guard.putWithTtl(metadataStore, newDirs, timeProvider); } private static void addMoveStatus(Collection srcPaths, @@ -513,17 +522,6 @@ public static void assertQualified(Path...paths) { } } - /** - * This interface is defined for testing purposes. - * TTL can be tested by implementing this interface and setting is as - * {@code S3Guard.ttlTimeProvider}. By doing this, getNow() can return any - * value preferred and flaky tests could be avoided. - */ - public interface ITtlTimeProvider { - long getNow(); - long getAuthoritativeDirTtl(); - } - /** * Runtime implementation for TTL Time Provider interface. 
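A hedged sketch of how the pieces above might be wired together by a caller: build the runtime provider from a `Configuration` (which reads the new TTL key) and let `putWithTtl()` stamp the entry before the store write. The helper class and parameter names are assumptions for the example.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3a.S3AFileStatus;
import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider;
import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
import org.apache.hadoop.fs.s3a.s3guard.PathMetadata;
import org.apache.hadoop.fs.s3a.s3guard.S3Guard;

final class PutWithTtlExample {
  static void putStamped(Configuration conf, MetadataStore ms,
      S3AFileStatus status) throws IOException {
    // runtime provider built from configuration (fs.s3a.metadatastore.metadata.ttl)
    ITtlTimeProvider timeProvider = new S3Guard.TtlTimeProvider(conf);
    // putWithTtl sets last_updated on the entry before calling ms.put()
    S3Guard.putWithTtl(ms, new PathMetadata(status), timeProvider);
  }
}
```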
*/ @@ -534,34 +532,127 @@ public TtlTimeProvider(long authoritativeDirTtl) { this.authoritativeDirTtl = authoritativeDirTtl; } + public TtlTimeProvider(Configuration conf) { + this.authoritativeDirTtl = + conf.getTimeDuration(METADATASTORE_METADATA_TTL, + DEFAULT_METADATASTORE_METADATA_TTL, TimeUnit.MILLISECONDS); + } + @Override public long getNow() { return System.currentTimeMillis(); } - @Override public long getAuthoritativeDirTtl() { + @Override public long getMetadataTtl() { return authoritativeDirTtl; } + + @Override + public boolean equals(final Object o) { + if (this == o) { return true; } + if (o == null || getClass() != o.getClass()) { return false; } + final TtlTimeProvider that = (TtlTimeProvider) o; + return authoritativeDirTtl == that.authoritativeDirTtl; + } + + @Override + public int hashCode() { + return Objects.hash(authoritativeDirTtl); + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "TtlTimeProvider{"); + sb.append("authoritativeDirTtl=").append(authoritativeDirTtl); + sb.append(" millis}"); + return sb.toString(); + } } public static void putWithTtl(MetadataStore ms, DirListingMetadata dirMeta, ITtlTimeProvider timeProvider) throws IOException { dirMeta.setLastUpdated(timeProvider.getNow()); + dirMeta.getListing() + .forEach(pm -> pm.setLastUpdated(timeProvider.getNow())); ms.put(dirMeta); } - public static DirListingMetadata listChildrenWithTtl(MetadataStore ms, - Path path, ITtlTimeProvider timeProvider) + public static void putWithTtl(MetadataStore ms, PathMetadata fileMeta, + @Nullable ITtlTimeProvider timeProvider) throws IOException { + if (timeProvider != null) { + fileMeta.setLastUpdated(timeProvider.getNow()); + } else { + LOG.debug("timeProvider is null, put {} without setting last_updated", + fileMeta); + } + ms.put(fileMeta); + } + + public static void putWithTtl(MetadataStore ms, + Collection fileMetas, + @Nullable ITtlTimeProvider timeProvider) throws IOException { - long ttl = timeProvider.getAuthoritativeDirTtl(); + if (timeProvider != null) { + final long now = timeProvider.getNow(); + fileMetas.forEach(fileMeta -> fileMeta.setLastUpdated(now)); + } else { + LOG.debug("timeProvider is null, put {} without setting last_updated", + fileMetas); + } + ms.put(fileMetas); + } + + public static PathMetadata getWithTtl(MetadataStore ms, Path path, + @Nullable ITtlTimeProvider timeProvider) throws IOException { + final PathMetadata pathMetadata = ms.get(path); + // if timeProvider is null let's return with what the ms has + if (timeProvider == null) { + LOG.debug("timeProvider is null, returning pathMetadata as is"); + return pathMetadata; + } + + long ttl = timeProvider.getMetadataTtl(); + + if (pathMetadata != null) { + // Special case: the pathmetadata's last updated is 0. This can happen + // eg. 
with an old db using this implementation + if (pathMetadata.getLastUpdated() == 0) { + LOG.debug("PathMetadata TTL for {} is 0, so it will be returned as " + + "not expired."); + return pathMetadata; + } + + if (!pathMetadata.isExpired(ttl, timeProvider.getNow())) { + return pathMetadata; + } else { + LOG.debug("PathMetadata TTl for {} is expired in metadata store.", + path); + return null; + } + } + return null; + } + + public static DirListingMetadata listChildrenWithTtl(MetadataStore ms, + Path path, @Nullable ITtlTimeProvider timeProvider) + throws IOException { DirListingMetadata dlm = ms.listChildren(path); - if(dlm != null && dlm.isAuthoritative() + if (timeProvider == null) { + LOG.debug("timeProvider is null, returning DirListingMetadata as is"); + return dlm; + } + + long ttl = timeProvider.getMetadataTtl(); + + if (dlm != null && dlm.isAuthoritative() && dlm.isExpired(ttl, timeProvider.getNow())) { dlm.setAuthoritative(false); } return dlm; } + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java index 397a9cba670a8..dedb84931a902 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java @@ -707,7 +707,8 @@ private void putParentsIfNotPresent(FileStatus f) throws IOException { } S3AFileStatus dir = DynamoDBMetadataStore.makeDirStatus(parent, f.getOwner()); - getStore().put(new PathMetadata(dir)); + S3Guard.putWithTtl(getStore(), new PathMetadata(dir), + getFilesystem().getTtlTimeProvider()); dirCache.add(parent); parent = parent.getParent(); } @@ -741,7 +742,8 @@ private long importDir(FileStatus status) throws IOException { located.getVersionId()); } putParentsIfNotPresent(child); - getStore().put(new PathMetadata(child)); + S3Guard.putWithTtl(getStore(), new PathMetadata(child), + getFilesystem().getTtlTimeProvider()); items++; } return items; @@ -1073,7 +1075,8 @@ public int run(String[] args, PrintStream out) throws } try { - getStore().prune(divide, keyPrefix); + getStore().prune(MetadataStore.PruneMode.ALL_BY_MODTIME, divide, + keyPrefix); } catch (UnsupportedOperationException e){ errorln("Prune operation not supported in metadata store."); } diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md index 94dc89b70d332..337fc95b6c703 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md @@ -181,8 +181,8 @@ removed on `S3AFileSystem` level. 
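Before the documentation snippet below, a small illustrative example (not from the patch) of how the renamed key is parsed: `Configuration.getTimeDuration()` accepts either a plain millisecond value or a suffixed duration such as `15m`.

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class TtlConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // same value as shown in the s3guard.md example below
    conf.set("fs.s3a.metadatastore.metadata.ttl", "15m");
    long ttlMillis = conf.getTimeDuration(
        "fs.s3a.metadatastore.metadata.ttl",
        TimeUnit.MINUTES.toMillis(15),   // default from Constants
        TimeUnit.MILLISECONDS);
    System.out.println(ttlMillis);       // prints 900000
  }
}
```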
```xml - fs.s3a.metadatastore.authoritative.dir.ttl - 3600000 + fs.s3a.metadatastore.metadata.ttl + 15m ``` diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardOutOfBandOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardOutOfBandOperations.java index 6dbe6f91d48e3..2af9a0ab73ef2 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardOutOfBandOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardOutOfBandOperations.java @@ -24,6 +24,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; import org.junit.Assume; @@ -37,20 +38,32 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy; import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy.Source; import org.apache.hadoop.fs.s3a.s3guard.DirListingMetadata; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; - +import org.apache.hadoop.fs.s3a.s3guard.PathMetadata; +import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.RemoteIterator; + +import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; +import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_METADATASTORE_METADATA_TTL; +import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_METADATA_TTL; import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; import static org.apache.hadoop.test.LambdaTestUtils.eventually; import static org.junit.Assume.assumeTrue; +import static org.apache.hadoop.fs.contract.ContractTestUtils.readBytesToString; import static org.apache.hadoop.fs.contract.ContractTestUtils.writeTextFile; import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE; import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL; import static org.apache.hadoop.fs.s3a.S3ATestUtils.metadataStorePersistsAuthoritativeBit; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * @@ -115,7 +128,7 @@ public class ITestS3GuardOutOfBandOperations extends AbstractS3ATestBase { * Test array for parameterized test runs. * @return a list of parameter tuples. 
*/ - @Parameterized.Parameters + @Parameterized.Parameters(name="auth={0}") public static Collection params() { return Arrays.asList(new Object[][]{ {true}, {false} @@ -190,8 +203,11 @@ private S3AFileSystem createGuardedFS(boolean authoritativeMode) URI uri = testFS.getUri(); removeBaseAndBucketOverrides(uri.getHost(), config, - METADATASTORE_AUTHORITATIVE); + METADATASTORE_AUTHORITATIVE, + METADATASTORE_METADATA_TTL); config.setBoolean(METADATASTORE_AUTHORITATIVE, authoritativeMode); + config.setLong(METADATASTORE_METADATA_TTL, + DEFAULT_METADATASTORE_METADATA_TTL); final S3AFileSystem gFs = createFS(uri, config); // set back the same metadata store instance gFs.setMetadataStore(realMs); @@ -271,6 +287,292 @@ public void testListingDelete() throws Exception { deleteFileInListing(); } + /** + * Tests that tombstone expiry is implemented, so if a file is created raw + * while the tombstone exist in ms for with the same name then S3Guard will + * check S3 for the file. + * + * Seq: create guarded; delete guarded; create raw (same path); read guarded; + * This will fail if no tombstone expiry is set + * + * @throws Exception + */ + @Test + public void testTombstoneExpiryGuardedDeleteRawCreate() throws Exception { + boolean allowAuthoritative = authoritative; + Path testFilePath = path("TEGDRC-" + UUID.randomUUID() + "/file"); + LOG.info("Allow authoritative param: {}", allowAuthoritative); + String originalText = "some test"; + String newText = "the new originalText for test"; + + final ITtlTimeProvider originalTimeProvider = + guardedFs.getTtlTimeProvider(); + try { + final AtomicLong now = new AtomicLong(1); + final AtomicLong metadataTtl = new AtomicLong(1); + + // SET TTL TIME PROVIDER FOR TESTING + ITtlTimeProvider testTimeProvider = + new ITtlTimeProvider() { + @Override public long getNow() { + return now.get(); + } + + @Override public long getMetadataTtl() { + return metadataTtl.get(); + } + }; + guardedFs.setTtlTimeProvider(testTimeProvider); + + // CREATE GUARDED + createAndAwaitFs(guardedFs, testFilePath, originalText); + + // DELETE GUARDED + deleteGuardedTombstoned(guardedFs, testFilePath, now); + + // CREATE RAW + createAndAwaitFs(rawFS, testFilePath, newText); + + // CHECK LISTING - THE FILE SHOULD NOT BE THERE, EVEN IF IT'S CREATED RAW + checkListingDoesNotContainPath(guardedFs, testFilePath); + + // CHANGE TTL SO ENTRY (& TOMBSTONE METADATA) WILL EXPIRE + long willExpire = now.get() + metadataTtl.get() + 1L; + now.set(willExpire); + LOG.info("willExpire: {}, ttlNow: {}; ttlTTL: {}", willExpire, + testTimeProvider.getNow(), testTimeProvider.getMetadataTtl()); + + // READ GUARDED + String newRead = readBytesToString(guardedFs, testFilePath, + newText.length()); + + // CHECK LISTING - THE FILE SHOULD BE THERE, TOMBSTONE EXPIRED + checkListingContainsPath(guardedFs, testFilePath); + + // we can assert that the originalText is the new one, which created raw + LOG.info("Old: {}, New: {}, Read: {}", originalText, newText, newRead); + assertEquals("The text should be modified with a new.", newText, + newRead); + } finally { + guardedFs.delete(testFilePath, true); + guardedFs.setTtlTimeProvider(originalTimeProvider); + } + } + + private void createAndAwaitFs(S3AFileSystem fs, Path testFilePath, + String text) throws Exception { + writeTextFile(fs, testFilePath, text, true); + final FileStatus newStatus = awaitFileStatus(fs, testFilePath); + assertNotNull("Newly created file status should not be null.", newStatus); + } + + private void deleteGuardedTombstoned(S3AFileSystem guarded, + 
Path testFilePath, AtomicLong now) throws Exception { + guarded.delete(testFilePath, true); + + final PathMetadata metadata = + guarded.getMetadataStore().get(testFilePath); + assertNotNull("Created file metadata should not be null in ms", + metadata); + assertEquals("Created file metadata last_updated should equal with " + + "mocked now", now.get(), metadata.getLastUpdated()); + + intercept(FileNotFoundException.class, testFilePath.toString(), + "This file should throw FNFE when reading through " + + "the guarded fs, and the metadatastore tombstoned the file.", + () -> guarded.getFileStatus(testFilePath)); + } + + /** + * createNonRecursive must fail if the parent directory has been deleted, + * and succeed if the tombstone has expired and the directory has been + * created out of band. + */ + @Test + public void testCreateNonRecursiveFailsIfParentDeleted() throws Exception { + LOG.info("Authoritative mode: {}", authoritative); + + String dirToDelete = methodName + UUID.randomUUID().toString(); + String fileToTry = dirToDelete + "/theFileToTry"; + + final Path dirPath = path(dirToDelete); + final Path filePath = path(fileToTry); + + // Create a directory with + ITtlTimeProvider mockTimeProvider = mock(ITtlTimeProvider.class); + ITtlTimeProvider originalTimeProvider = guardedFs.getTtlTimeProvider(); + + try { + guardedFs.setTtlTimeProvider(mockTimeProvider); + when(mockTimeProvider.getNow()).thenReturn(100L); + when(mockTimeProvider.getMetadataTtl()).thenReturn(5L); + + // CREATE DIRECTORY + guardedFs.mkdirs(dirPath); + + // DELETE DIRECTORY + guardedFs.delete(dirPath, true); + + // WRITE TO DELETED DIRECTORY - FAIL + intercept(FileNotFoundException.class, + dirToDelete, + "createNonRecursive must fail if the parent directory has been deleted.", + () -> createNonRecursive(guardedFs, filePath)); + + // CREATE THE DIRECTORY RAW + rawFS.mkdirs(dirPath); + awaitFileStatus(rawFS, dirPath); + + // SET TIME SO METADATA EXPIRES + when(mockTimeProvider.getNow()).thenReturn(110L); + + // WRITE TO DELETED DIRECTORY - SUCCESS + createNonRecursive(guardedFs, filePath); + + } finally { + guardedFs.delete(filePath, true); + guardedFs.delete(dirPath, true); + guardedFs.setTtlTimeProvider(originalTimeProvider); + } + } + + /** + * When lastUpdated = 0 the entry should not expire. This is a special case + * eg. 
for old metadata entries + */ + @Test + public void testLastUpdatedZeroWontExpire() throws Exception { + LOG.info("Authoritative mode: {}", authoritative); + + String testFile = methodName + UUID.randomUUID().toString() + + "/theFileToTry"; + + long ttl = 10L; + final Path filePath = path(testFile); + + ITtlTimeProvider mockTimeProvider = mock(ITtlTimeProvider.class); + ITtlTimeProvider originalTimeProvider = guardedFs.getTtlTimeProvider(); + + try { + guardedFs.setTtlTimeProvider(mockTimeProvider); + when(mockTimeProvider.getMetadataTtl()).thenReturn(ttl); + + // create a file while the NOW is 0, so it will set 0 as the last_updated + when(mockTimeProvider.getNow()).thenReturn(0L); + touch(guardedFs, filePath); + deleteFile(guardedFs, filePath); + + final PathMetadata pathMetadata = + guardedFs.getMetadataStore().get(filePath); + assertNotNull("pathMetadata should not be null after deleting with " + + "tombstones", pathMetadata); + assertEquals("pathMetadata lastUpdated field should be 0", 0, + pathMetadata.getLastUpdated()); + + // set the time, so the metadata would expire + when(mockTimeProvider.getNow()).thenReturn(2*ttl); + intercept(FileNotFoundException.class, filePath.toString(), + "This file should throw FNFE when reading through " + + "the guarded fs, and the metadatastore tombstoned the file. " + + "The tombstone won't expire if lastUpdated is set to 0.", + () -> guardedFs.getFileStatus(filePath)); + + } finally { + guardedFs.delete(filePath, true); + guardedFs.setTtlTimeProvider(originalTimeProvider); + } + } + + /** + * 1. File is deleted in the guarded fs. + * 2. File is replaced in the raw fs. + * 3. File is deleted in the guarded FS after the expiry time. + * 4. File MUST NOT exist in raw FS. + */ + @Test + public void deleteAfterTombstoneExpiryOobCreate() throws Exception { + LOG.info("Authoritative mode: {}", authoritative); + + String testFile = methodName + UUID.randomUUID().toString() + + "/theFileToTry"; + + long ttl = 10L; + final Path filePath = path(testFile); + + ITtlTimeProvider mockTimeProvider = mock(ITtlTimeProvider.class); + ITtlTimeProvider originalTimeProvider = guardedFs.getTtlTimeProvider(); + + try { + guardedFs.setTtlTimeProvider(mockTimeProvider); + when(mockTimeProvider.getMetadataTtl()).thenReturn(ttl); + + // CREATE AND DELETE WITH GUARDED FS + when(mockTimeProvider.getNow()).thenReturn(100L); + touch(guardedFs, filePath); + deleteFile(guardedFs, filePath); + + final PathMetadata pathMetadata = + guardedFs.getMetadataStore().get(filePath); + assertNotNull("pathMetadata should not be null after deleting with " + + "tombstones", pathMetadata); + + // REPLACE WITH RAW FS + touch(rawFS, filePath); + awaitFileStatus(rawFS, filePath); + + // SET EXPIRY TIME, SO THE TOMBSTONE IS EXPIRED + when(mockTimeProvider.getNow()).thenReturn(100L + 2 * ttl); + + // DELETE IN GUARDED FS + guardedFs.delete(filePath, true); + + // FILE MUST NOT EXIST IN RAW + intercept(FileNotFoundException.class, filePath.toString(), + "This file should throw FNFE when reading through " + + "the raw fs, and the guarded fs deleted the file.", + () -> rawFS.getFileStatus(filePath)); + + } finally { + guardedFs.delete(filePath, true); + guardedFs.setTtlTimeProvider(originalTimeProvider); + } + } + + private void checkListingDoesNotContainPath(S3AFileSystem fs, Path filePath) + throws IOException { + final RemoteIterator listIter = + fs.listFiles(filePath.getParent(), false); + while (listIter.hasNext()) { + final LocatedFileStatus lfs = listIter.next(); + assertNotEquals("The tombstone 
has not been expired, so must not be" + + " listed.", filePath, lfs.getPath()); + } + LOG.info("{}; file omitted from listFiles listing as expected.", filePath); + + final FileStatus[] fileStatuses = fs.listStatus(filePath.getParent()); + for (FileStatus fileStatus : fileStatuses) { + assertNotEquals("The tombstone has not been expired, so must not be" + + " listed.", filePath, fileStatus.getPath()); + } + LOG.info("{}; file omitted from listStatus as expected.", filePath); + } + + private void checkListingContainsPath(S3AFileSystem fs, Path filePath) + throws IOException { + final RemoteIterator listIter = + fs.listFiles(filePath.getParent(), false); + + while (listIter.hasNext()) { + final LocatedFileStatus lfs = listIter.next(); + assertEquals(filePath, lfs.getPath()); + } + + final FileStatus[] fileStatuses = fs.listStatus(filePath.getParent()); + for (FileStatus fileStatus : fileStatuses) + assertEquals("The file should be listed in fs.listStatus", + filePath, fileStatus.getPath()); + } + /** * Perform an out-of-band delete. * @param testFilePath filename @@ -384,12 +686,18 @@ private void overwriteFileInListing(String firstText, String secondText) // Create initial statusIterator with guarded ms writeTextFile(guardedFs, testFilePath, firstText, true); // and cache the value for later - final FileStatus origStatus = awaitFileStatus(rawFS, testFilePath); + final S3AFileStatus origStatus = awaitFileStatus(rawFS, testFilePath); + assertNotNull("No etag in raw status " + origStatus, + origStatus.getETag()); // Do a listing to cache the lists. Should be authoritative if it's set. - final FileStatus[] origList = guardedFs.listStatus(testDirPath); + final S3AFileStatus[] origList = (S3AFileStatus[]) guardedFs.listStatus( + testDirPath); assertArraySize("Added one file to the new dir, so the number of " + "files in the dir should be one.", 1, origList); + S3AFileStatus origGuardedFileStatus = origList[0]; + assertNotNull("No etag in origGuardedFileStatus" + origGuardedFileStatus, + origGuardedFileStatus.getETag()); final DirListingMetadata dirListingMetadata = realMs.listChildren(guardedFs.qualify(testDirPath)); assertListingAuthority(allowAuthoritative, dirListingMetadata); @@ -406,7 +714,8 @@ private void overwriteFileInListing(String firstText, String secondText) final FileStatus rawFileStatus = awaitFileStatus(rawFS, testFilePath); // check listing in guarded store. - final FileStatus[] modList = guardedFs.listStatus(testDirPath); + final S3AFileStatus[] modList = (S3AFileStatus[]) guardedFs.listStatus( + testDirPath); assertArraySize("Added one file to the new dir then modified it, " + "so the number of files in the dir should be one.", 1, modList); @@ -479,6 +788,24 @@ private void verifyFileStatusAsExpected(final String firstText, expectedLength, guardedLength); } } + // check etag. This relies on first and second text being different. 
+ final S3AFileStatus rawS3AFileStatus = (S3AFileStatus) rawFileStatus; + final S3AFileStatus guardedS3AFileStatus = (S3AFileStatus) + guardedFileStatus; + final S3AFileStatus origS3AFileStatus = (S3AFileStatus) origStatus; + assertNotEquals( + "raw status still no to date with changes" + stats, + origS3AFileStatus.getETag(), rawS3AFileStatus.getETag()); + if (allowAuthoritative) { + // expect the etag to be out of sync + assertNotEquals( + "etag in authoritative table with " + stats, + rawS3AFileStatus.getETag(), guardedS3AFileStatus.getETag()); + } else { + assertEquals( + "etag in non-authoritative table with " + stats, + rawS3AFileStatus.getETag(), guardedS3AFileStatus.getETag()); + } // Next: modification time. long rawModTime = rawFileStatus.getModificationTime(); long guardedModTime = guardedFileStatus.getModificationTime(); @@ -631,12 +958,18 @@ private void awaitDeletedFileDisappearance(final S3AFileSystem fs, * @return the file status. * @throws Exception failure */ - private FileStatus awaitFileStatus(S3AFileSystem fs, + private S3AFileStatus awaitFileStatus(S3AFileSystem fs, final Path testFilePath) throws Exception { - return eventually( + return (S3AFileStatus) eventually( STABILIZATION_TIME, PROBE_INTERVAL_MILLIS, () -> fs.getFileStatus(testFilePath)); } + private FSDataOutputStream createNonRecursive(FileSystem fs, Path path) + throws Exception { + return fs + .createNonRecursive(path, false, 4096, (short) 3, (short) 4096, null); + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardTtl.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardTtl.java index d24009cea22eb..962232239afb9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardTtl.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardTtl.java @@ -18,13 +18,22 @@ package org.apache.hadoop.fs.s3a; +import java.util.Arrays; +import java.util.Collection; +import java.util.UUID; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.s3guard.DirListingMetadata; +import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.fs.s3a.s3guard.S3Guard; + import org.junit.Assume; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE; @@ -36,8 +45,37 @@ /** * These tests are testing the S3Guard TTL (time to live) features. */ +@RunWith(Parameterized.class) public class ITestS3GuardTtl extends AbstractS3ATestBase { + private final boolean authoritative; + + /** + * Test array for parameterized test runs. + * @return a list of parameter tuples. + */ + @Parameterized.Parameters + public static Collection params() { + return Arrays.asList(new Object[][]{ + {true}, {false} + }); + } + + /** + * By changing the method name, the thread name is changed and + * so you can see in the logs which mode is being tested. + * @return a string to use for the thread namer. + */ + @Override + protected String getMethodName() { + return super.getMethodName() + + (authoritative ? 
"-auth" : "-nonauth"); + } + + public ITestS3GuardTtl(boolean authoritative) { + this.authoritative = authoritative; + } + /** * Patch the configuration - this test needs disabled filesystem caching. * These tests modify the fs instance that would cause flaky tests. @@ -47,11 +85,15 @@ public class ITestS3GuardTtl extends AbstractS3ATestBase { protected Configuration createConfiguration() { Configuration configuration = super.createConfiguration(); S3ATestUtils.disableFilesystemCaching(configuration); - return S3ATestUtils.prepareTestConfiguration(configuration); + configuration = + S3ATestUtils.prepareTestConfiguration(configuration); + configuration.setBoolean(METADATASTORE_AUTHORITATIVE, authoritative); + return configuration; } @Test public void testDirectoryListingAuthoritativeTtl() throws Exception { + LOG.info("Authoritative mode: {}", authoritative); final S3AFileSystem fs = getFileSystem(); Assume.assumeTrue(fs.hasMetadataStore()); @@ -64,12 +106,12 @@ public void testDirectoryListingAuthoritativeTtl() throws Exception { Assume.assumeTrue("MetadataStore should be authoritative for this test", isMetadataStoreAuthoritative(getFileSystem().getConf())); - S3Guard.ITtlTimeProvider mockTimeProvider = - mock(S3Guard.ITtlTimeProvider.class); - S3Guard.ITtlTimeProvider restoreTimeProvider = fs.getTtlTimeProvider(); + ITtlTimeProvider mockTimeProvider = + mock(ITtlTimeProvider.class); + ITtlTimeProvider restoreTimeProvider = fs.getTtlTimeProvider(); fs.setTtlTimeProvider(mockTimeProvider); when(mockTimeProvider.getNow()).thenReturn(100L); - when(mockTimeProvider.getAuthoritativeDirTtl()).thenReturn(1L); + when(mockTimeProvider.getMetadataTtl()).thenReturn(1L); Path dir = path("ttl/"); Path file = path("ttl/afile"); @@ -102,4 +144,146 @@ public void testDirectoryListingAuthoritativeTtl() throws Exception { fs.setTtlTimeProvider(restoreTimeProvider); } } + + @Test + public void testFileMetadataExpiresTtl() throws Exception { + LOG.info("Authoritative mode: {}", authoritative); + + Path fileExpire1 = path("expirettl-" + UUID.randomUUID()); + Path fileExpire2 = path("expirettl-" + UUID.randomUUID()); + Path fileRetain = path("expirettl-" + UUID.randomUUID()); + + final S3AFileSystem fs = getFileSystem(); + Assume.assumeTrue(fs.hasMetadataStore()); + final MetadataStore ms = fs.getMetadataStore(); + + ITtlTimeProvider mockTimeProvider = mock(ITtlTimeProvider.class); + ITtlTimeProvider originalTimeProvider = fs.getTtlTimeProvider(); + + try { + fs.setTtlTimeProvider(mockTimeProvider); + when(mockTimeProvider.getMetadataTtl()).thenReturn(5L); + + // set the time, so the fileExpire1 will expire + when(mockTimeProvider.getNow()).thenReturn(100L); + touch(fs, fileExpire1); + // set the time, so fileExpire2 will expire + when(mockTimeProvider.getNow()).thenReturn(101L); + touch(fs, fileExpire2); + // set the time, so fileRetain won't expire + when(mockTimeProvider.getNow()).thenReturn(109L); + touch(fs, fileRetain); + final FileStatus origFileRetainStatus = fs.getFileStatus(fileRetain); + // change time, so the first two file metadata is expired + when(mockTimeProvider.getNow()).thenReturn(110L); + + // metadata is expired so this should refresh the metadata with + // last_updated to the getNow() + final FileStatus fileExpire1Status = fs.getFileStatus(fileExpire1); + assertNotNull(fileExpire1Status); + assertEquals(110L, ms.get(fileExpire1).getLastUpdated()); + + // metadata is expired so this should refresh the metadata with + // last_updated to the getNow() + final FileStatus fileExpire2Status = 
fs.getFileStatus(fileExpire2); + assertNotNull(fileExpire2Status); + assertEquals(110L, ms.get(fileExpire2).getLastUpdated()); + + final FileStatus fileRetainStatus = fs.getFileStatus(fileRetain); + assertEquals("Modification time of these files should be equal.", + origFileRetainStatus.getModificationTime(), + fileRetainStatus.getModificationTime()); + assertNotNull(fileRetainStatus); + assertEquals(109L, ms.get(fileRetain).getLastUpdated()); + } finally { + fs.delete(fileExpire1, true); + fs.delete(fileExpire2, true); + fs.delete(fileRetain, true); + fs.setTtlTimeProvider(originalTimeProvider); + } + } + + /** + * create(tombstone file) must succeed irrespective of overwrite flag. + */ + @Test + public void testCreateOnTombstonedFileSucceeds() throws Exception { + LOG.info("Authoritative mode: {}", authoritative); + final S3AFileSystem fs = getFileSystem(); + + String fileToTry = methodName + UUID.randomUUID().toString(); + + final Path filePath = path(fileToTry); + + // Create a directory with + ITtlTimeProvider mockTimeProvider = mock(ITtlTimeProvider.class); + ITtlTimeProvider originalTimeProvider = fs.getTtlTimeProvider(); + + try { + fs.setTtlTimeProvider(mockTimeProvider); + when(mockTimeProvider.getNow()).thenReturn(100L); + when(mockTimeProvider.getMetadataTtl()).thenReturn(5L); + + // CREATE A FILE + touch(fs, filePath); + + // DELETE THE FILE - TOMBSTONE + fs.delete(filePath, true); + + // CREATE THE SAME FILE WITHOUT ERROR DESPITE THE TOMBSTONE + touch(fs, filePath); + + } finally { + fs.delete(filePath, true); + fs.setTtlTimeProvider(originalTimeProvider); + } + } + + /** + * create("parent has tombstone") must always succeed (We dont check the + * parent), but after the file has been written, all entries up the tree + * must be valid. 
That is: the putAncestor code will correct everything + */ + @Test + public void testCreateParentHasTombstone() throws Exception { + LOG.info("Authoritative mode: {}", authoritative); + final S3AFileSystem fs = getFileSystem(); + + String dirToDelete = methodName + UUID.randomUUID().toString(); + String fileToTry = dirToDelete + "/theFileToTry"; + + final Path dirPath = path(dirToDelete); + final Path filePath = path(fileToTry); + + // Create a directory with + ITtlTimeProvider mockTimeProvider = mock(ITtlTimeProvider.class); + ITtlTimeProvider originalTimeProvider = fs.getTtlTimeProvider(); + + try { + fs.setTtlTimeProvider(mockTimeProvider); + when(mockTimeProvider.getNow()).thenReturn(100L); + when(mockTimeProvider.getMetadataTtl()).thenReturn(5L); + + // CREATE DIRECTORY + fs.mkdirs(dirPath); + + // DELETE DIRECTORY + fs.delete(dirPath, true); + + // WRITE TO DELETED DIRECTORY - SUCCESS + touch(fs, filePath); + + // SET TIME SO METADATA EXPIRES + when(mockTimeProvider.getNow()).thenReturn(110L); + + // WRITE TO DELETED DIRECTORY - SUCCESS + touch(fs, filePath); + + } finally { + fs.delete(filePath, true); + fs.delete(dirPath, true); + fs.setTtlTimeProvider(originalTimeProvider); + } + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java index 9241686090536..f616190040a3b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java @@ -280,7 +280,7 @@ private void testPruneCommand(Configuration cmdConf, Path parent, "This child should have been kept (prefix restriction).", 1); } finally { getFileSystem().delete(parent, true); - ms.prune(Long.MAX_VALUE); + ms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, Long.MAX_VALUE); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java index 149d1f36065da..5241dd481d313 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java @@ -230,6 +230,12 @@ public void tearDown() throws Exception { IOUtils.cleanupWithLogger(LOG, fileSystem); } + @Override protected String getPathStringForPrune(String path) + throws Exception { + String b = getTestBucketName(getContract().getFileSystem().getConf()); + return "/" + b + "/dir2"; + } + /** * Each contract has its own S3AFileSystem and DynamoDBMetadataStore objects. 
*/ @@ -437,7 +443,7 @@ private void doTestBatchWrite(int numDelete, int numPut, } // move the old paths to new paths and verify - ms.move(pathsToDelete, newMetas); + ms.move(pathsToDelete, newMetas, getTtlTimeProvider()); assertEquals(0, ms.listChildren(oldDir).withoutTombstones().numEntries()); if (newMetas != null) { assertTrue(CollectionUtils @@ -650,7 +656,7 @@ public void testMovePopulatesAncestors() throws IOException { 1024, false)) ); - ddbms.move(fullSourcePaths, pathsToCreate); + ddbms.move(fullSourcePaths, pathsToCreate, getTtlTimeProvider()); // assert that all the ancestors should have been populated automatically assertCached(testRoot + "/c"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java index 301ba16aec57a..95c607aa66183 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java @@ -243,7 +243,8 @@ public void test_030_BatchedWrite() throws Exception { if (pruneItems == BATCH_SIZE) { describe("pruning files"); - ddbms.prune(Long.MAX_VALUE /* all files */); + ddbms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, + Long.MAX_VALUE /* all files */); pruneItems = 0; } if (tracker.probe()) { @@ -305,7 +306,7 @@ public void test_050_getVersionMarkerItem() throws Throwable { private void retryingDelete(final Path path) { try { ddbms.getInvoker().retry("Delete ", path.toString(), true, - () -> ddbms.delete(path)); + () -> ddbms.delete(path, new S3Guard.TtlTimeProvider(getConf()))); } catch (IOException e) { LOG.warn("Failed to delete {}: ", path, e); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java index 55f4707fe460f..754da0db7992a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java @@ -23,6 +23,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.HashSet; +import java.util.List; import java.util.Set; import com.google.common.collect.Sets; @@ -68,6 +69,7 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase { static final FsPermission PERMISSION = null; static final String GROUP = null; private final long accessTime = 0; + private static ITtlTimeProvider ttlTimeProvider; /** * Each test should override this. Will use a new Configuration instance. @@ -123,6 +125,8 @@ public void setUp() throws Exception { assertNotNull("null MetadataStore", ms); assertNotNull("null FileSystem", contract.getFileSystem()); ms.initialize(contract.getFileSystem()); + ttlTimeProvider = + new S3Guard.TtlTimeProvider(contract.getFileSystem().getConf()); } @After @@ -310,7 +314,7 @@ public void testRootDirPutNew() throws Exception { public void testDelete() throws Exception { setUpDeleteTest(); - ms.delete(strToPath("/ADirectory1/db1/file2")); + ms.delete(strToPath("/ADirectory1/db1/file2"), ttlTimeProvider); /* Ensure delete happened. 
*/ assertDirectorySize("/ADirectory1/db1", 1); @@ -338,7 +342,7 @@ private void deleteSubtreeHelper(String pathPrefix) throws Exception { if (!allowMissing()) { assertCached(p + "/ADirectory1/db1"); } - ms.deleteSubtree(strToPath(p + "/ADirectory1/db1/")); + ms.deleteSubtree(strToPath(p + "/ADirectory1/db1/"), ttlTimeProvider); assertEmptyDirectory(p + "/ADirectory1"); assertDeleted(p + "/ADirectory1/db1"); @@ -358,7 +362,7 @@ private void deleteSubtreeHelper(String pathPrefix) throws Exception { public void testDeleteRecursiveRoot() throws Exception { setUpDeleteTest(); - ms.deleteSubtree(strToPath("/")); + ms.deleteSubtree(strToPath("/"), ttlTimeProvider); assertDeleted("/ADirectory1"); assertDeleted("/ADirectory2"); assertDeleted("/ADirectory2/db1"); @@ -369,10 +373,10 @@ public void testDeleteRecursiveRoot() throws Exception { @Test public void testDeleteNonExisting() throws Exception { // Path doesn't exist, but should silently succeed - ms.delete(strToPath("/bobs/your/uncle")); + ms.delete(strToPath("/bobs/your/uncle"), ttlTimeProvider); // Ditto. - ms.deleteSubtree(strToPath("/internets")); + ms.deleteSubtree(strToPath("/internets"), ttlTimeProvider); } @@ -408,7 +412,7 @@ public void testGet() throws Exception { } if (!(ms instanceof NullMetadataStore)) { - ms.delete(strToPath(filePath)); + ms.delete(strToPath(filePath), ttlTimeProvider); meta = ms.get(strToPath(filePath)); assertTrue("Tombstone not left for deleted file", meta.isDeleted()); } @@ -586,7 +590,7 @@ public void testMove() throws Exception { destMetas.add(new PathMetadata(makeDirStatus("/b1"))); destMetas.add(new PathMetadata(makeFileStatus("/b1/file1", 100))); destMetas.add(new PathMetadata(makeFileStatus("/b1/file2", 100))); - ms.move(srcPaths, destMetas); + ms.move(srcPaths, destMetas, ttlTimeProvider); // Assert src is no longer there dirMeta = ms.listChildren(strToPath("/a1")); @@ -636,11 +640,11 @@ public void testMultiBucketPaths() throws Exception { // Make sure delete is correct as well if (!allowMissing()) { - ms.delete(new Path(p2)); + ms.delete(new Path(p2), ttlTimeProvider); meta = ms.get(new Path(p1)); assertNotNull("Path should not have been deleted", meta); } - ms.delete(new Path(p1)); + ms.delete(new Path(p1), ttlTimeProvider); } @Test @@ -668,7 +672,7 @@ public void testPruneFiles() throws Exception { assertListingsEqual(ls.getListing(), "/pruneFiles/new", "/pruneFiles/old"); } - ms.prune(cutoff); + ms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, cutoff); ls = ms.listChildren(strToPath("/pruneFiles")); if (allowMissing()) { assertDeleted("/pruneFiles/old"); @@ -698,7 +702,7 @@ public void testPruneDirs() throws Exception { Thread.sleep(1); long cutoff = getTime(); - ms.prune(cutoff); + ms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, cutoff); assertDeleted("/pruneDirs/dir/file"); } @@ -728,7 +732,7 @@ public void testPruneUnsetsAuthoritative() throws Exception { ms.put(parentDirMd); } - ms.prune(time); + ms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, time); DirListingMetadata listing; for (String directory : directories) { Path path = strToPath(directory); @@ -765,7 +769,7 @@ public void testPrunePreservesAuthoritative() throws Exception { ms.put(parentDirMd); // prune the ms - ms.prune(time); + ms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, time); // get the directory listings DirListingMetadata rootDirMd = ms.listChildren(strToPath(rootDir)); @@ -823,6 +827,89 @@ public void testPutRetainsIsDeletedInParentListing() throws Exception { } } + @Test + public void testPruneExpiredTombstones() throws 
Exception { + List keepFilenames = new ArrayList<>( + Arrays.asList("/dir1/fileK1", "/dir1/fileK2", "/dir1/fileK3")); + List removeFilenames = new ArrayList<>( + Arrays.asList("/dir1/fileR1", "/dir1/fileR2", "/dir1/fileR3")); + + long cutoff = 9001; + + for(String fN : keepFilenames) { + final PathMetadata pathMetadata = new PathMetadata(makeFileStatus(fN, 1)); + pathMetadata.setLastUpdated(9002L); + ms.put(pathMetadata); + } + + for(String fN : removeFilenames) { + final PathMetadata pathMetadata = new PathMetadata(makeFileStatus(fN, 1)); + pathMetadata.setLastUpdated(9000L); + // tombstones are the deleted files! + pathMetadata.setIsDeleted(true); + ms.put(pathMetadata); + } + + ms.prune(MetadataStore.PruneMode.TOMBSTONES_BY_LASTUPDATED, cutoff); + + if (!allowMissing()) { + for (String fN : keepFilenames) { + final PathMetadata pathMetadata = ms.get(strToPath(fN)); + assertNotNull("Kept files should be in the metastore after prune", + pathMetadata); + } + } + + for(String fN : removeFilenames) { + final PathMetadata pathMetadata = ms.get(strToPath(fN)); + assertNull("Expired tombstones should be removed from metastore after " + + "the prune.", pathMetadata); + } + } + + @Test + public void testPruneExpiredTombstonesSpecifiedPath() throws Exception { + List keepFilenames = new ArrayList<>( + Arrays.asList("/dir1/fileK1", "/dir1/fileK2", "/dir1/fileK3")); + List removeFilenames = new ArrayList<>( + Arrays.asList("/dir2/fileR1", "/dir2/fileR2", "/dir2/fileR3")); + + long cutoff = 9001; + + // Both are expired. Difference is it will only delete the specified one. + for (String fN : keepFilenames) { + final PathMetadata pathMetadata = new PathMetadata(makeFileStatus(fN, 1)); + pathMetadata.setLastUpdated(9002L); + ms.put(pathMetadata); + } + + for (String fN : removeFilenames) { + final PathMetadata pathMetadata = new PathMetadata(makeFileStatus(fN, 1)); + pathMetadata.setLastUpdated(9000L); + // tombstones are the deleted files! + pathMetadata.setIsDeleted(true); + ms.put(pathMetadata); + } + + final String prunePath = getPathStringForPrune("/dir2"); + ms.prune(MetadataStore.PruneMode.TOMBSTONES_BY_LASTUPDATED, cutoff, + prunePath); + + if (!allowMissing()) { + for (String fN : keepFilenames) { + final PathMetadata pathMetadata = ms.get(strToPath(fN)); + assertNotNull("Kept files should be in the metastore after prune", + pathMetadata); + } + } + + for (String fN : removeFilenames) { + final PathMetadata pathMetadata = ms.get(strToPath(fN)); + assertNull("Expired tombstones should be removed from metastore after " + + "the prune.", pathMetadata); + } + } + /* * Helper functions. */ @@ -837,6 +924,16 @@ private String[] buildPathStrings(String parent, String... paths) return paths; } + + /** + * The prune operation needs the path with the bucket name as a string in + * {@link DynamoDBMetadataStore}, but not for {@link LocalMetadataStore}. + * This is an implementation detail of the ms, so this should be + * implemented in the subclasses. 
+ */ + protected abstract String getPathStringForPrune(String path) + throws Exception; + private void commonTestPutListStatus(final String parent) throws IOException { putListStatusFiles(parent, true, buildPathStrings(parent, "file1", "file2", "file3")); @@ -1012,4 +1109,8 @@ protected static long getTime() { return System.currentTimeMillis(); } + protected static ITtlTimeProvider getTtlTimeProvider() { + return ttlTimeProvider; + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java index d0156f13e82ea..ee7b584ca18d2 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java @@ -75,6 +75,11 @@ public AbstractMSContract createContract(Configuration conf) throws return new LocalMSContract(conf); } + @Override protected String getPathStringForPrune(String path) + throws Exception{ + return path; + } + @Test public void testClearByAncestor() throws Exception { Cache cache = CacheBuilder.newBuilder().build(); @@ -184,7 +189,7 @@ private static void assertClearResult(Cache cache, String prefixStr, String pathStr, int leftoverSize) throws IOException { populateMap(cache, prefixStr); LocalMetadataStore.deleteEntryByAncestor(new Path(prefixStr + pathStr), - cache, true); + cache, true, getTtlTimeProvider()); assertEquals(String.format("Cache should have %d entries", leftoverSize), leftoverSize, sizeOfMap(cache)); cache.invalidateAll(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestNullMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestNullMetadataStore.java index c0541ea98ee26..2e0bc4b7e4f0e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestNullMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestNullMetadataStore.java @@ -46,6 +46,11 @@ public boolean allowMissing() { return true; } + @Override protected String getPathStringForPrune(String path) + throws Exception { + return path; + } + @Override public AbstractMSContract createContract() { return new NullMSContract(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java index b246da2d50030..bdb256cba3dea 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java @@ -18,18 +18,28 @@ package org.apache.hadoop.fs.s3a.s3guard; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.List; +import java.util.concurrent.TimeUnit; import org.junit.Assert; import org.junit.Test; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.fs.s3a.Tristate; -import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_METADATASTORE_AUTHORITATIVE_DIR_TTL; +import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_METADATASTORE_METADATA_TTL; +import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_METADATA_TTL; +import static 
org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; /** * Tests for the {@link S3Guard} utility class. @@ -58,8 +68,8 @@ public void testDirListingUnion() throws Exception { makeFileStatus("s3a://bucket/dir/s3-file4", false) ); - S3Guard.ITtlTimeProvider timeProvider = new S3Guard.TtlTimeProvider( - DEFAULT_METADATASTORE_AUTHORITATIVE_DIR_TTL); + ITtlTimeProvider timeProvider = new S3Guard.TtlTimeProvider( + DEFAULT_METADATASTORE_METADATA_TTL); FileStatus[] result = S3Guard.dirListingUnion(ms, dirPath, s3Listing, dirMeta, false, timeProvider); @@ -70,6 +80,185 @@ public void testDirListingUnion() throws Exception { assertContainsPath(result, "s3a://bucket/dir/s3-file4"); } + @Test + public void testPutWithTtlDirListingMeta() throws Exception { + // arrange + DirListingMetadata dlm = new DirListingMetadata(new Path("/"), null, + false); + MetadataStore ms = spy(MetadataStore.class); + ITtlTimeProvider timeProvider = + mock(ITtlTimeProvider.class); + when(timeProvider.getNow()).thenReturn(100L); + + // act + S3Guard.putWithTtl(ms, dlm, timeProvider); + + // assert + assertEquals("last update in " + dlm, 100L, dlm.getLastUpdated()); + verify(timeProvider, times(1)).getNow(); + verify(ms, times(1)).put(dlm); + } + + @Test + public void testPutWithTtlFileMeta() throws Exception { + // arrange + S3AFileStatus fileStatus = mock(S3AFileStatus.class); + when(fileStatus.getPath()).thenReturn(new Path("/")); + PathMetadata pm = new PathMetadata(fileStatus); + MetadataStore ms = spy(MetadataStore.class); + ITtlTimeProvider timeProvider = + mock(ITtlTimeProvider.class); + when(timeProvider.getNow()).thenReturn(100L); + + // act + S3Guard.putWithTtl(ms, pm, timeProvider); + + // assert + assertEquals("last update in " + pm, 100L, pm.getLastUpdated()); + verify(timeProvider, times(1)).getNow(); + verify(ms, times(1)).put(pm); + } + + @Test + public void testPutWithTtlCollection() throws Exception { + // arrange + S3AFileStatus fileStatus = mock(S3AFileStatus.class); + when(fileStatus.getPath()).thenReturn(new Path("/")); + Collection pmCollection = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + pmCollection.add(new PathMetadata(fileStatus)); + } + MetadataStore ms = spy(MetadataStore.class); + ITtlTimeProvider timeProvider = + mock(ITtlTimeProvider.class); + when(timeProvider.getNow()).thenReturn(100L); + + // act + S3Guard.putWithTtl(ms, pmCollection, timeProvider); + + // assert + pmCollection.forEach( + pm -> assertEquals(100L, pm.getLastUpdated()) + ); + verify(timeProvider, times(1)).getNow(); + verify(ms, times(1)).put(pmCollection); + } + + @Test + public void testGetWithTtlExpired() throws Exception { + // arrange + S3AFileStatus fileStatus = mock(S3AFileStatus.class); + Path path = new Path("/file"); + when(fileStatus.getPath()).thenReturn(path); + PathMetadata pm = new PathMetadata(fileStatus); + pm.setLastUpdated(100L); + + MetadataStore ms = mock(MetadataStore.class); + when(ms.get(path)).thenReturn(pm); + + ITtlTimeProvider timeProvider = + mock(ITtlTimeProvider.class); + when(timeProvider.getNow()).thenReturn(101L); + when(timeProvider.getMetadataTtl()).thenReturn(1L); + + // act + final PathMetadata pmExpired = S3Guard.getWithTtl(ms, path, timeProvider); + + // assert + assertNull(pmExpired); + } + + @Test + public void testGetWithTtlNotExpired() throws Exception { + // arrange + S3AFileStatus fileStatus = mock(S3AFileStatus.class); + Path path 
= new Path("/file"); + when(fileStatus.getPath()).thenReturn(path); + PathMetadata pm = new PathMetadata(fileStatus); + pm.setLastUpdated(100L); + + MetadataStore ms = mock(MetadataStore.class); + when(ms.get(path)).thenReturn(pm); + + ITtlTimeProvider timeProvider = + mock(ITtlTimeProvider.class); + when(timeProvider.getNow()).thenReturn(101L); + when(timeProvider.getMetadataTtl()).thenReturn(2L); + + // act + final PathMetadata pmNotExpired = + S3Guard.getWithTtl(ms, path, timeProvider); + + // assert + assertNotNull(pmNotExpired); + } + + @Test + public void testGetWithZeroLastUpdatedNotExpired() throws Exception { + // arrange + S3AFileStatus fileStatus = mock(S3AFileStatus.class); + Path path = new Path("/file"); + when(fileStatus.getPath()).thenReturn(path); + PathMetadata pm = new PathMetadata(fileStatus); + // we set 0 this time as the last updated: can happen eg. when we use an + // old dynamo table + pm.setLastUpdated(0L); + + MetadataStore ms = mock(MetadataStore.class); + when(ms.get(path)).thenReturn(pm); + + ITtlTimeProvider timeProvider = + mock(ITtlTimeProvider.class); + when(timeProvider.getNow()).thenReturn(101L); + when(timeProvider.getMetadataTtl()).thenReturn(2L); + + // act + final PathMetadata pmExpired = S3Guard.getWithTtl(ms, path, timeProvider); + + // assert + assertNotNull(pmExpired); + } + + + /** + * Makes sure that all uses of TTL timeouts use a consistent time unit. + * @throws Throwable failure + */ + @Test + public void testTTLConstruction() throws Throwable { + // first one + ITtlTimeProvider timeProviderExplicit = new S3Guard.TtlTimeProvider( + DEFAULT_METADATASTORE_METADATA_TTL); + + // mirror the FS construction, + // from a config guaranteed to be empty (i.e. the code defval) + Configuration conf = new Configuration(false); + long millitime = conf.getTimeDuration(METADATASTORE_METADATA_TTL, + DEFAULT_METADATASTORE_METADATA_TTL, TimeUnit.MILLISECONDS); + assertEquals(15 * 60_000, millitime); + S3Guard.TtlTimeProvider fsConstruction = new S3Guard.TtlTimeProvider( + millitime); + assertEquals("explicit vs fs construction", timeProviderExplicit, + fsConstruction); + assertEquals("first and second constructor", timeProviderExplicit, + new S3Guard.TtlTimeProvider(conf)); + // set the conf to a time without unit + conf.setLong(METADATASTORE_METADATA_TTL, + DEFAULT_METADATASTORE_METADATA_TTL); + assertEquals("first and second time set through long", timeProviderExplicit, + new S3Guard.TtlTimeProvider(conf)); + double timeInSeconds = DEFAULT_METADATASTORE_METADATA_TTL / 1000; + double timeInMinutes = timeInSeconds / 60; + String timeStr = String.format("%dm", (int) timeInMinutes); + assertEquals(":wrong time in minutes from " + timeInMinutes, + "15m", timeStr); + conf.set(METADATASTORE_METADATA_TTL, timeStr); + assertEquals("Time in millis as string from " + + conf.get(METADATASTORE_METADATA_TTL), + timeProviderExplicit, + new S3Guard.TtlTimeProvider(conf)); + } + void assertContainsPath(FileStatus[] statuses, String pathStr) { assertTrue("listing doesn't contain " + pathStr, containsPath(statuses, pathStr)); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java index b843392ebfa07..1bffc3b1b72fc 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java +++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java @@ -18,11 +18,15 @@ package org.apache.hadoop.fs.s3a.scale; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.fs.s3a.s3guard.PathMetadata; +import org.apache.hadoop.fs.s3a.s3guard.S3Guard; +import org.junit.Before; import org.junit.FixMethodOrder; import org.junit.Test; import org.junit.runners.MethodSorters; @@ -54,6 +58,12 @@ public abstract class AbstractITestS3AMetadataStoreScale extends static final long ACCESS_TIME = System.currentTimeMillis(); static final Path BUCKET_ROOT = new Path("s3a://fake-bucket/"); + private ITtlTimeProvider ttlTimeProvider; + + @Before + public void initialize() { + ttlTimeProvider = new S3Guard.TtlTimeProvider(new Configuration()); + } /** * Subclasses should override this to provide the MetadataStore they which @@ -129,7 +139,7 @@ public void test_020_Moves() throws Throwable { toDelete = movedPaths; toCreate = origMetas; } - ms.move(toDelete, toCreate); + ms.move(toDelete, toCreate, ttlTimeProvider); } moveTimer.end(); printTiming(LOG, "move", moveTimer, operations); @@ -194,7 +204,7 @@ protected void clearMetadataStore(MetadataStore ms, long count) throws IOException { describe("Recursive deletion"); NanoTimer deleteTimer = new NanoTimer(); - ms.deleteSubtree(BUCKET_ROOT); + ms.deleteSubtree(BUCKET_ROOT, ttlTimeProvider); deleteTimer.end(); printTiming(LOG, "delete", deleteTimer, count); } From 1da09bd9d5fc293e87a33521996596701746888f Mon Sep 17 00:00:00 2001 From: Da Zhou Date: Sun, 16 Jun 2019 19:20:46 +0100 Subject: [PATCH 0199/1308] HADOOP-16376. ABFS: Override access() to no-op. Contributed by Da Zhou. 
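For context: FileSystem#access(Path, FsAction) is the permission-probe call that Hive and similar clients issue before touching a path, and this patch makes the ABFS override return without performing any check. The sketch below is illustrative only, a hypothetical caller that is not part of this patch; it shows the contract the override has to honour, returning normally when access is allowed and throwing AccessControlException when it is denied.

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    // Hypothetical helper, not part of HADOOP-16376: probes read access on a
    // path and maps "returned normally" to true and an explicit denial to false.
    public final class AccessProbe {
      public static boolean canRead(FileSystem fs, Path path) throws IOException {
        try {
          fs.access(path, FsAction.READ); // no-op on ABFS after this patch
          return true;                    // no exception means access granted
        } catch (AccessControlException denied) {
          return false;                   // access explicitly denied
        }
      }
    }

With the no-op override in place, a probe like this always reports success on ABFS, which is the short-term behaviour the change intends until a longer-term fix lands.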
Change-Id: Ia0024bba32250189a87eb6247808b2473c331ed0 --- .../fs/azurebfs/AzureBlobFileSystem.java | 23 +++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java index 107465a1c3966..d93822f33d5ba 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java @@ -38,12 +38,12 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.azurebfs.services.AbfsClient; -import org.apache.hadoop.fs.azurebfs.services.AbfsClientThrottlingIntercept; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AbfsClientThrottlingIntercept; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; @@ -71,6 +71,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progressable; @@ -843,6 +844,24 @@ public AclStatus getAclStatus(final Path path) throws IOException { } } + /** + * Checks if the user can access a path. The mode specifies which access + * checks to perform. If the requested permissions are granted, then the + * method returns normally. If access is denied, then the method throws an + * {@link AccessControlException}. + * + * @param path Path to check + * @param mode type of access to check + * @throws AccessControlException if access is denied + * @throws java.io.FileNotFoundException if the path does not exist + * @throws IOException see specific implementation + */ + @Override + public void access(final Path path, FsAction mode) throws IOException { + // TODO: make it no-op to unblock hive permission issue for now. + // Will add a long term fix similar to the implementation in AdlFileSystem. 
+ } + private FileStatus tryGetFileStatus(final Path f) { try { return getFileStatus(f); From cc1630288e437796c01a8e547a0345689aa96a15 Mon Sep 17 00:00:00 2001 From: mpicker90 Date: Sun, 16 Jun 2019 21:32:42 -0400 Subject: [PATCH 0200/1308] HDFS-14556: Spelling Mistake "gloablly" (#938) --- .../hadoop-hdfs-client/src/main/proto/hdfs.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto index 441b9d6b76176..a48d9812c000d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto @@ -38,7 +38,7 @@ import "acl.proto"; * Extended block idenfies a block */ message ExtendedBlockProto { - required string poolId = 1; // Block pool id - gloablly unique across clusters + required string poolId = 1; // Block pool id - globally unique across clusters required uint64 blockId = 2; // the local id within a pool required uint64 generationStamp = 3; optional uint64 numBytes = 4 [default = 0]; // len does not belong in ebid From ba681bb80ef246e937bf0ddc11bd22327feb1c3c Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Sun, 16 Jun 2019 22:00:39 -0700 Subject: [PATCH 0201/1308] HDDS-1692. RDBTable#iterator should disabled caching of the keys during iterator. (#975) --- .../src/main/java/org/apache/hadoop/utils/db/RDBTable.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java index 4213e2baac295..43479179d249e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java @@ -162,6 +162,7 @@ public void deleteWithBatch(BatchOperation batch, byte[] key) @Override public TableIterator iterator() { ReadOptions readOptions = new ReadOptions(); + readOptions.setFillCache(false); return new RDBStoreIterator(db.newIterator(handle, readOptions)); } From 304a47e22cb836cfde227803c853ecf4def870e1 Mon Sep 17 00:00:00 2001 From: Zhankun Tang Date: Mon, 17 Jun 2019 17:08:23 +0800 Subject: [PATCH 0202/1308] YARN-9608. DecommissioningNodesWatcher should get lists of running applications on node from RMNode. Contributed by Abhishek Modi. 
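The idea in rough outline: instead of accumulating application IDs from container and keep-alive reports and pruning completed applications itself, the watcher now takes the node's own list of running applications from the RMNode on every status update. The helper below is an illustrative sketch only, not code from this patch (the real logic is in DecommissioningNodesWatcher in the diff that follows); it captures the readiness decision the watcher makes from those two inputs.

    import java.util.List;

    import org.apache.hadoop.yarn.api.records.ApplicationId;

    // Illustrative sketch: a DECOMMISSIONING node can be finalized once it has
    // no active containers and the RMNode reports no running applications.
    final class DecommissionReadiness {
      static boolean readyToDecommission(int numActiveContainers,
          List<ApplicationId> runningAppsFromRmNode) {
        return numActiveContainers == 0 && runningAppsFromRmNode.isEmpty();
      }
    }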
--- .../DecommissioningNodesWatcher.java | 47 ++------ .../TestDecommissioningNodesWatcher.java | 101 +++++++++++++++--- 2 files changed, 92 insertions(+), 56 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java index b0cec5a84cb1d..c476c611b36ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.yarn.server.resourcemanager; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.Timer; @@ -36,7 +38,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; @@ -58,13 +59,8 @@ * a DECOMMISSIONING node will be DECOMMISSIONED no later than * DECOMMISSIONING_TIMEOUT regardless of running containers or applications. * - * To be efficient, DecommissioningNodesWatcher skip tracking application - * containers on a particular node before the node is in DECOMMISSIONING state. - * It only tracks containers once the node is in DECOMMISSIONING state. * DecommissioningNodesWatcher basically is no cost when no node is - * DECOMMISSIONING. This sacrifices the possibility that the node once - * host containers of an application that is still running - * (the affected map tasks will be rescheduled). + * DECOMMISSIONING. */ public class DecommissioningNodesWatcher { private static final Logger LOG = @@ -88,8 +84,8 @@ class DecommissioningNodeContext { // number of running containers at the moment. private int numActiveContainers; - // All applications run on the node at or after decommissioningStartTime. - private Set appIds; + // All applications run on the node. + private List appIds; // First moment the node is observed in DECOMMISSIONED state. private long decommissionedTime; @@ -102,7 +98,7 @@ class DecommissioningNodeContext { public DecommissioningNodeContext(NodeId nodeId, int timeoutSec) { this.nodeId = nodeId; - this.appIds = new HashSet(); + this.appIds = new ArrayList<>(); this.decommissioningStartTime = mclock.getTime(); this.timeoutMs = 1000L * timeoutSec; } @@ -164,9 +160,7 @@ public synchronized void update(RMNode rmNode, NodeStatus remoteNodeStatus) { context.updateTimeout(rmNode.getDecommissioningTimeout()); context.lastUpdateTime = now; - if (remoteNodeStatus.getKeepAliveApplications() != null) { - context.appIds.addAll(remoteNodeStatus.getKeepAliveApplications()); - } + context.appIds = rmNode.getRunningApps(); // Count number of active containers. 
int numActiveContainers = 0; @@ -176,14 +170,7 @@ public synchronized void update(RMNode rmNode, NodeStatus remoteNodeStatus) { newState == ContainerState.NEW) { numActiveContainers++; } - context.numActiveContainers = numActiveContainers; - ApplicationId aid = cs.getContainerId() - .getApplicationAttemptId().getApplicationId(); - if (!context.appIds.contains(aid)) { - context.appIds.add(aid); - } } - context.numActiveContainers = numActiveContainers; // maintain lastContainerFinishTime. @@ -254,7 +241,6 @@ public DecommissioningNodeStatus checkDecommissioningStatus(NodeId nodeId) { DecommissioningNodeStatus.TIMEOUT; } - removeCompletedApps(context); if (context.appIds.size() == 0) { return DecommissioningNodeStatus.READY; } else { @@ -336,25 +322,6 @@ private RMNode getRmNode(NodeId nodeId) { return rmNode; } - private void removeCompletedApps(DecommissioningNodeContext context) { - Iterator it = context.appIds.iterator(); - while (it.hasNext()) { - ApplicationId appId = it.next(); - RMApp rmApp = rmContext.getRMApps().get(appId); - if (rmApp == null) { - LOG.debug("Consider non-existing app {} as completed", appId); - it.remove(); - continue; - } - if (rmApp.getState() == RMAppState.FINISHED || - rmApp.getState() == RMAppState.FAILED || - rmApp.getState() == RMAppState.KILLED) { - LOG.debug("Remove {} app {}", rmApp.getState(), appId); - it.remove(); - } - } - } - // Time in second to be decommissioned. private int getTimeoutInSec(DecommissioningNodeContext context) { if (context.nodeState == NodeState.DECOMMISSIONED) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDecommissioningNodesWatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDecommissioningNodesWatcher.java index 4371156085565..0695689e1656f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDecommissioningNodesWatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDecommissioningNodesWatcher.java @@ -19,11 +19,11 @@ package org.apache.hadoop.yarn.server.resourcemanager; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; @@ -35,7 +35,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.DecommissioningNodesWatcher.DecommissioningNodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent; import org.junit.After; import org.junit.Assert; import org.junit.Test; @@ -58,38 +59,106 @@ public void testDecommissioningNodesWatcher() throws Exception { new DecommissioningNodesWatcher(rm.getRMContext()); MockNM nm1 = 
rm.registerNode("host1:1234", 10240); - RMNode node1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId()); + RMNodeImpl node1 = + (RMNodeImpl) rm.getRMContext().getRMNodes().get(nm1.getNodeId()); NodeId id1 = nm1.getNodeId(); rm.waitForState(id1, NodeState.RUNNING); - Assert.assertFalse(watcher.checkReadyToBeDecommissioned(id1)); RMApp app = rm.submitApp(2000); MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1); + NodeStatus nodeStatus = createNodeStatus(id1, app, 3); + node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); + // Setup nm1 as DECOMMISSIONING for DecommissioningNodesWatcher. rm.sendNodeGracefulDecommission(nm1, YarnConfiguration.DEFAULT_RM_NODE_GRACEFUL_DECOMMISSION_TIMEOUT); rm.waitForState(id1, NodeState.DECOMMISSIONING); // Update status with decreasing number of running containers until 0. - watcher.update(node1, createNodeStatus(id1, app, 12)); - watcher.update(node1, createNodeStatus(id1, app, 11)); + nodeStatus = createNodeStatus(id1, app, 3); + node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); + watcher.update(node1, nodeStatus); + + nodeStatus = createNodeStatus(id1, app, 2); + node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); + watcher.update(node1, nodeStatus); Assert.assertFalse(watcher.checkReadyToBeDecommissioned(id1)); - watcher.update(node1, createNodeStatus(id1, app, 1)); + nodeStatus = createNodeStatus(id1, app, 1); + node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); + watcher.update(node1, nodeStatus); Assert.assertEquals(DecommissioningNodeStatus.WAIT_CONTAINER, - watcher.checkDecommissioningStatus(id1)); + watcher.checkDecommissioningStatus(id1)); + + nodeStatus = createNodeStatus(id1, app, 0); + watcher.update(node1, nodeStatus); + node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); + Assert.assertEquals(DecommissioningNodeStatus.WAIT_APP, + watcher.checkDecommissioningStatus(id1)); + + // Set app to be FINISHED and verified DecommissioningNodeStatus is READY. + MockRM.finishAMAndVerifyAppState(app, rm, nm1, am); + rm.waitForState(app.getApplicationId(), RMAppState.FINISHED); + watcher.update(node1, nodeStatus); + Assert.assertEquals(DecommissioningNodeStatus.READY, + watcher.checkDecommissioningStatus(id1)); + } + + @Test + public void testDecommissioningNodesWatcherWithPreviousRunningApps() + throws Exception { + Configuration conf = new Configuration(); + conf.set(YarnConfiguration.RM_NODE_GRACEFUL_DECOMMISSION_TIMEOUT, "40"); + + rm = new MockRM(conf); + rm.start(); + + DecommissioningNodesWatcher watcher = + new DecommissioningNodesWatcher(rm.getRMContext()); + + MockNM nm1 = rm.registerNode("host1:1234", 10240); + RMNodeImpl node1 = + (RMNodeImpl) rm.getRMContext().getRMNodes().get(nm1.getNodeId()); + NodeId id1 = nm1.getNodeId(); + + rm.waitForState(id1, NodeState.RUNNING); + + RMApp app = rm.submitApp(2000); + MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1); - watcher.update(node1, createNodeStatus(id1, app, 0)); + NodeStatus nodeStatus = createNodeStatus(id1, app, 3); + node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); + + Assert.assertEquals(1, node1.getRunningApps().size()); + + // update node with 0 running containers + nodeStatus = createNodeStatus(id1, app, 0); + node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); + + Assert.assertEquals(1, node1.getRunningApps().size()); + + // Setup nm1 as DECOMMISSIONING for DecommissioningNodesWatcher. Right now + // there is no container running on the node. 
+ rm.sendNodeGracefulDecommission(nm1, + YarnConfiguration.DEFAULT_RM_NODE_GRACEFUL_DECOMMISSION_TIMEOUT); + rm.waitForState(id1, NodeState.DECOMMISSIONING); + + // we should still get WAIT_APP as container for a running app previously + // ran on this node. + watcher.update(node1, nodeStatus); + Assert.assertFalse(watcher.checkReadyToBeDecommissioned(id1)); Assert.assertEquals(DecommissioningNodeStatus.WAIT_APP, - watcher.checkDecommissioningStatus(id1)); + watcher.checkDecommissioningStatus(id1)); // Set app to be FINISHED and verified DecommissioningNodeStatus is READY. MockRM.finishAMAndVerifyAppState(app, rm, nm1, am); rm.waitForState(app.getApplicationId(), RMAppState.FINISHED); + Assert.assertEquals(0, node1.getRunningApps().size()); + watcher.update(node1, nodeStatus); Assert.assertEquals(DecommissioningNodeStatus.READY, - watcher.checkDecommissioningStatus(id1)); + watcher.checkDecommissioningStatus(id1)); } @After @@ -103,7 +172,7 @@ private NodeStatus createNodeStatus( NodeId nodeId, RMApp app, int numRunningContainers) { return NodeStatus.newInstance( nodeId, 0, getContainerStatuses(app, numRunningContainers), - new ArrayList(), + Collections.emptyList(), NodeHealthStatus.newInstance( true, "", System.currentTimeMillis() - 1000), null, null, null); @@ -113,8 +182,8 @@ private NodeStatus createNodeStatus( // where numRunningContainers are RUNNING. private List getContainerStatuses( RMApp app, int numRunningContainers) { - // Total 12 containers - final int total = 12; + // Total 3 containers + final int total = 3; numRunningContainers = Math.min(total, numRunningContainers); List output = new ArrayList(); for (int i = 0; i < total; i++) { @@ -122,8 +191,8 @@ private List getContainerStatuses( ContainerState.COMPLETE : ContainerState.RUNNING; output.add(ContainerStatus.newInstance( ContainerId.newContainerId( - ApplicationAttemptId.newInstance(app.getApplicationId(), i), 1), - cstate, "Dummy", 0)); + ApplicationAttemptId.newInstance(app.getApplicationId(), 0), i), + cstate, "", 0)); } return output; } From 22b36dd31139bab1aa75dc9c3f196c6305c7d3c6 Mon Sep 17 00:00:00 2001 From: S O'Donnell Date: Mon, 17 Jun 2019 09:05:27 +0200 Subject: [PATCH 0203/1308] HDDS-1660. 
Use Picocli for Ozone Manager Closes #925 --- hadoop-ozone/common/src/main/bin/ozone | 2 +- .../runConfigurations/OzoneManager.xml | 4 +- .../runConfigurations/OzoneManagerInit.xml | 4 +- .../hadoop/ozone/MiniOzoneClusterImpl.java | 2 +- .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 2 +- .../hadoop/ozone/TestSecureOzoneCluster.java | 30 +++- .../apache/hadoop/ozone/om/TestOmInit.java | 6 +- .../hadoop/ozone/om/TestOzoneManager.java | 4 +- .../hadoop/ozone/om/OMStarterInterface.java | 33 ++++ .../apache/hadoop/ozone/om/OzoneManager.java | 144 ++-------------- .../hadoop/ozone/om/OzoneManagerStarter.java | 130 +++++++++++++++ .../ozone/om/TestOzoneManagerStarter.java | 154 ++++++++++++++++++ .../hadoop/ozone/genesis/GenesisUtil.java | 2 +- 13 files changed, 366 insertions(+), 151 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone index de8f47f1ae8f5..f6fe147e5076c 100755 --- a/hadoop-ozone/common/src/main/bin/ozone +++ b/hadoop-ozone/common/src/main/bin/ozone @@ -129,7 +129,7 @@ function ozonecmd_case ;; om) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME=org.apache.hadoop.ozone.om.OzoneManager + HADOOP_CLASSNAME=org.apache.hadoop.ozone.om.OzoneManagerStarter HDFS_OM_OPTS="${HDFS_OM_OPTS} -Dlog4j.configurationFile=${HADOOP_CONF_DIR}/om-audit-log4j2.properties" HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_OM_OPTS}" OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager" diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManager.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManager.xml index 2d4a308798a58..c2aaf1c829b27 100644 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManager.xml +++ b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManager.xml @@ -16,7 +16,7 @@ --> - - \ No newline at end of file + diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManagerInit.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManagerInit.xml index 7988ff17f0810..70fab5df640f2 100644 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManagerInit.xml +++ b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManagerInit.xml @@ -16,7 +16,7 @@ --> - - \ No newline at end of file + diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index ee1e34ad7bbce..3c4407ce84a0b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -517,7 +517,7 @@ OzoneManager createOM() configureOM(); OMStorage omStore = new OMStorage(conf); initializeOmStorage(omStore); - return OzoneManager.createOm(null, conf); + return OzoneManager.createOm(conf); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 7818d9ea7e544..816b99ce7f947 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -211,7 +211,7 @@ private Map createOMService() throws IOException, OMStorage omStore = new OMStorage(conf); initializeOmStorage(omStore); - OzoneManager om = OzoneManager.createOm(null, conf); + OzoneManager om = OzoneManager.createOm(conf); om.setCertClient(certClient); omMap.put(nodeId, om); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 498261999783d..247c9d7b4ac84 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -383,7 +383,7 @@ public void testSecureOMInitializationFailure() throws Exception { setupOm(conf); conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY, "non-existent-user@EXAMPLE.com"); - testCommonKerberosFailures(() -> OzoneManager.createOm(null, conf)); + testCommonKerberosFailures(() -> OzoneManager.createOm(conf)); } /** @@ -662,7 +662,7 @@ private void setupOm(OzoneConfiguration config) throws Exception { // writes the version file properties omStore.initialize(); OzoneManager.setTestSecureOmFlag(true); - om = OzoneManager.createOm(null, config); + om = OzoneManager.createOm(config); } @Test @@ -717,6 +717,26 @@ public void testSecureOmReInit() throws Exception { LogCapturer omLogs = LogCapturer.captureLogs(OzoneManager.getLogger()); omLogs.clearOutput(); + + /** + * As all these processes run inside the same JVM, there are issues around + * the Hadoop UGI if different processes run with different principals. + * In this test, the OM has to contact the SCM to download certs. SCM runs + * as scm/host@REALM, but the OM logs in as om/host@REALM, and then the test + * fails, and the OM is unable to contact the SCM due to kerberos login + * issues. To work around that, have the OM run as the same principal as the + * SCM, and then the test passes. + * + * TODO: Need to look into this further to see if there is a better way to + * address this problem. 
+ */ + String realm = miniKdc.getRealm(); + conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY, + "scm/" + host + "@" + realm); + omKeyTab = new File(workDir, "scm.keytab"); + conf.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY, + omKeyTab.getAbsolutePath()); + initSCM(); try { scm = StorageContainerManager.createSCM(conf); @@ -725,7 +745,7 @@ public void testSecureOmReInit() throws Exception { OMStorage omStore = new OMStorage(conf); initializeOmStorage(omStore); OzoneManager.setTestSecureOmFlag(true); - om = OzoneManager.createOm(null, conf); + om = OzoneManager.createOm(conf); assertNull(om.getCertificateClient()); assertFalse(omLogs.getOutput().contains("Init response: GETCERT")); @@ -735,7 +755,7 @@ public void testSecureOmReInit() throws Exception { conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); OzoneManager.omInit(conf); om.stop(); - om = OzoneManager.createOm(null, conf); + om = OzoneManager.createOm(conf); Assert.assertNotNull(om.getCertificateClient()); Assert.assertNotNull(om.getCertificateClient().getPublicKey()); @@ -771,7 +791,7 @@ public void testSecureOmInitSuccess() throws Exception { OMStorage omStore = new OMStorage(conf); initializeOmStorage(omStore); OzoneManager.setTestSecureOmFlag(true); - om = OzoneManager.createOm(null, conf); + om = OzoneManager.createOm(conf); Assert.assertNotNull(om.getCertificateClient()); Assert.assertNotNull(om.getCertificateClient().getPublicKey()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java index b8ea2350ff942..4b736c9436d64 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java @@ -23,6 +23,7 @@ import org.apache.hadoop.ozone.web.handlers.UserArgs; import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.utils.OzoneUtils; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -90,10 +91,11 @@ public static void shutdown() { /** * Tests the OM Initialization. - * @throws IOException + * @throws IOException, AuthenticationException */ @Test - public void testOmInitAgain() throws IOException { + public void testOmInitAgain() throws IOException, + AuthenticationException { // Stop the Ozone Manager cluster.getOzoneManager().stop(); // Now try to init the OM again. 
It should succeed diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java index 1057e7a23ba28..62464ba2ef394 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java @@ -1326,7 +1326,7 @@ public void testOmInitializationFailure() throws Exception { conf.get(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY)); OzoneTestUtils.expectOmException(ResultCodes.OM_NOT_INITIALIZED, () -> { - OzoneManager.createOm(null, config); + OzoneManager.createOm(config); }); OzoneTestUtils @@ -1336,7 +1336,7 @@ public void testOmInitializationFailure() throws Exception { omStore.setScmId("testScmId"); // writes the version file properties omStore.initialize(); - OzoneManager.createOm(null, config); + OzoneManager.createOm(config); }); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java new file mode 100644 index 0000000000000..f632ad143c461 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import java.io.IOException; + +/** + * This interface is used by the OzoneManagerStarter class to allow the + * dependencies to be injected to the CLI class. + */ +public interface OMStarterInterface { + void start(OzoneConfiguration conf) throws IOException, + AuthenticationException; + boolean init(OzoneConfiguration conf) throws IOException, + AuthenticationException; +} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 4041670eaa9ce..287c2ded96055 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -142,12 +142,10 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.KMSUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.utils.RetriableTask; import org.apache.ratis.util.LifeCycle; import org.bouncycastle.pkcs.PKCS10CertificationRequest; @@ -160,7 +158,6 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; -import java.io.PrintStream; import java.net.InetSocketAddress; import java.nio.file.Files; import java.nio.file.StandardCopyOption; @@ -206,7 +203,6 @@ import static org.apache.hadoop.ozone.protocol.proto .OzoneManagerProtocolProtos.OzoneManagerService .newReflectiveBlockingService; -import static org.apache.hadoop.util.ExitUtil.terminate; /** * Ozone Manager is the metadata manager of ozone. @@ -220,10 +216,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private static final AuditLogger AUDIT = new AuditLogger( AuditLoggerType.OMLOGGER); - private static final String USAGE = - "Usage: \n ozone om [genericOptions] " + "[ " - + StartupOption.INIT.getName() + " ]\n " + "ozone om [ " - + StartupOption.HELP.getName() + " ]\n"; private static final String OM_DAEMON = "om"; private static boolean securityEnabled = false; private OzoneDelegationTokenSecretManager delegationTokenMgr; @@ -924,109 +916,36 @@ private RPC.Server startRpcServer(OzoneConfiguration conf, return rpcServer; } - /** - * Main entry point for starting OzoneManager. 
- * - * @param argv arguments - * @throws IOException if startup fails due to I/O error - */ - public static void main(String[] argv) throws IOException { - if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) { - System.exit(0); - } - try { - TracingUtil.initTracing("OzoneManager"); - OzoneConfiguration conf = new OzoneConfiguration(); - GenericOptionsParser hParser = new GenericOptionsParser(conf, argv); - if (!hParser.isParseSuccessful()) { - System.err.println("USAGE: " + USAGE + " \n"); - hParser.printGenericCommandUsage(System.err); - System.exit(1); - } - OzoneManager om = createOm(hParser.getRemainingArgs(), conf, true); - if (om != null) { - om.start(); - om.join(); - } - } catch (Throwable t) { - LOG.error("Failed to start the OzoneManager.", t); - terminate(1, t); - } - } - - private static void printUsage(PrintStream out) { - out.println(USAGE + "\n"); - } - private static boolean isOzoneSecurityEnabled() { return securityEnabled; } /** - * Constructs OM instance based on command line arguments. - * - * This method is intended for unit tests only. It suppresses the - * startup/shutdown message and skips registering Unix signal - * handlers. + * Constructs OM instance based on the configuration. * - * @param argv Command line arguments * @param conf OzoneConfiguration * @return OM instance * @throws IOException, AuthenticationException in case OM instance * creation fails. */ - @VisibleForTesting - public static OzoneManager createOm( - String[] argv, OzoneConfiguration conf) + public static OzoneManager createOm(OzoneConfiguration conf) throws IOException, AuthenticationException { - return createOm(argv, conf, false); + loginOMUserIfSecurityEnabled(conf); + return new OzoneManager(conf); } /** - * Constructs OM instance based on command line arguments. + * Logs in the OM use if security is enabled in the configuration. * - * @param argv Command line arguments * @param conf OzoneConfiguration - * @param printBanner if true then log a verbose startup message. - * @return OM instance - * @throws IOException, AuthenticationException in case OM instance - * creation fails. + * @throws IOException, AuthenticationException in case login failes. */ - private static OzoneManager createOm(String[] argv, - OzoneConfiguration conf, boolean printBanner) + private static void loginOMUserIfSecurityEnabled(OzoneConfiguration conf) throws IOException, AuthenticationException { - StartupOption startOpt = parseArguments(argv); - if (startOpt == null) { - printUsage(System.err); - terminate(1); - return null; - } - securityEnabled = OzoneSecurityUtil.isSecurityEnabled(conf); if (securityEnabled) { loginOMUser(conf); } - - switch (startOpt) { - case INIT: - if (printBanner) { - StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG); - } - terminate(omInit(conf) ? 
0 : 1); - return null; - case HELP: - printUsage(System.err); - terminate(0); - return null; - default: - if (argv == null) { - argv = new String[]{}; - } - if (printBanner) { - StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG); - } - return new OzoneManager(conf); - } } /** @@ -1038,7 +957,9 @@ private static OzoneManager createOm(String[] argv, * accessible */ @VisibleForTesting - public static boolean omInit(OzoneConfiguration conf) throws IOException { + public static boolean omInit(OzoneConfiguration conf) throws IOException, + AuthenticationException { + loginOMUserIfSecurityEnabled(conf); OMStorage omStorage = new OMStorage(conf); StorageState state = omStorage.getState(); if (state != StorageState.INITIALIZED) { @@ -1135,23 +1056,6 @@ private static ScmInfo getScmInfo(OzoneConfiguration conf) } } - /** - * Parses the command line options for OM initialization. - * - * @param args command line arguments - * @return StartupOption if options are valid, null otherwise - */ - private static StartupOption parseArguments(String[] args) { - if (args == null || args.length == 0) { - return StartupOption.REGULAR; - } else { - if (args.length == 1) { - return StartupOption.parse(args[0]); - } - } - return null; - } - /** * Builds a message for logging startup information about an RPC server. * @@ -3144,34 +3048,6 @@ public List getAcl(OzoneObj obj) throws IOException { } } - /** - * Startup options. - */ - public enum StartupOption { - INIT("--init"), - HELP("--help"), - REGULAR("--regular"); - - private final String name; - - StartupOption(String arg) { - this.name = arg; - } - - public static StartupOption parse(String value) { - for (StartupOption option : StartupOption.values()) { - if (option.name.equalsIgnoreCase(value)) { - return option; - } - } - return null; - } - - public String getName() { - return name; - } - } - public static Logger getLogger() { return LOG; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java new file mode 100644 index 0000000000000..8a0c317858756 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java @@ -0,0 +1,130 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.util.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine; +import picocli.CommandLine.Command; + +import java.io.IOException; + +/** + * This class provides a command line interface to start the OM + * using Picocli. + */ +@Command(name = "ozone om", + hidden = true, description = "Start or initialize the Ozone Manager.", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +public class OzoneManagerStarter extends GenericCli { + + private OzoneConfiguration conf; + private OMStarterInterface receiver; + private static final Logger LOG = + LoggerFactory.getLogger(OzoneManagerStarter.class); + + public static void main(String[] args) throws Exception { + TracingUtil.initTracing("OzoneManager"); + new OzoneManagerStarter( + new OzoneManagerStarter.OMStarterHelper()).run(args); + } + + public OzoneManagerStarter(OMStarterInterface receiverObj) { + super(); + receiver = receiverObj; + } + + @Override + public Void call() throws Exception { + /** + * This method is invoked only when a sub-command is not called. Therefore + * if someone runs "ozone om" with no parameters, this is the methood + * which runs and starts the OM. + */ + commonInit(); + startOm(); + return null; + } + + /** + * This function is used by the command line to start the OM. + */ + private void startOm() throws Exception { + receiver.start(conf); + } + + /** + * This function implements a sub-command to allow the OM to be + * initialized from the command line. + */ + @CommandLine.Command(name = "--init", + customSynopsis = "ozone om [global options] --init", + hidden = false, + description = "Initialize the Ozone Manager if not already initialized", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) + public void initOm() + throws Exception { + commonInit(); + boolean result = receiver.init(conf); + if (!result) { + throw new IOException("OM Init failed."); + } + } + + /** + * This function should be called by each command to ensure the configuration + * is set and print the startup banner message. + */ + private void commonInit() { + conf = createOzoneConfiguration(); + + String[] originalArgs = getCmd().getParseResult().originalArgs() + .toArray(new String[0]); + StringUtils.startupShutdownMessage(OzoneManager.class, + originalArgs, LOG); + } + + /** + * This static class wraps the external dependencies needed for this command + * to execute its tasks. This allows the dependency to be injected for unit + * testing. 
+ */ + static class OMStarterHelper implements OMStarterInterface{ + + public void start(OzoneConfiguration conf) throws IOException, + AuthenticationException { + OzoneManager om = OzoneManager.createOm(conf); + om.start(); + om.join(); + } + + public boolean init(OzoneConfiguration conf) throws IOException, + AuthenticationException { + return OzoneManager.omInit(conf); + } + } + +} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java new file mode 100644 index 0000000000000..80281693c1b62 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import static org.junit.Assert.*; + +/** + * This class is used to test the CLI provided by OzoneManagerStarter, which is + * used to start and init the OzoneManager. The calls to the Ozone Manager are + * mocked so the tests only validate the CLI calls the correct methods are + * invoked. + */ +public class TestOzoneManagerStarter { + + private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); + private final PrintStream originalOut = System.out; + private final PrintStream originalErr = System.err; + + private MockOMStarter mock; + + @Before + public void setUpStreams() { + System.setOut(new PrintStream(outContent)); + System.setErr(new PrintStream(errContent)); + mock = new MockOMStarter(); + } + + @After + public void restoreStreams() { + System.setOut(originalOut); + System.setErr(originalErr); + } + + @Test + public void testCallsStartWhenServerStarted() throws Exception { + executeCommand(); + assertTrue(mock.startCalled); + } + + @Test + public void testExceptionThrownWhenStartFails() throws Exception { + mock.throwOnStart = true; + try { + executeCommand(); + fail("Exception should have been thrown"); + } catch (Exception e) { + assertTrue(true); + } + } + + @Test + public void testStartNotCalledWithInvalidParam() throws Exception { + executeCommand("--invalid"); + assertFalse(mock.startCalled); + } + + @Test + public void testPassingInitSwitchCallsInit() { + executeCommand("--init"); + assertTrue(mock.initCalled); + } + + @Test + public void testInitSwitchWithInvalidParamDoesNotRun() { + executeCommand("--init", "--invalid"); + assertFalse(mock.initCalled); + } + + @Test + public void testUnSuccessfulInitThrowsException() { + mock.throwOnInit = true; + try { + executeCommand("--init"); + fail("Exception show have been thrown"); + } catch (Exception e) { + assertTrue(true); + } + } + + @Test + public void testInitThatReturnsFalseThrowsException() { + mock.initStatus = false; + try { + executeCommand("--init"); + fail("Exception show have been thrown"); + } catch (Exception e) { + assertTrue(true); + } + } + + @Test + public void testUsagePrintedOnInvalidInput() { + executeCommand("--invalid"); + Pattern p = Pattern.compile("^Unknown option:.*--invalid.*\nUsage"); + Matcher m = p.matcher(errContent.toString()); + assertTrue(m.find()); + } + + private void executeCommand(String... 
args) { + new OzoneManagerStarter(mock).execute(args); + } + + static class MockOMStarter implements OMStarterInterface { + + private boolean startCalled = false; + private boolean initCalled = false; + private boolean initStatus = true; + private boolean throwOnStart = false; + private boolean throwOnInit = false; + + public void start(OzoneConfiguration conf) throws IOException, + AuthenticationException { + startCalled = true; + if (throwOnStart) { + throw new IOException("Simulated Exception"); + } + } + + public boolean init(OzoneConfiguration conf) throws IOException, + AuthenticationException { + initCalled = true; + if (throwOnInit) { + throw new IOException("Simulated Exception"); + } + return initStatus; + } + } +} \ No newline at end of file diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java index 6bff82b5755dc..d08d26ad28938 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java @@ -186,7 +186,7 @@ static OzoneManager getOm(OzoneConfiguration conf) omStorage.setOmId(UUID.randomUUID().toString()); omStorage.initialize(); } - return OzoneManager.createOm(null, conf); + return OzoneManager.createOm(conf); } static void configureOM(Configuration conf, int numHandlers) { From 3d020e914fa65b96a208a2c4bb3dc2f35f80e21d Mon Sep 17 00:00:00 2001 From: Xudong Cao Date: Tue, 18 Jun 2019 02:09:05 +0800 Subject: [PATCH 0204/1308] HDDS-1532. Improve the concurrent testing framework of Freon. (#957) --- .../ozone/freon/RandomKeyGenerator.java | 220 ++++++++++-------- .../ozone/freon/TestRandomKeyGenerator.java | 15 ++ 2 files changed, 144 insertions(+), 91 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java index e6888b9d40be0..6e1e02ccdbe07 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java @@ -64,7 +64,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; import com.google.common.annotations.VisibleForTesting; -import static java.lang.Math.min; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.time.DurationFormatUtils; import org.slf4j.Logger; @@ -102,6 +101,8 @@ enum FreonOps { private static final int QUANTILES = 10; + private static final int CHECK_INTERVAL_MILLIS = 5000; + private byte[] keyValueBuffer = null; private static final String DIGEST_ALGORITHM = "MD5"; @@ -180,7 +181,7 @@ enum FreonOps { private OzoneClient ozoneClient; private ObjectStore objectStore; - private ExecutorService processor; + private ExecutorService executor; private long startTime; private long jobStartTime; @@ -259,9 +260,8 @@ public Void call() throws Exception { } LOG.info("Number of Threads: " + numOfThreads); - threadPoolSize = - min(numOfVolumes, numOfThreads); - processor = Executors.newFixedThreadPool(threadPoolSize); + threadPoolSize = numOfThreads; + executor = Executors.newFixedThreadPool(threadPoolSize); addShutdownHook(); LOG.info("Number of Volumes: {}.", numOfVolumes); @@ -270,9 +270,8 @@ public Void call() throws Exception { LOG.info("Key size: {} bytes", keySize); 
LOG.info("Buffer size: {} bytes", bufferSize); for (int i = 0; i < numOfVolumes; i++) { - String volume = "vol-" + i + "-" + - RandomStringUtils.randomNumeric(5); - processor.submit(new OfflineProcessor(volume)); + String volumeName = "vol-" + i + "-" + RandomStringUtils.randomNumeric(5); + executor.submit(new VolumeProcessor(volumeName)); } Thread validator = null; @@ -301,8 +300,17 @@ public Void call() throws Exception { progressbar.start(); - processor.shutdown(); - processor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS); + // wait until all keys are added or exception occurred. + while ((numberOfKeysAdded.get() != numOfVolumes * numOfBuckets * numOfKeys) + && exception == null) { + try { + Thread.sleep(CHECK_INTERVAL_MILLIS); + } catch (InterruptedException e) { + throw e; + } + } + executor.shutdown(); + executor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS); completed = true; if (exception != null) { @@ -571,15 +579,10 @@ private static class KeyValidate { } } - private class OfflineProcessor implements Runnable { - - private int totalBuckets; - private int totalKeys; + private class VolumeProcessor implements Runnable { private String volumeName; - OfflineProcessor(String volumeName) { - this.totalBuckets = numOfBuckets; - this.totalKeys = numOfKeys; + VolumeProcessor(String volumeName) { this.volumeName = volumeName; } @@ -604,88 +607,118 @@ public void run() { return; } - Long threadKeyWriteTime = 0L; - for (int j = 0; j < totalBuckets; j++) { - String bucketName = "bucket-" + j + "-" + + for (int i = 0; i < numOfBuckets; i++) { + String bucketName = "bucket-" + i + "-" + RandomStringUtils.randomNumeric(5); - try { - LOG.trace("Creating bucket: {} in volume: {}", + BucketProcessor bp = new BucketProcessor(volume, bucketName); + executor.submit(bp); + } + } + } + + private class BucketProcessor implements Runnable { + private OzoneVolume volume; + private String bucketName; + + BucketProcessor(OzoneVolume volume, String bucketName) { + this.volume = volume; + this.bucketName = bucketName; + } + + @Override + @SuppressFBWarnings("REC_CATCH_EXCEPTION") + public void run() { + LOG.trace("Creating bucket: {} in volume: {}", bucketName, volume.getName()); - start = System.nanoTime(); - try (Scope scope = GlobalTracer.get().buildSpan("createBucket") - .startActive(true)) { - volume.createBucket(bucketName); - long bucketCreationDuration = System.nanoTime() - start; - histograms.get(FreonOps.BUCKET_CREATE.ordinal()) - .update(bucketCreationDuration); - bucketCreationTime.getAndAdd(bucketCreationDuration); - numberOfBucketsCreated.getAndIncrement(); - } - OzoneBucket bucket = volume.getBucket(bucketName); - for (int k = 0; k < totalKeys; k++) { - String key = "key-" + k + "-" + - RandomStringUtils.randomNumeric(5); - byte[] randomValue = - DFSUtil.string2Bytes(UUID.randomUUID().toString()); - try { - LOG.trace("Adding key: {} in bucket: {} of volume: {}", - key, bucket, volume); - long keyCreateStart = System.nanoTime(); - try (Scope scope = GlobalTracer.get().buildSpan("createKey") - .startActive(true)) { - OzoneOutputStream os = - bucket - .createKey(key, keySize, type, factor, new HashMap<>()); - long keyCreationDuration = System.nanoTime() - keyCreateStart; - histograms.get(FreonOps.KEY_CREATE.ordinal()) + long start = System.nanoTime(); + OzoneBucket bucket; + try (Scope scope = GlobalTracer.get().buildSpan("createBucket") + .startActive(true)) { + volume.createBucket(bucketName); + long bucketCreationDuration = System.nanoTime() - start; + 
histograms.get(FreonOps.BUCKET_CREATE.ordinal()) + .update(bucketCreationDuration); + bucketCreationTime.getAndAdd(bucketCreationDuration); + numberOfBucketsCreated.getAndIncrement(); + + bucket = volume.getBucket(bucketName); + } catch (IOException e) { + exception = e; + LOG.error("Could not create bucket ", e); + return; + } + + for (int i = 0; i < numOfKeys; i++) { + String keyName = "key-" + i + "-" + RandomStringUtils.randomNumeric(5); + KeyProcessor kp = new KeyProcessor(bucket, keyName); + executor.submit(kp); + } + } + } + + private class KeyProcessor implements Runnable { + private OzoneBucket bucket; + private String keyName; + + KeyProcessor(OzoneBucket bucket, String keyName) { + this.bucket = bucket; + this.keyName = keyName; + } + + @Override + @SuppressFBWarnings("REC_CATCH_EXCEPTION") + public void run() { + String bucketName = bucket.getName(); + String volumeName = bucket.getVolumeName(); + LOG.trace("Adding key: {} in bucket: {} of volume: {}", + keyName, bucketName, volumeName); + byte[] randomValue = DFSUtil.string2Bytes(UUID.randomUUID().toString()); + try { + long keyCreateStart = System.nanoTime(); + try (Scope scope = GlobalTracer.get().buildSpan("createKey") + .startActive(true)) { + OzoneOutputStream os = bucket.createKey(keyName, keySize, type, + factor, new HashMap<>()); + long keyCreationDuration = System.nanoTime() - keyCreateStart; + histograms.get(FreonOps.KEY_CREATE.ordinal()) .update(keyCreationDuration); - keyCreationTime.getAndAdd(keyCreationDuration); - long keyWriteStart = System.nanoTime(); - try (Scope writeScope = GlobalTracer.get() - .buildSpan("writeKeyData") - .startActive(true)) { - for (long nrRemaining = keySize - randomValue.length; - nrRemaining > 0; nrRemaining -= bufferSize) { - int curSize = (int)Math.min(bufferSize, nrRemaining); - os.write(keyValueBuffer, 0, curSize); - } - os.write(randomValue); - os.close(); - } - - long keyWriteDuration = System.nanoTime() - keyWriteStart; - - threadKeyWriteTime += keyWriteDuration; - histograms.get(FreonOps.KEY_WRITE.ordinal()) - .update(keyWriteDuration); - totalBytesWritten.getAndAdd(keySize); - numberOfKeysAdded.getAndIncrement(); - } - if (validateWrites) { - MessageDigest tmpMD = (MessageDigest)commonInitialMD.clone(); - tmpMD.update(randomValue); - boolean validate = validationQueue.offer( - new KeyValidate(bucket, key, tmpMD.digest())); - if (validate) { - LOG.trace("Key {}, is queued for validation.", key); - } - } - } catch (Exception e) { - exception = e; - LOG.error("Exception while adding key: {} in bucket: {}" + - " of volume: {}.", key, bucket, volume, e); + keyCreationTime.getAndAdd(keyCreationDuration); + + long keyWriteStart = System.nanoTime(); + try (Scope writeScope = GlobalTracer.get().buildSpan("writeKeyData") + .startActive(true)) { + for (long nrRemaining = keySize - randomValue.length; + nrRemaining > 0; nrRemaining -= bufferSize) { + int curSize = (int)Math.min(bufferSize, nrRemaining); + os.write(keyValueBuffer, 0, curSize); } + os.write(randomValue); + os.close(); + + long keyWriteDuration = System.nanoTime() - keyWriteStart; + histograms.get(FreonOps.KEY_WRITE.ordinal()) + .update(keyWriteDuration); + keyWriteTime.getAndAdd(keyWriteDuration); + totalBytesWritten.getAndAdd(keySize); + numberOfKeysAdded.getAndIncrement(); } - } catch (Exception e) { - exception = e; - LOG.error("Exception while creating bucket: {}" + - " in volume: {}.", bucketName, volume, e); } - } - keyWriteTime.getAndAdd(threadKeyWriteTime); + if (validateWrites) { + MessageDigest tmpMD = 
(MessageDigest)commonInitialMD.clone(); + tmpMD.update(randomValue); + boolean validate = validationQueue.offer( + new KeyValidate(bucket, keyName, tmpMD.digest())); + if (validate) { + LOG.trace("Key {}, is queued for validation.", keyName); + } + } + } catch (Exception e) { + exception = e; + LOG.error("Exception while adding key: {} in bucket: {}" + + " of volume: {}.", keyName, bucketName, volumeName, e); + } } - } private final class FreonJobInfo { @@ -1028,4 +1061,9 @@ public void setFactor(ReplicationFactor factor) { public void setValidateWrites(boolean validateWrites) { this.validateWrites = validateWrites; } + + @VisibleForTesting + public int getThreadPoolSize() { + return threadPoolSize; + } } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java index c0873d2df6149..748972eafe592 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java @@ -127,4 +127,19 @@ public void bigFileThan2GB() throws Exception { Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); Assert.assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount()); } + + @Test + public void testThreadPoolSize() throws Exception { + RandomKeyGenerator randomKeyGenerator = + new RandomKeyGenerator((OzoneConfiguration) cluster.getConf()); + randomKeyGenerator.setNumOfVolumes(1); + randomKeyGenerator.setNumOfBuckets(1); + randomKeyGenerator.setNumOfKeys(1); + randomKeyGenerator.setFactor(ReplicationFactor.THREE); + randomKeyGenerator.setType(ReplicationType.RATIS); + randomKeyGenerator.setNumOfThreads(10); + randomKeyGenerator.call(); + Assert.assertEquals(10, randomKeyGenerator.getThreadPoolSize()); + Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); + } } From f9a7b442fdd7855e3c7b28e19a12580df48d92bf Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 17 Jun 2019 15:16:41 -0700 Subject: [PATCH 0205/1308] HDFS-14465. When the Block expected replications is larger than the number of DataNodes, entering maintenance will never exit. Contributed by Yicong Cai. --- .../server/blockmanagement/DatanodeAdminManager.java | 11 +++++++++-- .../org/apache/hadoop/hdfs/TestMaintenanceState.java | 1 + 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java index f2ae4dfbf7406..6327b4f25466f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java @@ -345,7 +345,9 @@ private void setInMaintenance(DatanodeDescriptor dn) { * @return true if sufficient, else false. 
*/ private boolean isSufficient(BlockInfo block, BlockCollection bc, - NumberReplicas numberReplicas, boolean isDecommission) { + NumberReplicas numberReplicas, + boolean isDecommission, + boolean isMaintenance) { if (blockManager.hasEnoughEffectiveReplicas(block, numberReplicas, 0)) { // Block has enough replica, skip LOG.trace("Block {} does not need replication.", block); @@ -379,6 +381,10 @@ private boolean isSufficient(BlockInfo block, BlockCollection bc, } } } + if (isMaintenance + && numLive >= blockManager.getMinReplicationToBeInMaintenance()) { + return true; + } return false; } @@ -705,6 +711,7 @@ private void processBlocksInternal( // Schedule low redundancy blocks for reconstruction // if not already pending. boolean isDecommission = datanode.isDecommissionInProgress(); + boolean isMaintenance = datanode.isEnteringMaintenance(); boolean neededReconstruction = isDecommission ? blockManager.isNeededReconstruction(block, num) : blockManager.isNeededReconstructionForMaintenance(block, num); @@ -723,7 +730,7 @@ private void processBlocksInternal( // Even if the block is without sufficient redundancy, // it might not block decommission/maintenance if it // has sufficient redundancy. - if (isSufficient(block, bc, num, isDecommission)) { + if (isSufficient(block, bc, num, isDecommission, isMaintenance)) { if (pruneReliableBlocks) { it.remove(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java index 092b5af2dd128..fcff0e2fa49b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java @@ -373,6 +373,7 @@ public void testExpectedReplications() throws IOException { testExpectedReplication(2); testExpectedReplication(3); testExpectedReplication(4); + testExpectedReplication(10); } private void testExpectedReplication(int replicationFactor) From a50c35bb81105936dc0129b81f913e7307e306fc Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 17 Jun 2019 16:18:37 -0700 Subject: [PATCH 0206/1308] Revert "HDFS-12914. Addendum patch. Block report leases cause missing blocks until next report. Contributed by Santosh Marella, He Xiaoqiao." This reverts commit cdc5de6448e429d6cb523b8a61bed8b1cb2fc263. --- .../blockmanagement/TestBlockReportLease.java | 156 ------------------ 1 file changed, 156 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java deleted file mode 100644 index 3e60aa6278224..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java +++ /dev/null @@ -1,156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdfs.server.blockmanagement; - -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; -import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; -import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; -import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; -import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; -import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; -import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; -import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; -import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; -import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; -import org.apache.hadoop.hdfs.server.protocol.StorageReport; -import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.spy; - -/** - * Tests that BlockReportLease in BlockManager. - */ -public class TestBlockReportLease { - - /** - * Test check lease about one BlockReport with many StorageBlockReport. - * Before HDFS-12914, when batch storage report to NameNode, it will check - * less for one storage by one, So it could part storage report can - * be process normally, however, the rest storage report can not be process - * since check lease failed. - * After HDFS-12914, NameNode check lease once for every blockreport request, - * So this issue will not exist anymore. 
- */ - @Test - public void testCheckBlockReportLease() throws Exception { - HdfsConfiguration conf = new HdfsConfiguration(); - Random rand = new Random(); - - try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(1).build()) { - cluster.waitActive(); - - FSNamesystem fsn = cluster.getNamesystem(); - BlockManager blockManager = fsn.getBlockManager(); - BlockManager spyBlockManager = spy(blockManager); - fsn.setBlockManagerForTesting(spyBlockManager); - String poolId = cluster.getNamesystem().getBlockPoolId(); - - NamenodeProtocols rpcServer = cluster.getNameNodeRpc(); - - // Test based on one DataNode report to Namenode - DataNode dn = cluster.getDataNodes().get(0); - DatanodeDescriptor datanodeDescriptor = spyBlockManager - .getDatanodeManager().getDatanode(dn.getDatanodeId()); - - DatanodeRegistration dnRegistration = dn.getDNRegistrationForBP(poolId); - StorageReport[] storages = dn.getFSDataset().getStorageReports(poolId); - - // Send heartbeat and request full block report lease - HeartbeatResponse hbResponse = rpcServer.sendHeartbeat( - dnRegistration, storages, 0, 0, 0, 0, 0, null, true, null, null); - - DelayAnswer delayer = new DelayAnswer(BlockManager.LOG); - doAnswer(delayer).when(spyBlockManager).processReport( - any(DatanodeStorageInfo.class), - any(BlockListAsLongs.class), - any(BlockReportContext.class)); - - ExecutorService pool = Executors.newFixedThreadPool(1); - - // Trigger sendBlockReport - BlockReportContext brContext = new BlockReportContext(1, 0, - rand.nextLong(), hbResponse.getFullBlockReportLeaseId(), true); - Future sendBRfuturea = pool.submit(() -> { - // Build every storage with 100 blocks for sending report - DatanodeStorage[] datanodeStorages - = new DatanodeStorage[storages.length]; - for (int i = 0; i < storages.length; i++) { - datanodeStorages[i] = storages[i].getStorage(); - } - StorageBlockReport[] reports = createReports(datanodeStorages, 100); - - // Send blockReport - return rpcServer.blockReport(dnRegistration, poolId, reports, - brContext); - }); - - // Wait until BlockManager calls processReport - delayer.waitForCall(); - - // Remove full block report lease about dn - spyBlockManager.getBlockReportLeaseManager() - .removeLease(datanodeDescriptor); - - // Allow blockreport to proceed - delayer.proceed(); - - // Get result, it will not null if process successfully - DatanodeCommand datanodeCommand = sendBRfuturea.get(); - assertTrue(datanodeCommand instanceof FinalizeCommand); - assertEquals(poolId, ((FinalizeCommand)datanodeCommand) - .getBlockPoolId()); - } - } - - private StorageBlockReport[] createReports(DatanodeStorage[] dnStorages, - int numBlocks) { - int longsPerBlock = 3; - int blockListSize = 2 + numBlocks * longsPerBlock; - int numStorages = dnStorages.length; - StorageBlockReport[] storageBlockReports - = new StorageBlockReport[numStorages]; - for (int i = 0; i < numStorages; i++) { - List longs = new ArrayList(blockListSize); - longs.add(Long.valueOf(numBlocks)); - longs.add(0L); - for (int j = 0; j < blockListSize; ++j) { - longs.add(Long.valueOf(j)); - } - BlockListAsLongs blockList = BlockListAsLongs.decodeLongs(longs); - storageBlockReports[i] = new StorageBlockReport(dnStorages[i], blockList); - } - return storageBlockReports; - } -} From 7314185c4a313842115e18b5f42d118392cee929 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 17 Jun 2019 16:18:48 -0700 Subject: [PATCH 0207/1308] Revert "HDFS-12914. Block report leases cause missing blocks until next report. 
Contributed by Santosh Marella, He Xiaoqiao." This reverts commit ae4143a529d74d94f205ca627c31360abfa11bfa. --- .../server/blockmanagement/BlockManager.java | 21 ++++-------- .../server/namenode/NameNodeRpcServer.java | 34 ++++++++----------- 2 files changed, 21 insertions(+), 34 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 8b9788a6fc236..2947b72730524 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -2572,21 +2572,6 @@ private static class BlockInfoToAdd { } } - /** - * Check block report lease. - * @return true if lease exist and not expire - */ - public boolean checkBlockReportLease(BlockReportContext context, - final DatanodeID nodeID) throws UnregisteredNodeException { - if (context == null) { - return true; - } - DatanodeDescriptor node = datanodeManager.getDatanode(nodeID); - final long startTime = Time.monotonicNow(); - return blockReportLeaseManager.checkLease(node, startTime, - context.getLeaseId()); - } - /** * The given storage is reporting all its blocks. * Update the (storage{@literal -->}block list) and @@ -2634,6 +2619,12 @@ public boolean processReport(final DatanodeID nodeID, blockReportLeaseManager.removeLease(node); return !node.hasStaleStorages(); } + if (context != null) { + if (!blockReportLeaseManager.checkLease(node, startTime, + context.getLeaseId())) { + return false; + } + } if (storageInfo.getBlockReportCount() == 0) { // The first block report can be processed a lot more efficiently than diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 31a5eb0b41ab1..7a2a81cdf3c9e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -45,6 +45,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.Callable; import com.google.common.collect.Lists; @@ -174,7 +175,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.NodeRegistration; -import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports; @@ -1591,25 +1591,21 @@ public DatanodeCommand blockReport(final DatanodeRegistration nodeReg, } final BlockManager bm = namesystem.getBlockManager(); boolean noStaleStorages = false; - try { - if (bm.checkBlockReportLease(context, nodeReg)) { - for (int r = 0; r < reports.length; r++) { - final BlockListAsLongs blocks = reports[r].getBlocks(); - // - // BlockManager.processReport accumulates information of prior calls - // for the same node and storage, so the value returned by the last - // call of this loop is the final updated value for noStaleStorage. 
- // - final int index = r; - noStaleStorages = bm.runBlockOp(() -> - bm.processReport(nodeReg, reports[index].getStorage(), - blocks, context)); + for (int r = 0; r < reports.length; r++) { + final BlockListAsLongs blocks = reports[r].getBlocks(); + // + // BlockManager.processReport accumulates information of prior calls + // for the same node and storage, so the value returned by the last + // call of this loop is the final updated value for noStaleStorage. + // + final int index = r; + noStaleStorages = bm.runBlockOp(new Callable() { + @Override + public Boolean call() throws IOException { + return bm.processReport(nodeReg, reports[index].getStorage(), + blocks, context); } - } - } catch (UnregisteredNodeException une) { - LOG.debug("Datanode {} is attempting to report but not register yet.", - nodeReg); - return RegisterCommand.REGISTER; + }); } bm.removeBRLeaseIfNeeded(nodeReg, context); From 6822193ee6d6ac8b08822fa76c89e1dd61c5ddca Mon Sep 17 00:00:00 2001 From: Santosh Marella Date: Fri, 14 Jun 2019 10:35:33 -0700 Subject: [PATCH 0208/1308] HDFS-12914. Block report leases cause missing blocks until next report. Contributed by Santosh Marella, He Xiaoqiao. Signed-off-by: Wei-Chiu Chuang Co-authored-by: He Xiaoqiao --- .../server/blockmanagement/BlockManager.java | 21 ++- .../server/namenode/NameNodeRpcServer.java | 34 ++-- .../blockmanagement/TestBlockReportLease.java | 159 ++++++++++++++++++ 3 files changed, 193 insertions(+), 21 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 2947b72730524..8b9788a6fc236 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -2572,6 +2572,21 @@ private static class BlockInfoToAdd { } } + /** + * Check block report lease. + * @return true if lease exist and not expire + */ + public boolean checkBlockReportLease(BlockReportContext context, + final DatanodeID nodeID) throws UnregisteredNodeException { + if (context == null) { + return true; + } + DatanodeDescriptor node = datanodeManager.getDatanode(nodeID); + final long startTime = Time.monotonicNow(); + return blockReportLeaseManager.checkLease(node, startTime, + context.getLeaseId()); + } + /** * The given storage is reporting all its blocks. 
* Update the (storage{@literal -->}block list) and @@ -2619,12 +2634,6 @@ public boolean processReport(final DatanodeID nodeID, blockReportLeaseManager.removeLease(node); return !node.hasStaleStorages(); } - if (context != null) { - if (!blockReportLeaseManager.checkLease(node, startTime, - context.getLeaseId())) { - return false; - } - } if (storageInfo.getBlockReportCount() == 0) { // The first block report can be processed a lot more efficiently than diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 7a2a81cdf3c9e..31a5eb0b41ab1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -45,7 +45,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.Callable; import com.google.common.collect.Lists; @@ -175,6 +174,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.NodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports; @@ -1591,21 +1591,25 @@ public DatanodeCommand blockReport(final DatanodeRegistration nodeReg, } final BlockManager bm = namesystem.getBlockManager(); boolean noStaleStorages = false; - for (int r = 0; r < reports.length; r++) { - final BlockListAsLongs blocks = reports[r].getBlocks(); - // - // BlockManager.processReport accumulates information of prior calls - // for the same node and storage, so the value returned by the last - // call of this loop is the final updated value for noStaleStorage. - // - final int index = r; - noStaleStorages = bm.runBlockOp(new Callable() { - @Override - public Boolean call() throws IOException { - return bm.processReport(nodeReg, reports[index].getStorage(), - blocks, context); + try { + if (bm.checkBlockReportLease(context, nodeReg)) { + for (int r = 0; r < reports.length; r++) { + final BlockListAsLongs blocks = reports[r].getBlocks(); + // + // BlockManager.processReport accumulates information of prior calls + // for the same node and storage, so the value returned by the last + // call of this loop is the final updated value for noStaleStorage. 
+ // + final int index = r; + noStaleStorages = bm.runBlockOp(() -> + bm.processReport(nodeReg, reports[index].getStorage(), + blocks, context)); } - }); + } + } catch (UnregisteredNodeException une) { + LOG.debug("Datanode {} is attempting to report but not register yet.", + nodeReg); + return RegisterCommand.REGISTER; } bm.removeBRLeaseIfNeeded(nodeReg, context); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java new file mode 100644 index 0000000000000..40408b1924413 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.blockmanagement; + +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; +import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; +import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; +import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; +import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports; +import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; +import org.apache.hadoop.hdfs.server.protocol.StorageReport; +import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; + +/** + * Tests that BlockReportLease in BlockManager. + */ +public class TestBlockReportLease { + + /** + * Test check lease about one BlockReport with many StorageBlockReport. 
+ * Before HDFS-12914, when batch storage report to NameNode, it will check + * less for one storage by one, So it could part storage report can + * be process normally, however, the rest storage report can not be process + * since check lease failed. + * After HDFS-12914, NameNode check lease once for every blockreport request, + * So this issue will not exist anymore. + */ + @Test + public void testCheckBlockReportLease() throws Exception { + HdfsConfiguration conf = new HdfsConfiguration(); + Random rand = new Random(); + + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(1).build()) { + cluster.waitActive(); + + FSNamesystem fsn = cluster.getNamesystem(); + BlockManager blockManager = fsn.getBlockManager(); + BlockManager spyBlockManager = spy(blockManager); + fsn.setBlockManagerForTesting(spyBlockManager); + String poolId = cluster.getNamesystem().getBlockPoolId(); + + NamenodeProtocols rpcServer = cluster.getNameNodeRpc(); + + // Test based on one DataNode report to Namenode + DataNode dn = cluster.getDataNodes().get(0); + DatanodeDescriptor datanodeDescriptor = spyBlockManager + .getDatanodeManager().getDatanode(dn.getDatanodeId()); + + DatanodeRegistration dnRegistration = dn.getDNRegistrationForBP(poolId); + StorageReport[] storages = dn.getFSDataset().getStorageReports(poolId); + + // Send heartbeat and request full block report lease + HeartbeatResponse hbResponse = rpcServer.sendHeartbeat( + dnRegistration, storages, 0, 0, 0, 0, 0, null, true, + SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT); + + DelayAnswer delayer = new DelayAnswer(BlockManager.LOG); + doAnswer(delayer).when(spyBlockManager).processReport( + any(DatanodeStorageInfo.class), + any(BlockListAsLongs.class), + any(BlockReportContext.class)); + + ExecutorService pool = Executors.newFixedThreadPool(1); + + // Trigger sendBlockReport + BlockReportContext brContext = new BlockReportContext(1, 0, + rand.nextLong(), hbResponse.getFullBlockReportLeaseId(), true); + Future sendBRfuturea = pool.submit(() -> { + // Build every storage with 100 blocks for sending report + DatanodeStorage[] datanodeStorages + = new DatanodeStorage[storages.length]; + for (int i = 0; i < storages.length; i++) { + datanodeStorages[i] = storages[i].getStorage(); + } + StorageBlockReport[] reports = createReports(datanodeStorages, 100); + + // Send blockReport + return rpcServer.blockReport(dnRegistration, poolId, reports, + brContext); + }); + + // Wait until BlockManager calls processReport + delayer.waitForCall(); + + // Remove full block report lease about dn + spyBlockManager.getBlockReportLeaseManager() + .removeLease(datanodeDescriptor); + + // Allow blockreport to proceed + delayer.proceed(); + + // Get result, it will not null if process successfully + DatanodeCommand datanodeCommand = sendBRfuturea.get(); + assertTrue(datanodeCommand instanceof FinalizeCommand); + assertEquals(poolId, ((FinalizeCommand)datanodeCommand) + .getBlockPoolId()); + } + } + + private StorageBlockReport[] createReports(DatanodeStorage[] dnStorages, + int numBlocks) { + int longsPerBlock = 3; + int blockListSize = 2 + numBlocks * longsPerBlock; + int numStorages = dnStorages.length; + StorageBlockReport[] storageBlockReports + = new StorageBlockReport[numStorages]; + for (int i = 0; i < numStorages; i++) { + List longs = new ArrayList(blockListSize); + longs.add(Long.valueOf(numBlocks)); + longs.add(0L); + for (int j = 0; j < blockListSize; ++j) { + longs.add(Long.valueOf(j)); + } + BlockListAsLongs blockList = 
BlockListAsLongs.decodeLongs(longs); + storageBlockReports[i] = new StorageBlockReport(dnStorages[i], blockList); + } + return storageBlockReports; + } +} From a95e87d827381a7f87c01f0befc2475f9478e63b Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 17 Jun 2019 17:02:39 -0700 Subject: [PATCH 0209/1308] HDFS-11950. Disable libhdfs zerocopy test on Mac. Contributed by Akira Ajisaka. Signed-off-by: Wei-Chiu Chuang --- .../src/main/native/libhdfs/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt index 3a5749848b39b..a7fb311125110 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt @@ -62,7 +62,7 @@ build_libhdfs_test(test_libhdfs_threaded hdfs_static expect.c test_libhdfs_threa link_libhdfs_test(test_libhdfs_threaded hdfs_static native_mini_dfs) add_libhdfs_test(test_libhdfs_threaded hdfs_static) -if (NOT WIN32) +if (NOT WIN32 AND NOT APPLE) build_libhdfs_test(test_libhdfs_zerocopy hdfs_static expect.c test_libhdfs_zerocopy.c) link_libhdfs_test(test_libhdfs_zerocopy hdfs_static native_mini_dfs ${OS_LINK_LIBRARIES}) add_libhdfs_test(test_libhdfs_zerocopy hdfs_static) From 1e92db5a1e293fea6b696d9bf51d91e712b33127 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 17 Jun 2019 18:29:34 -0700 Subject: [PATCH 0210/1308] HDFS-11949. Add testcase for ensuring that FsShell cann't move file to the target directory that file exists. Contributed by legend. --- .../org/apache/hadoop/fs/TestFsShellCopy.java | 47 ++++++++++++++----- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java index f9b2420067029..72ae296c957b5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java @@ -189,7 +189,7 @@ private void checkPut(Path srcPath, Path targetDir, boolean useWindowsPath) // copy to new file, then again prepPut(dstPath, false, false); checkPut(0, srcPath, dstPath, useWindowsPath); - if (lfs.isFile(srcPath)) { + if (lfs.getFileStatus(srcPath).isFile()) { checkPut(1, srcPath, dstPath, useWindowsPath); } else { // directory works because it copies into the dir // clear contents so the check won't think there are extra paths @@ -228,11 +228,11 @@ private void prepPut(Path dst, boolean create, if (create) { if (isDir) { lfs.mkdirs(dst); - assertTrue(lfs.isDirectory(dst)); + assertTrue(lfs.getFileStatus(dst).isDirectory()); } else { lfs.mkdirs(new Path(dst.getName())); lfs.create(dst).close(); - assertTrue(lfs.isFile(dst)); + assertTrue(lfs.getFileStatus(dst).isFile()); } } } @@ -253,7 +253,7 @@ private void checkPut(int exitCode, Path src, Path dest, Path target; if (lfs.exists(dest)) { - if (lfs.isDirectory(dest)) { + if (lfs.getFileStatus(dest).isDirectory()) { target = new Path(pathAsString(dest), src.getName()); } else { target = dest; @@ -276,7 +276,8 @@ private void checkPut(int exitCode, Path src, Path dest, if (exitCode == 0) { assertTrue(lfs.exists(target)); - assertTrue(lfs.isFile(src) == lfs.isFile(target)); + 
assertTrue(lfs.getFileStatus(src).isFile() == + lfs.getFileStatus(target).isFile()); assertEquals(1, lfs.listStatus(lfs.makeQualified(target).getParent()).length); } else { assertEquals(targetExists, lfs.exists(target)); @@ -293,7 +294,7 @@ public void testRepresentsDir() throws Exception { argv = new String[]{ "-put", srcPath.toString(), dstPath.toString() }; assertEquals(0, shell.run(argv)); - assertTrue(lfs.exists(dstPath) && lfs.isFile(dstPath)); + assertTrue(lfs.exists(dstPath) && lfs.getFileStatus(dstPath).isFile()); lfs.delete(dstPath, true); assertFalse(lfs.exists(dstPath)); @@ -319,7 +320,7 @@ public void testRepresentsDir() throws Exception { "-put", srcPath.toString(), dstPath.toString()+suffix }; assertEquals(0, shell.run(argv)); assertTrue(lfs.exists(subdirDstPath)); - assertTrue(lfs.isFile(subdirDstPath)); + assertTrue(lfs.getFileStatus(subdirDstPath).isFile()); } // ensure .. is interpreted as a dir @@ -329,7 +330,7 @@ public void testRepresentsDir() throws Exception { argv = new String[]{ "-put", srcPath.toString(), dotdotDst }; assertEquals(0, shell.run(argv)); assertTrue(lfs.exists(subdirDstPath)); - assertTrue(lfs.isFile(subdirDstPath)); + assertTrue(lfs.getFileStatus(subdirDstPath).isFile()); } @Test @@ -442,9 +443,33 @@ public void testMoveFileFromLocal() throws Exception { assertEquals(0, exit); assertFalse(lfs.exists(srcFile)); assertTrue(lfs.exists(target)); - assertTrue(lfs.isFile(target)); + assertTrue(lfs.getFileStatus(target).isFile()); } - + + @Test + public void testMoveFileFromLocalDestExists() throws Exception{ + Path testRoot = new Path(testRootDir, "testPutFile"); + lfs.delete(testRoot, true); + lfs.mkdirs(testRoot); + + Path target = new Path(testRoot, "target"); + Path srcFile = new Path(testRoot, new Path("srcFile")); + lfs.createNewFile(srcFile); + + int exit = shell.run(new String[]{ + "-moveFromLocal", srcFile.toString(), target.toString()}); + assertEquals(0, exit); + assertFalse(lfs.exists(srcFile)); + assertTrue(lfs.exists(target)); + assertTrue(lfs.getFileStatus(target).isFile()); + + lfs.createNewFile(srcFile); + exit = shell.run(new String[]{ + "-moveFromLocal", srcFile.toString(), target.toString()}); + assertEquals(1, exit); + assertTrue(lfs.exists(srcFile)); + } + @Test public void testMoveDirFromLocal() throws Exception { Path testRoot = new Path(testRootDir, "testPutDir"); @@ -502,7 +527,7 @@ public void testMoveFromWindowsLocalPath() throws Exception { shellRun(0, "-moveFromLocal", winSrcFile, target.toString()); assertFalse(lfs.exists(srcFile)); assertTrue(lfs.exists(target)); - assertTrue(lfs.isFile(target)); + assertTrue(lfs.getFileStatus(target).isFile()); } @Test From 10311c30b02d984a11f2cedfd06eb2a766ad1576 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 17 Jun 2019 19:55:56 -0700 Subject: [PATCH 0211/1308] HADOOP-14807. should prevent the possibility of NPE about ReconfigurableBase.java. Contributed by hu xiaodong. 
--- .../main/java/org/apache/hadoop/conf/ReconfigurableBase.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java index 23e1fda9053ca..8cacbdcdac039 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java @@ -146,7 +146,8 @@ public void run() { oldConf.unset(change.prop); } } catch (ReconfigurationException e) { - errorMessage = e.getCause().getMessage(); + Throwable cause = e.getCause(); + errorMessage = cause == null ? e.getMessage() : cause.getMessage(); } results.put(change, Optional.ofNullable(errorMessage)); } From 62ad9885ea8c75c134de43a3a925c76b253658e1 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 17 Jun 2019 20:18:53 -0700 Subject: [PATCH 0212/1308] HDFS-13730. BlockReaderRemote.sendReadResult throws NPE. Contributed by Yuanbo Liu. --- .../main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java | 4 +++- .../src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java index f18c5a0c1eeb6..b9f4e24d15c8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java @@ -21,6 +21,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.Socket; +import java.net.SocketAddress; import java.nio.channels.ReadableByteChannel; import org.apache.hadoop.net.unix.DomainSocket; @@ -93,7 +94,8 @@ public void close() throws IOException { @Override public String getRemoteAddressString() { - return socket.getRemoteSocketAddress().toString(); + SocketAddress address = socket.getRemoteSocketAddress(); + return address == null ? null : address.toString(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java index 23a45b7fe453b..164f31e37adde 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java @@ -21,6 +21,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.Socket; +import java.net.SocketAddress; import java.nio.channels.ReadableByteChannel; import org.apache.hadoop.net.SocketInputStream; @@ -96,7 +97,8 @@ public void close() throws IOException { @Override public String getRemoteAddressString() { - return socket.getRemoteSocketAddress().toString(); + SocketAddress address = socket.getRemoteSocketAddress(); + return address == null ? null : address.toString(); } @Override From 67414a1a80039e70e0afc1de171831a6e981f37a Mon Sep 17 00:00:00 2001 From: Zhankun Tang Date: Tue, 18 Jun 2019 12:23:52 +0800 Subject: [PATCH 0213/1308] YARN-9584. Should put initializeProcessTrees method call before get pid. Contributed by Wanqiang Ji. 
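The one-line reorder below makes ContainersMonitorImpl initialize a container's process tree before asking it for a PID, so the PID is read only after initialization has had a chance to populate it. A stand-alone sketch of that ordering, with a made-up type standing in for ProcessTreeInfo:

    import java.util.HashMap;
    import java.util.Map;

    public class MonitorOrderSketch {
        // Hypothetical stand-in: the pid only becomes known once initialize() ran.
        static final class TreeInfo {
            private String pid;
            void initialize() { pid = "4242"; }
            String getPid() { return pid; }
        }

        public static void main(String[] args) {
            Map<String, TreeInfo> tracked = new HashMap<>();
            tracked.put("container_01", new TreeInfo());

            for (Map.Entry<String, TreeInfo> e : tracked.entrySet()) {
                TreeInfo info = e.getValue();
                info.initialize();          // must run before the pid is read
                String pid = info.getPid();
                if (pid == null) {
                    continue;               // cannot be tracked yet
                }
                System.out.println(e.getKey() + " -> pid " + pid);
            }
        }
    }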
--- .../containermanager/monitor/ContainersMonitorImpl.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index 43c7820e39e12..6d3791e955032 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -487,11 +487,10 @@ public void run() { ContainerId containerId = entry.getKey(); ProcessTreeInfo ptInfo = entry.getValue(); try { - String pId = ptInfo.getPID(); - // Initialize uninitialized process trees initializeProcessTrees(entry); + String pId = ptInfo.getPID(); if (pId == null || !isResourceCalculatorAvailable()) { continue; // processTree cannot be tracked } From e3172221eaa6e40a775367221974610b3c693f1a Mon Sep 17 00:00:00 2001 From: Weiwei Yang Date: Mon, 17 Jun 2019 21:45:28 -0700 Subject: [PATCH 0214/1308] HDFS-12770. Add doc about how to disable client socket cache. Contributed by Weiwei Yang. Signed-off-by: Wei-Chiu Chuang --- .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 0f29aa081b190..d4f8abf9bf3d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -3907,6 +3907,7 @@ 16 Socket cache capacity (in entries) for short-circuit reads. + If this value is set to 0, the client socket cache is disabled. From f1c239c6a4c26e9057373b9b9400e54083290f65 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 17 Jun 2019 21:51:33 -0700 Subject: [PATCH 0215/1308] HADOOP-9157. Better option for curl in hadoop-auth-examples. Contributed by Andras Bokor. --- .../hadoop-auth/src/site/markdown/Examples.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-auth/src/site/markdown/Examples.md b/hadoop-common-project/hadoop-auth/src/site/markdown/Examples.md index 4dad79dd063ae..4f29c8da5cf40 100644 --- a/hadoop-common-project/hadoop-auth/src/site/markdown/Examples.md +++ b/hadoop-common-project/hadoop-auth/src/site/markdown/Examples.md @@ -36,14 +36,14 @@ Login to the KDC using **kinit** and then use `curl` to fetch protected URL: $ kinit Please enter the password for tucu@LOCALHOST: - $ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/hadoop-auth-examples/kerberos/who + $ curl --negotiate -u : -b ~/cookiejar.txt -c ~/cookiejar.txt http://$(hostname -f):8080/hadoop-auth-examples/kerberos/who Enter host password for user 'tucu': Hello Hadoop Auth Examples! * The `--negotiate` option enables SPNEGO in `curl`. -* The `-u foo` option is required but the user ignored (the principal +* The `-u :` option is required but the user ignored (the principal that has been kinit-ed is used). 
* The `-b` and `-c` are use to store and send HTTP Cookies. @@ -88,7 +88,7 @@ Try accessing protected resources using `curl`. The protected resources are: $ curl http://localhost:8080/hadoop-auth-examples/simple/who?user.name=foo - $ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/hadoop-auth-examples/kerberos/who + $ curl --negotiate -u : -b ~/cookiejar.txt -c ~/cookiejar.txt http://$(hostname -f):8080/hadoop-auth-examples/kerberos/who ### Accessing the server using the Java client example From 098c325a78dce2697ec94a22a4a3981fa5a3849e Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 17 Jun 2019 22:45:52 -0700 Subject: [PATCH 0216/1308] HDFS-14340. Lower the log level when can't get postOpAttr. Contributed by Anuhan Torgonshar. --- .../java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index 6e63543abbfa1..ea5cdceffd947 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -1037,7 +1037,7 @@ preOpDirAttr, new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); } catch (IOException e1) { - LOG.error("Can't get postOpDirAttr for dirFileId: {}", + LOG.info("Can't get postOpDirAttr for dirFileId: {}", dirHandle.getFileId(), e1); } } From eb6be4643f77b3284297950da4f7e6ca9db11793 Mon Sep 17 00:00:00 2001 From: Wanqiang Ji Date: Mon, 17 Jun 2019 17:35:10 +0800 Subject: [PATCH 0217/1308] YARN-9630. [UI2] Add a link in docs's top page Signed-off-by: Masatake Iwasaki --- hadoop-project/src/site/site.xml | 1 + .../hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index 9328b7e54bebf..9a0cdac0a2a82 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -162,6 +162,7 @@ +

    diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md index 4c9daed7bdd1b..e05f0256bf99e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md @@ -43,7 +43,7 @@ origin (CORS) support. | `yarn.nodemanager.webapp.cross-origin.enabled` | true | Enable CORS support for Node Manager | Also please ensure that CORS related configurations are enabled in `core-site.xml`. -Kindly refer [here](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/HttpAuthentication.html) +Kindly refer [here](../../hadoop-project-dist/hadoop-common/HttpAuthentication.html) Use it ------------- From 56c621c68f5b50474a69e3bd6284d06aeb192517 Mon Sep 17 00:00:00 2001 From: Adam Antal Date: Mon, 17 Jun 2019 22:55:06 -0700 Subject: [PATCH 0218/1308] HADOOP-15914. hadoop jar command has no help argument. Contributed by Adam Antal. Signed-off-by: Wei-Chiu Chuang --- hadoop-common-project/hadoop-common/src/main/bin/hadoop | 4 ++++ hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop index 750dca31457a2..7d9ffc69bc503 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop @@ -140,6 +140,10 @@ function hadoopcmd_case if [[ -n "${YARN_OPTS}" ]] || [[ -n "${YARN_CLIENT_OPTS}" ]]; then hadoop_error "WARNING: Use \"yarn jar\" to launch YARN applications." fi + if [[ -z $1 || $1 = "--help" ]]; then + echo "Usage: hadoop jar [mainClass] args..." + exit 0 + fi HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar ;; jnipath) diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd b/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd index 91c65d1f2d6f2..04e5039d19812 100644 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd @@ -189,6 +189,11 @@ call :updatepath %HADOOP_BIN_PATH% ) else if defined YARN_CLIENT_OPTS ( @echo WARNING: Use "yarn jar" to launch YARN applications. ) + @rem if --help option is used, no need to call command + if [!hadoop-command-arguments[%1%]!]==["--help"] ( + @echo Usage: hadoop jar [mainClass] args... + goto :eof + ) set CLASS=org.apache.hadoop.util.RunJar goto :eof From d41310a15dfd2b6c9b5b1102327335e550917e0a Mon Sep 17 00:00:00 2001 From: Oleg Danilov Date: Tue, 18 Jun 2019 09:06:02 +0300 Subject: [PATCH 0219/1308] HDFS-12315. Use Path instead of String to check closedFiles set. Contributed by Oleg Danilov. 
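The fix below matters because the closedFiles set is keyed by Path while the iterator yields plain String paths; a String never equals a Path, so the old contains() check was always false and the assertion could never trip. A short sketch of the difference (the path value is illustrative):

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.fs.Path;

    public class PathContainsSketch {
        public static void main(String[] args) {
            Set<Path> closedFiles = new HashSet<>();
            closedFiles.add(new Path("/user/test/file1"));

            String filePath = "/user/test/file1";
            // A String is never equal to a Path, so this lookup cannot succeed.
            System.out.println(closedFiles.contains(filePath));            // false
            // Wrapping the string restores the intended comparison.
            System.out.println(closedFiles.contains(new Path(filePath)));  // true
        }
    }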
--- .../src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java index cc32a3cf44d33..6d8ba1a9f471b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java @@ -262,7 +262,7 @@ private void verifyOpenFiles(HashSet closedFiles, while (openFilesRemoteItr.hasNext()) { String filePath = openFilesRemoteItr.next().getFilePath(); assertFalse(filePath + " should not be listed under open files!", - closedFiles.contains(filePath)); + closedFiles.contains(new Path(filePath))); assertTrue(filePath + " is not listed under open files!", openFiles.remove(new Path(filePath))); } From 54cdde38c7f3ca56ebfdad914a93be232a41a261 Mon Sep 17 00:00:00 2001 From: Oleg Danilov Date: Tue, 18 Jun 2019 09:11:25 +0300 Subject: [PATCH 0220/1308] HDFS-12314. Typo in the TestDataNodeHotSwapVolumes.testAddOneNewVolume(). Contributed by Oleg Danilov. --- .../hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index 6672cd2375cfd..e72c499624dbd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -416,7 +416,7 @@ public void testAddOneNewVolume() minNumBlocks = Math.min(minNumBlocks, blockList.getNumberOfBlocks()); maxNumBlocks = Math.max(maxNumBlocks, blockList.getNumberOfBlocks()); } - assertTrue(Math.abs(maxNumBlocks - maxNumBlocks) <= 1); + assertTrue(Math.abs(maxNumBlocks - minNumBlocks) <= 1); verifyFileLength(cluster.getFileSystem(), testFile, numBlocks); } From dd4a7633ece11c528a58146f92522b55e7be4dc6 Mon Sep 17 00:00:00 2001 From: Shweta Yakkali Date: Mon, 17 Jun 2019 23:47:01 -0700 Subject: [PATCH 0221/1308] HADOOP-16156. [Clean-up] Remove NULL check before instanceof and fix checkstyle in InnerNodeImpl. Contributed by Shweta Yakkali. Signed-off-by: Wei-Chiu Chuang --- .../org/apache/hadoop/net/InnerNodeImpl.java | 44 +++++++++++-------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java index 5a2931bf6a153..a0a977334f417 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java @@ -41,26 +41,29 @@ public InnerNodeImpl newInnerNode(String path) { protected final Map childrenMap = new HashMap<>(); protected int numOfLeaves; - /** Construct an InnerNode from a path-like string */ + /** Construct an InnerNode from a path-like string. 
*/ protected InnerNodeImpl(String path) { super(path); } /** Construct an InnerNode - * from its name, its network location, its parent, and its level */ - protected InnerNodeImpl(String name, String location, InnerNode parent, int level) { + * from its name, its network location, its parent, and its level. */ + protected InnerNodeImpl(String name, String location, + InnerNode parent, int level) { super(name, location, parent, level); } @Override - public List getChildren() {return children;} + public List getChildren() { + return children; + } - /** @return the number of children this node has */ + /** @return the number of children this node has. */ int getNumOfChildren() { return children.size(); } - /** Judge if this node represents a rack + /** Judge if this node represents a rack. * @return true if it has no child or its children are not InnerNodes */ public boolean isRack() { @@ -76,7 +79,7 @@ public boolean isRack() { return true; } - /** Judge if this node is an ancestor of node n + /** Judge if this node is an ancestor of node n. * * @param n a node * @return true if this node is an ancestor of n @@ -87,7 +90,7 @@ public boolean isAncestor(Node n) { startsWith(getPath(this)+NodeBase.PATH_SEPARATOR_STR); } - /** Judge if this node is the parent of node n + /** Judge if this node is the parent of node n. * * @param n a node * @return true if this node is the parent of n @@ -107,8 +110,9 @@ public String getNextAncestorName(Node n) { name = name.substring(1); } int index=name.indexOf(PATH_SEPARATOR); - if (index !=-1) + if (index != -1) { name = name.substring(0, index); + } return name; } @@ -168,7 +172,8 @@ public boolean add(Node n) { * @see InnerNodeImpl(String, String, InnerNode, int) */ private InnerNodeImpl createParentNode(String parentName) { - return new InnerNodeImpl(parentName, getPath(this), this, this.getLevel()+1); + return new InnerNodeImpl(parentName, + getPath(this), this, this.getLevel() + 1); } @Override @@ -220,14 +225,16 @@ public boolean remove(Node n) { @Override public Node getLoc(String loc) { - if (loc == null || loc.length() == 0) return this; + if (loc == null || loc.length() == 0) { + return this; + } String[] path = loc.split(PATH_SEPARATOR_STR, 2); - Node childnode = childrenMap.get(path[0]); - if (childnode == null) return null; // non-existing node - if (path.length == 1) return childnode; - if (childnode instanceof InnerNode) { - return ((InnerNode)childnode).getLoc(path[1]); + Node childNode = childrenMap.get(path[0]); + if (childNode == null || path.length == 1) { + return childNode; + } else if (childNode instanceof InnerNode) { + return ((InnerNode)childNode).getLoc(path[1]); } else { return null; } @@ -237,11 +244,10 @@ public Node getLoc(String loc) { public Node getLeaf(int leafIndex, Node excludedNode) { int count=0; // check if the excluded node a leaf - boolean isLeaf = - excludedNode == null || !(excludedNode instanceof InnerNode); + boolean isLeaf = !(excludedNode instanceof InnerNode); // calculate the total number of excluded leaf nodes int numOfExcludedLeaves = - isLeaf ? 1 : ((InnerNode)excludedNode).getNumOfLeaves(); + isLeaf ? 1 : ((InnerNode)excludedNode).getNumOfLeaves(); if (isLeafParent()) { // children are leaves if (isLeaf) { // excluded node is a leaf node if (excludedNode != null && From e188bb12b0e715ab623e4e803aa5e69f381a99ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Tue, 18 Jun 2019 08:46:20 +0200 Subject: [PATCH 0222/1308] HDDS-1694. 
TestNodeReportHandler is failing with NPE Closes #978 --- .../hadoop/hdds/scm/node/SCMNodeManager.java | 23 ++++++++----------- .../scm/server/StorageContainerManager.java | 2 +- .../hdds/scm/node/TestContainerPlacement.java | 8 +++++-- .../hdds/scm/node/TestNodeReportHandler.java | 9 +++++++- 4 files changed, 25 insertions(+), 17 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 9a5ea11b412eb..eaa2255cb0db3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.VersionInfo; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; @@ -94,13 +94,12 @@ public class SCMNodeManager implements NodeManager { LoggerFactory.getLogger(SCMNodeManager.class); private final NodeStateManager nodeStateManager; - private final String clusterID; private final VersionInfo version; private final CommandQueue commandQueue; private final SCMNodeMetrics metrics; // Node manager MXBean private ObjectName nmInfoBean; - private final StorageContainerManager scmManager; + private final SCMStorageConfig scmStorageConfig; private final NetworkTopology clusterMap; private final DNSToSwitchMapping dnsToSwitchMapping; private final boolean useHostname; @@ -108,18 +107,17 @@ public class SCMNodeManager implements NodeManager { /** * Constructs SCM machine Manager. 
*/ - public SCMNodeManager(OzoneConfiguration conf, String clusterID, - StorageContainerManager scmManager, EventPublisher eventPublisher) - throws IOException { + public SCMNodeManager(OzoneConfiguration conf, + SCMStorageConfig scmStorageConfig, EventPublisher eventPublisher, + NetworkTopology networkTopology) { this.nodeStateManager = new NodeStateManager(conf, eventPublisher); - this.clusterID = clusterID; this.version = VersionInfo.getLatestVersion(); this.commandQueue = new CommandQueue(); - this.scmManager = scmManager; + this.scmStorageConfig = scmStorageConfig; LOG.info("Entering startup safe mode."); registerMXBean(); this.metrics = SCMNodeMetrics.create(this); - this.clusterMap = scmManager.getClusterMap(); + this.clusterMap = networkTopology; Class dnsToSwitchMappingClass = conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, TableMapping.class, DNSToSwitchMapping.class); @@ -221,9 +219,8 @@ public VersionResponse getVersion(SCMVersionRequestProto versionRequest) { return VersionResponse.newBuilder() .setVersion(this.version.getVersion()) .addValue(OzoneConsts.SCM_ID, - this.scmManager.getScmStorageConfig().getScmId()) - .addValue(OzoneConsts.CLUSTER_ID, this.scmManager.getScmStorageConfig() - .getClusterID()) + this.scmStorageConfig.getScmId()) + .addValue(OzoneConsts.CLUSTER_ID, this.scmStorageConfig.getClusterID()) .build(); } @@ -274,7 +271,7 @@ public RegisteredCommand register( return RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success) .setDatanodeUUID(datanodeDetails.getUuidString()) - .setClusterID(this.clusterID) + .setClusterID(this.scmStorageConfig.getClusterID()) .setHostname(datanodeDetails.getHostName()) .setIpAddress(datanodeDetails.getIpAddress()) .build(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index ca60a5dd115f6..08712ccdbc5b1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -378,7 +378,7 @@ private void initializeSystemManagers(OzoneConfiguration conf, scmNodeManager = configurator.getScmNodeManager(); } else { scmNodeManager = new SCMNodeManager( - conf, scmStorageConfig.getClusterID(), this, eventQueue); + conf, scmStorageConfig, eventQueue, clusterMap); } ContainerPlacementPolicy containerPlacementPolicy = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index ad24a1079ab61..ec0c4c3447042 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.test.PathUtils; @@ -48,7 +49,6 @@ import java.io.File; import java.io.IOException; import java.util.List; -import java.util.UUID; import 
java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdds.scm.ScmConfigKeys @@ -94,8 +94,12 @@ SCMNodeManager createNodeManager(OzoneConfiguration config) Mockito.mock(StaleNodeHandler.class)); eventQueue.addHandler(SCMEvents.DEAD_NODE, Mockito.mock(DeadNodeHandler.class)); + + SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class); + Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1"); + SCMNodeManager nodeManager = new SCMNodeManager(config, - UUID.randomUUID().toString(), null, eventQueue); + storageConfig, eventQueue, null); return nodeManager; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java index 1cb9bcdc96313..88de27d996507 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java @@ -24,7 +24,9 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode; +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.Event; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; @@ -32,6 +34,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,7 +53,11 @@ public class TestNodeReportHandler implements EventPublisher { @Before public void resetEventCollector() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); - nodeManager = new SCMNodeManager(conf, "cluster1", null, new EventQueue()); + SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class); + Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1"); + nodeManager = + new SCMNodeManager(conf, storageConfig, new EventQueue(), Mockito.mock( + NetworkTopology.class)); nodeReportHandler = new NodeReportHandler(nodeManager); } From 335c1c9938147a464f893f5368c1851c6c0becc6 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Tue, 18 Jun 2019 00:33:10 -0700 Subject: [PATCH 0223/1308] HDFS-14010. Pass correct DF usage to ReservedSpaceCalculator builder. Contributed by Virajith Jalaparti. 
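The FsVolumeImpl change below only builds a ReservedSpaceCalculator when the volume has a DF usage object, and otherwise reports 0 reserved bytes (the new TestProvidedImpl case checks exactly that for PROVIDED volumes, which carry no local usage). A self-contained sketch of the same null-guard shape, with hypothetical types standing in for the Hadoop ones:

    import java.util.OptionalLong;

    public class ReservedSketch {
        // Hypothetical stand-in for the DF usage probe; null means "no local disk".
        interface Usage { long capacity(); }

        static OptionalLong reservedFor(Usage usage, double reservedFraction) {
            // Build a reservation only when a usage probe exists; callers
            // treat "empty" as zero reserved bytes.
            return usage == null
                ? OptionalLong.empty()
                : OptionalLong.of((long) (usage.capacity() * reservedFraction));
        }

        public static void main(String[] args) {
            System.out.println(reservedFor(null, 0.1).orElse(0));          // 0
            System.out.println(reservedFor(() -> 1000L, 0.1).orElse(0));   // 100
        }
    }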
--- .../server/datanode/fsdataset/impl/FsVolumeImpl.java | 11 ++++++++--- .../datanode/fsdataset/impl/TestProvidedImpl.java | 8 ++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index 517bbc68c8eb0..9ffced138438f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -163,6 +163,13 @@ public class FsVolumeImpl implements FsVolumeSpi { this.storageType = storageLocation.getStorageType(); this.configuredCapacity = -1; this.usage = usage; + if (this.usage != null) { + reserved = new ReservedSpaceCalculator.Builder(conf) + .setUsage(this.usage).setStorageType(storageType).build(); + } else { + reserved = null; + LOG.warn("Setting reserved to null as usage is null"); + } if (currentDir != null) { File parent = currentDir.getParentFile(); cacheExecutor = initializeCacheExecutor(parent); @@ -173,8 +180,6 @@ public class FsVolumeImpl implements FsVolumeSpi { } this.conf = conf; this.fileIoProvider = fileIoProvider; - this.reserved = new ReservedSpaceCalculator.Builder(conf) - .setUsage(usage).setStorageType(storageType).build(); } protected ThreadPoolExecutor initializeCacheExecutor(File parent) { @@ -473,7 +478,7 @@ long getRecentReserved() { } long getReserved(){ - return reserved.getReserved(); + return reserved != null ? reserved.getReserved() : 0; } @VisibleForTesting diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java index a48e2f8f379ad..f38359042e694 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java @@ -353,6 +353,14 @@ public void setUp() throws IOException { } } + @Test + public void testReserved() throws Exception { + for (FsVolumeSpi vol : providedVolumes) { + // the reserved space for provided volumes should be 0. + assertEquals(0, ((FsVolumeImpl) vol).getReserved()); + } + } + @Test public void testProvidedVolumeImpl() throws IOException { From fb1ce0d50a4a2b2feea6833b616b180084e0801c Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Tue, 18 Jun 2019 08:51:16 -0700 Subject: [PATCH 0224/1308] HDDS-1670. 
Add limit support to /api/containers and /api/containers/{id} endpoints (#954) --- .../ozone/recon/api/ContainerKeyService.java | 19 +++++-- .../recon/spi/ContainerDBServiceProvider.java | 12 ++++- .../impl/ContainerDBServiceProviderImpl.java | 27 ++++++++-- .../recon/api/TestContainerKeyService.java | 51 +++++++++++-------- 4 files changed, 80 insertions(+), 29 deletions(-) diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerKeyService.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerKeyService.java index 35ae724b93f67..ff355dbac27d9 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerKeyService.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerKeyService.java @@ -21,17 +21,19 @@ import java.time.Instant; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.stream.Collectors; import javax.inject.Inject; +import javax.ws.rs.DefaultValue; import javax.ws.rs.GET; import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; @@ -72,10 +74,11 @@ public class ContainerKeyService { * @return {@link Response} */ @GET - public Response getContainers() { + public Response getContainers( + @DefaultValue("-1") @QueryParam("limit") int limit) { Map containersMap; try { - containersMap = containerDBServiceProvider.getContainers(); + containersMap = containerDBServiceProvider.getContainers(limit); } catch (IOException ioEx) { throw new WebApplicationException(ioEx, Response.Status.INTERNAL_SERVER_ERROR); @@ -92,8 +95,10 @@ public Response getContainers() { */ @GET @Path("/{id}") - public Response getKeysForContainer(@PathParam("id") Long containerId) { - Map keyMetadataMap = new HashMap<>(); + public Response getKeysForContainer( + @PathParam("id") Long containerId, + @DefaultValue("-1") @QueryParam("limit") int limit) { + Map keyMetadataMap = new LinkedHashMap<>(); try { Map containerKeyPrefixMap = containerDBServiceProvider.getKeyPrefixesForContainer(containerId); @@ -143,6 +148,10 @@ public Response getKeysForContainer(@PathParam("id") Long containerId) { Collections.singletonMap(containerKeyPrefix.getKeyVersion(), blockIds)); } else { + // break the for loop if limit has been reached + if (keyMetadataMap.size() == limit) { + break; + } KeyMetadata keyMetadata = new KeyMetadata(); keyMetadata.setBucket(omKeyInfo.getBucketName()); keyMetadata.setVolume(omKeyInfo.getVolumeName()); diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java index 0449e7cf774f7..93178b0fde097 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java @@ -70,13 +70,23 @@ Map getKeyPrefixesForContainer(long containerId) throws IOException; /** - * Get a Map of containerID, containerMetadata of all Containers. + * Get a Map of containerID, containerMetadata of all the Containers. 
* * @return Map of containerID -> containerMetadata. * @throws IOException */ Map getContainers() throws IOException; + /** + * Get a Map of containerID, containerMetadata of Containers only for the + * given limit. If the limit is -1 or any integer <0, then return all + * the containers without any limit. + * + * @return Map of containerID -> containerMetadata. + * @throws IOException + */ + Map getContainers(int limit) throws IOException; + /** * Delete an entry in the container DB. * @param containerKeyPrefix container key prefix to be deleted. diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java index e79b8044f087a..532f74792c05c 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java @@ -22,7 +22,6 @@ import java.io.File; import java.io.IOException; -import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -139,7 +138,7 @@ public Integer getCountForForContainerKeyPrefix( public Map getKeyPrefixesForContainer( long containerId) throws IOException { - Map prefixes = new HashMap<>(); + Map prefixes = new LinkedHashMap<>(); TableIterator> containerIterator = containerKeyTable.iterator(); containerIterator.seek(new ContainerKeyPrefix(containerId)); @@ -166,13 +165,29 @@ public Map getKeyPrefixesForContainer( } /** - * Iterate the DB to construct a Map of containerID -> containerMetadata. + * Get all the containers. * * @return Map of containerID -> containerMetadata. * @throws IOException */ @Override public Map getContainers() throws IOException { + // Set a negative limit to get all the containers. + return getContainers(-1); + } + + /** + * Iterate the DB to construct a Map of containerID -> containerMetadata + * only for the given limit. + * + * Return all the containers if limit < 0. + * + * @return Map of containerID -> containerMetadata. + * @throws IOException + */ + @Override + public Map getContainers(int limit) + throws IOException { Map containers = new LinkedHashMap<>(); TableIterator> containerIterator = containerKeyTable.iterator(); @@ -181,6 +196,12 @@ public Map getContainers() throws IOException { Long containerID = keyValue.getKey().getContainerId(); Integer numberOfKeys = keyValue.getValue(); + // break the loop if limit has been reached + // and one more new entity needs to be added to the containers map + if (containers.size() == limit && !containers.containsKey(containerID)) { + break; + } + // initialize containerMetadata with 0 as number of keys. 
containers.computeIfAbsent(containerID, ContainerMetadata::new); // increment number of keys for the containerID diff --git a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java index 6363e9c4bd65c..620e03b9279f5 100644 --- a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.File; @@ -200,56 +201,67 @@ protected void configure() { @Test public void testGetKeysForContainer() { - Response response = containerKeyService.getKeysForContainer(1L); + Response response = containerKeyService.getKeysForContainer(1L, -1); Collection keyMetadataList = (Collection) response.getEntity(); - assertTrue(keyMetadataList.size() == 2); + assertEquals(keyMetadataList.size(), 2); Iterator iterator = keyMetadataList.iterator(); KeyMetadata keyMetadata = iterator.next(); - assertTrue(keyMetadata.getKey().equals("key_one")); - assertTrue(keyMetadata.getVersions().size() == 1); - assertTrue(keyMetadata.getBlockIds().size() == 1); + assertEquals(keyMetadata.getKey(), "key_one"); + assertEquals(keyMetadata.getVersions().size(), 1); + assertEquals(keyMetadata.getBlockIds().size(), 1); Map> blockIds = keyMetadata.getBlockIds(); - assertTrue(blockIds.get(0L).iterator().next().getLocalID() == 101); + assertEquals(blockIds.get(0L).iterator().next().getLocalID(), 101); keyMetadata = iterator.next(); - assertTrue(keyMetadata.getKey().equals("key_two")); - assertTrue(keyMetadata.getVersions().size() == 2); + assertEquals(keyMetadata.getKey(), "key_two"); + assertEquals(keyMetadata.getVersions().size(), 2); assertTrue(keyMetadata.getVersions().contains(0L) && keyMetadata .getVersions().contains(1L)); - assertTrue(keyMetadata.getBlockIds().size() == 2); + assertEquals(keyMetadata.getBlockIds().size(), 2); blockIds = keyMetadata.getBlockIds(); - assertTrue(blockIds.get(0L).iterator().next().getLocalID() == 103); - assertTrue(blockIds.get(1L).iterator().next().getLocalID() == 104); + assertEquals(blockIds.get(0L).iterator().next().getLocalID(), 103); + assertEquals(blockIds.get(1L).iterator().next().getLocalID(), 104); - response = containerKeyService.getKeysForContainer(3L); + response = containerKeyService.getKeysForContainer(3L, -1); keyMetadataList = (Collection) response.getEntity(); assertTrue(keyMetadataList.isEmpty()); + + // test if limit works as expected + response = containerKeyService.getKeysForContainer(1L, 1); + keyMetadataList = (Collection) response.getEntity(); + assertEquals(keyMetadataList.size(), 1); } @Test public void testGetContainers() { - Response response = containerKeyService.getContainers(); + Response response = containerKeyService.getContainers(-1); List containers = new ArrayList<>( (Collection) response.getEntity()); - assertTrue(containers.size() == 2); - Iterator iterator = containers.iterator(); ContainerMetadata containerMetadata = iterator.next(); - assertTrue(containerMetadata.getContainerID() == 1L); - assertTrue(containerMetadata.getNumberOfKeys() == 3L); + 
assertEquals(containerMetadata.getContainerID(), 1L); + // Number of keys for CID:1 should be 3 because of two different versions + // of key_two stored in CID:1 + assertEquals(containerMetadata.getNumberOfKeys(), 3L); containerMetadata = iterator.next(); - assertTrue(containerMetadata.getContainerID() == 2L); - assertTrue(containerMetadata.getNumberOfKeys() == 2L); + assertEquals(containerMetadata.getContainerID(), 2L); + assertEquals(containerMetadata.getNumberOfKeys(), 2L); + + // test if limit works as expected + response = containerKeyService.getContainers(1); + containers = new ArrayList<>( + (Collection) response.getEntity()); + assertEquals(containers.size(), 1); } /** @@ -266,5 +278,4 @@ private OzoneConfiguration getTestOzoneConfiguration() .getAbsolutePath()); return configuration; } - } \ No newline at end of file From 3ab77d9bc9eacfdb218b68988235a921c810b0d1 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Tue, 18 Jun 2019 09:58:29 -0700 Subject: [PATCH 0225/1308] HDFS-14201. Ability to disallow safemode NN to become active. Contributed by Xiao Liang and He Xiaoqiao. --- .../java/org/apache/hadoop/ipc/Server.java | 3 ++ .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 +++ .../hadoop/hdfs/server/namenode/NameNode.java | 13 ++++++++ .../src/main/resources/hdfs-default.xml | 9 ++++++ .../markdown/HDFSHighAvailabilityWithNFS.md | 12 +++++++ .../markdown/HDFSHighAvailabilityWithQJM.md | 12 +++++++ .../server/namenode/ha/TestHASafeMode.java | 31 +++++++++++++++++++ .../server/namenode/ha/TestNNHealthCheck.java | 30 ++++++++++++++++++ 8 files changed, 114 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 9018bed80e2f1..8c0edbb748107 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -81,6 +81,7 @@ import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.ha.HealthCheckFailedException; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; @@ -3090,6 +3091,8 @@ protected Server(String bindAddress, int port, } this.exceptionsHandler.addTerseLoggingExceptions(StandbyException.class); + this.exceptionsHandler.addTerseLoggingExceptions( + HealthCheckFailedException.class); } public synchronized void addAuxiliaryListener(int auxiliaryPort) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index f4a8def17713a..fb83baf8f6730 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -1016,6 +1016,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_HA_ZKFC_PORT_DEFAULT = 8019; public static final String DFS_HA_ZKFC_NN_HTTP_TIMEOUT_KEY = "dfs.ha.zkfc.nn.http.timeout.ms"; public static final int DFS_HA_ZKFC_NN_HTTP_TIMEOUT_KEY_DEFAULT = 20000; + public static final String DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE = + "dfs.ha.nn.not-become-active-in-safemode"; + public static final boolean 
DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT = + false; // Security-related configs public static final String DFS_ENCRYPT_DATA_TRANSFER_KEY = "dfs.encrypt.data.transfer"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index e4c88563732df..126ac0bde1c9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -118,6 +118,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT; @@ -390,6 +392,7 @@ public long getProtocolVersion(String protocol, private final HAContext haContext; protected final boolean allowStaleStandbyReads; private AtomicBoolean started = new AtomicBoolean(false); + private final boolean notBecomeActiveInSafemode; private final static int HEALTH_MONITOR_WARN_THRESHOLD_MS = 5000; @@ -983,6 +986,9 @@ protected NameNode(Configuration conf, NamenodeRole role) this.stopAtException(e); throw e; } + notBecomeActiveInSafemode = conf.getBoolean( + DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE, + DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT); this.started.set(true); } @@ -1802,6 +1808,10 @@ synchronized void monitorHealth() throw new HealthCheckFailedException( "The NameNode has no resources available"); } + if (notBecomeActiveInSafemode && isInSafeMode()) { + throw new HealthCheckFailedException("The NameNode is configured to " + + "report UNHEALTHY to ZKFC in Safemode."); + } } synchronized void transitionToActive() @@ -1815,6 +1825,9 @@ synchronized void transitionToActive() "Cannot transition from '" + OBSERVER_STATE + "' to '" + ACTIVE_STATE + "'"); } + if (notBecomeActiveInSafemode && isInSafeMode()) { + throw new ServiceFailedException(getRole() + " still not leave safemode"); + } state.setState(haContext, ACTIVE_STATE); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index d4f8abf9bf3d2..890d03475a0a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -3192,6 +3192,15 @@ + + dfs.ha.nn.not-become-active-in-safemode + false + + This will prevent safe mode namenodes to become active while other standby + namenodes might be ready to serve requests when it is set to true. 
+ + + dfs.ha.tail-edits.in-progress false diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md index d607561b6dd39..06cda83651e56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md @@ -296,6 +296,18 @@ The order in which you set these configurations is unimportant, but the values y hdfs://mycluster +* **dfs.ha.nn.not-become-active-in-safemode** - if prevent safe mode namenodes to become active + + Whether allow namenode to become active when it is in safemode, when it is + set to true, namenode in safemode will report SERVICE_UNHEALTHY to ZKFC if + auto failover is on, or will throw exception to fail the transition to + active if auto failover is off. For example: + + + dfs.ha.nn.not-become-active-in-safemode + true + + ### Deployment details After all of the necessary configuration options have been set, one must initially synchronize the two HA NameNodes' on-disk metadata. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md index 4f3df2752895b..eaa1a86db422b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md @@ -347,6 +347,18 @@ The order in which you set these configurations is unimportant, but the values y /path/to/journal/node/local/data +* **dfs.ha.nn.not-become-active-in-safemode** - if prevent safe mode namenodes to become active + + Whether allow namenode to become active when it is in safemode, when it is + set to true, namenode in safemode will report SERVICE_UNHEALTHY to ZKFC if + auto failover is on, or will throw exception to fail the transition to + active if auto failover is off. For example: + + + dfs.ha.nn.not-become-active-in-safemode + true + + ### Deployment details After all of the necessary configuration options have been set, you must start the JournalNode daemons on the set of machines where they will run. This can be done by running the command "*hdfs \--daemon start journalnode*" and waiting for the daemon to start on each of the relevant machines. 
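For reference, the switch documented above can also be flipped in code, which is what the test hunks that follow do through the DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE constant; a minimal sketch using only the public Configuration API (the literal key matches the constant's value added to DFSConfigKeys in this patch):

    import org.apache.hadoop.conf.Configuration;

    public class SafemodeHealthSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // When true, a NameNode still in safe mode reports SERVICE_UNHEALTHY
            // to the ZKFC (automatic failover) and fails transitionToActive
            // (manual failover), as described in the HA docs above.
            conf.setBoolean("dfs.ha.nn.not-become-active-in-safemode", true);
            System.out.println(
                conf.getBoolean("dfs.ha.nn.not-become-active-in-safemode", false));
        }
    }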
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java index f9445fa12eedf..3f1a979d1de74 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; @@ -30,6 +32,8 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.ha.ServiceFailedException; +import org.apache.hadoop.test.LambdaTestUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -887,4 +891,31 @@ public Boolean get() { cluster.transitionToActive(1); assertSafeMode(nn1, 3, 3, 3, 0); } + + /** + * Test transition to active when namenode in safemode. + * + * @throws IOException + */ + @Test + public void testTransitionToActiveWhenSafeMode() throws Exception { + Configuration config = new Configuration(); + config.setBoolean(DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE, true); + try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(config, + new File(GenericTestUtils.getRandomizedTempPath())) + .nnTopology(MiniDFSNNTopology.simpleHATopology()) + .numDataNodes(1) + .build()) { + miniCluster.waitActive(); + miniCluster.transitionToStandby(0); + miniCluster.transitionToStandby(1); + NameNode namenode0 = miniCluster.getNameNode(0); + NameNode namenode1 = miniCluster.getNameNode(1); + NameNodeAdapter.enterSafeMode(namenode0, false); + NameNodeAdapter.enterSafeMode(namenode1, false); + LambdaTestUtils.intercept(ServiceFailedException.class, + "NameNode still not leave safemode", + () -> miniCluster.transitionToActive(0)); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java index e0f794f285db0..ab7e0afbca16d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.HA_HM_RPC_TIMEOUT_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeys.HA_HM_RPC_TIMEOUT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -31,10 +32,12 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.MockNameNodeResourceChecker; import 
org.apache.hadoop.hdfs.tools.NNHAServiceTarget; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -76,6 +79,33 @@ public void testNNHealthCheckWithLifelineAddress() throws IOException { doNNHealthCheckTest(); } + @Test + public void testNNHealthCheckWithSafemodeAsUnhealthy() throws Exception { + conf.setBoolean(DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE, true); + + // now bring up just the NameNode. + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) + .nnTopology(MiniDFSNNTopology.simpleHATopology()).build(); + cluster.waitActive(); + + // manually set safemode. + cluster.getFileSystem(0) + .setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); + + NNHAServiceTarget haTarget = new NNHAServiceTarget(conf, + DFSUtil.getNamenodeNameServiceId(conf), "nn1"); + final String expectedTargetString = haTarget.getAddress().toString(); + + assertTrue("Expected haTarget " + haTarget + " containing " + + expectedTargetString, + haTarget.toString().contains(expectedTargetString)); + HAServiceProtocol rpc = haTarget.getHealthMonitorProxy(conf, 5000); + + LambdaTestUtils.intercept(RemoteException.class, + "The NameNode is configured to report UNHEALTHY to ZKFC in Safemode.", + () -> rpc.monitorHealth()); + } + private void doNNHealthCheckTest() throws IOException { MockNameNodeResourceChecker mockResourceChecker = new MockNameNodeResourceChecker(conf); From 3c1a1ceea9e35ac53376276139416b728ed57f10 Mon Sep 17 00:00:00 2001 From: Shweta Yakkali Date: Tue, 18 Jun 2019 10:20:40 -0700 Subject: [PATCH 0226/1308] HDFS-14487. Missing Space in Client Error Message (Contributed by Shweta Yakkali via Daniel Templeton) Change-Id: I0f8ce74a35ab24fe94fd0e57d8247bb3fa575e6f --- .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index a4e0742fa9e0c..4a0d75e2604fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -965,7 +965,7 @@ protected void completeFile(ExtendedBlock last) throws IOException { } try { if (retries == 0) { - throw new IOException("Unable to close file because the last block" + throw new IOException("Unable to close file because the last block " + last + " does not have enough number of replicas."); } retries--; From b14f0569bb8f4c6ea56de13f797d1d5155e99ceb Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Tue, 18 Jun 2019 13:48:38 -0400 Subject: [PATCH 0227/1308] YARN-9574. Update hadoop-yarn-applications-mawo artifactId to match directory name. 
Contributed by Wanqiang Ji (cherry picked from commit 5f758a69ede8fafd214857a74f5d3b46198094c4) --- .../hadoop-yarn-applications-mawo-core/pom.xml | 4 ++-- .../hadoop-yarn-applications-mawo/pom.xml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml index 93eab69a43c0e..1e3584b2e8fcf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml @@ -13,14 +13,14 @@ - hadoop-applications-mawo + hadoop-yarn-applications-mawo org.apache.hadoop.applications.mawo 3.3.0-SNAPSHOT 4.0.0 - hadoop-applications-mawo-core + hadoop-yarn-applications-mawo-core jar Apache Hadoop YARN Application MaWo Core diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml index 3a5cd9f8d287a..5594a30fed505 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml @@ -20,7 +20,7 @@ 4.0.0 org.apache.hadoop.applications.mawo - hadoop-applications-mawo + hadoop-yarn-applications-mawo pom Apache Hadoop YARN Application MaWo From 71edfce187e78d63f2ee623fda1d77d1f3b1a7a6 Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Tue, 18 Jun 2019 11:13:13 -0700 Subject: [PATCH 0228/1308] HDDS-1699. Update RocksDB version to 6.0.1 (#980) --- hadoop-hdds/common/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 31a7311b0e322..85cdbdfeed959 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -88,7 +88,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.rocksdb rocksdbjni - 5.14.2 + 6.0.1 org.apache.hadoop From 81ec90941160cc5f75370ee45af22c6b47c3c94b Mon Sep 17 00:00:00 2001 From: "Elek, Marton" Date: Tue, 18 Jun 2019 11:59:10 -0700 Subject: [PATCH 0229/1308] HDFS-14078. Admin helper fails to prettify NullPointerExceptions. Contributed by Elek, Marton. Signed-off-by: Wei-Chiu Chuang --- .../apache/hadoop/hdfs/tools/AdminHelper.java | 10 +++- .../hadoop/hdfs/tools/TestAdminHelper.java | 50 +++++++++++++++++++ 2 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestAdminHelper.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java index 391dcb6346758..9cb646b38f6f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java @@ -65,8 +65,14 @@ static DistributedFileSystem getDFS(URI uri, Configuration conf) * When it's a known error, pretty-print the error and squish the stack trace. 
*/ static String prettifyException(Exception e) { - return e.getClass().getSimpleName() + ": " - + e.getLocalizedMessage().split("\n")[0]; + if (e.getLocalizedMessage() != null) { + return e.getClass().getSimpleName() + ": " + + e.getLocalizedMessage().split("\n")[0]; + } else if (e.getStackTrace() != null && e.getStackTrace().length > 0) { + return e.getClass().getSimpleName() + " at " + e.getStackTrace()[0]; + } else { + return e.getClass().getSimpleName(); + } } static TableListing getOptionDescriptionListing() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestAdminHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestAdminHelper.java new file mode 100644 index 0000000000000..f99ef0186ccec --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestAdminHelper.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools; + +import org.junit.Assert; +import org.junit.Test; + +/** + * Test class to test Admin Helper. + */ +public class TestAdminHelper { + + @Test + public void prettifyExceptionWithNpe() { + String pretty = AdminHelper.prettifyException(new NullPointerException()); + Assert.assertTrue( + "Prettified exception message doesn't contain the required exception " + + "message", + pretty.startsWith("NullPointerException at org.apache.hadoop.hdfs.tools" + + ".TestAdminHelper.prettifyExceptionWithNpe")); + } + + @Test + public void prettifyException() { + + String pretty = AdminHelper.prettifyException( + new IllegalArgumentException("Something is wrong", + new IllegalArgumentException("Something is illegal"))); + + Assert.assertEquals( + "IllegalArgumentException: Something is wrong", + pretty); + + } +} \ No newline at end of file From 37bd5bb34225fed254d4a2ca5e617fe025cdb279 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Tue, 18 Jun 2019 14:44:23 -0700 Subject: [PATCH 0230/1308] HDDS-1702. Optimize Ozone Recon build time (#982) --- hadoop-ozone/ozone-recon/pom.xml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hadoop-ozone/ozone-recon/pom.xml b/hadoop-ozone/ozone-recon/pom.xml index 0debaccdbd543..9672796a843e6 100644 --- a/hadoop-ozone/ozone-recon/pom.xml +++ b/hadoop-ozone/ozone-recon/pom.xml @@ -28,6 +28,14 @@ 5.1.3.RELEASE + + + src/main/resources + + **/node_modules/** + + + org.codehaus.mojo From 2dfa932818bca2abfb9f69cb973418abbdc4f45d Mon Sep 17 00:00:00 2001 From: Hanisha Koneru Date: Tue, 18 Jun 2019 16:08:48 -0700 Subject: [PATCH 0231/1308] HDDS-1684. 
OM should create Ratis related dirs only if ratis is enabled (#965) --- .../common/impl/TestHddsDispatcher.java | 2 +- .../hadoop/ozone/om/TestOzoneManager.java | 22 -------- .../TestOzoneManagerSnapshotProvider.java | 1 + .../apache/hadoop/ozone/om/OzoneManager.java | 51 +++++++++---------- 4 files changed, 26 insertions(+), 50 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 54dbe94c1c212..fe27eeb02d6b1 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -74,12 +74,12 @@ public void testContainerCloseActionWhenFull() throws IOException { String testDir = GenericTestUtils.getTempPath( TestHddsDispatcher.class.getSimpleName()); OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HDDS_DATANODE_DIR_KEY, testDir); DatanodeDetails dd = randomDatanodeDetails(); VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf); try { UUID scmId = UUID.randomUUID(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); ContainerSet containerSet = new ContainerSet(); DatanodeStateMachine stateMachine = Mockito.mock( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java index 62464ba2ef394..30bca7089742e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java @@ -85,7 +85,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import org.apache.ratis.util.LifeCycle; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -1365,27 +1364,6 @@ public void testGetServiceList() throws IOException { conf.get(OZONE_SCM_CLIENT_ADDRESS_KEY)), scmAddress); } - /** - * Test that OM Ratis server is started only when OZONE_OM_RATIS_ENABLE_KEY is - * set to true. 
- */ - @Test - public void testRatisServerOnOMInitialization() throws IOException { - // OM Ratis server should not be started when OZONE_OM_RATIS_ENABLE_KEY - // is not set to true - Assert.assertNull("OM Ratis server started though OM Ratis is disabled.", - cluster.getOzoneManager().getOmRatisServerState()); - - // Enable OM Ratis and restart OM - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - cluster.restartOzoneManager(); - - // On enabling OM Ratis, the Ratis server should be started - Assert.assertEquals("OM Ratis server did not start", - LifeCycle.State.RUNNING, - cluster.getOzoneManager().getOmRatisServerState()); - } - @Test public void testVersion() { String expectedVersion = OzoneVersionInfo.OZONE_VERSION_INFO.getVersion(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java index f5e39f70e9c99..2f7550ce69148 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -63,6 +63,7 @@ public void init() throws Exception { clusterId = UUID.randomUUID().toString(); scmId = UUID.randomUUID().toString(); conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true); + conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setClusterId(clusterId) .setScmId(scmId) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 287c2ded96055..2f3daf37a08db 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -313,12 +313,33 @@ private OzoneManager(OzoneConfiguration conf) throws IOException, ProtobufRpcEngine.class); metadataManager = new OmMetadataManagerImpl(configuration); + + // This is a temporary check. Once fully implemented, all OM state change + // should go through Ratis - be it standalone (for non-HA) or replicated + // (for HA). 
+ isRatisEnabled = configuration.getBoolean( + OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, + OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); startRatisServer(); startRatisClient(); - if (peerNodes != null && !peerNodes.isEmpty()) { - this.omSnapshotProvider = new OzoneManagerSnapshotProvider(configuration, - omRatisSnapshotDir, peerNodes); + if (isRatisEnabled) { + // Create Ratis storage dir + String omRatisDirectory = OmUtils.getOMRatisDirectory(configuration); + if (omRatisDirectory == null || omRatisDirectory.isEmpty()) { + throw new IllegalArgumentException(HddsConfigKeys.OZONE_METADATA_DIRS + + " must be defined."); + } + OmUtils.createOMDir(omRatisDirectory); + + // Create Ratis snapshot dir + omRatisSnapshotDir = OmUtils.createOMDir( + OmUtils.getOMRatisSnapshotDirectory(configuration)); + + if (peerNodes != null && !peerNodes.isEmpty()) { + this.omSnapshotProvider = new OzoneManagerSnapshotProvider( + configuration, omRatisSnapshotDir, peerNodes); + } } this.ratisSnapshotFile = new File(omStorage.getCurrentDir(), @@ -542,18 +563,6 @@ private void setOMNodeDetails(String serviceId, String nodeId, configuration.set(OZONE_OM_ADDRESS_KEY, NetUtils.getHostPortString(rpcAddress)); - // Create Ratis storage dir - String omRatisDirectory = OmUtils.getOMRatisDirectory(configuration); - if (omRatisDirectory == null || omRatisDirectory.isEmpty()) { - throw new IllegalArgumentException(HddsConfigKeys.OZONE_METADATA_DIRS + - " must be defined."); - } - OmUtils.createOMDir(omRatisDirectory); - - // Create Ratis snapshot dir - omRatisSnapshotDir = OmUtils.createOMDir( - OmUtils.getOMRatisSnapshotDirectory(configuration)); - // Get and set Http(s) address of local node. If base config keys are // not set, check for keys suffixed with OM serivce ID and node ID. setOMNodeSpecificConfigs(serviceId, nodeId); @@ -1253,12 +1262,6 @@ private RPC.Server getRpcServer(OzoneConfiguration conf) throws IOException { * Creates an instance of ratis server. */ private void startRatisServer() throws IOException { - // This is a temporary check. Once fully implemented, all OM state change - // should go through Ratis - be it standalone (for non-HA) or replicated - // (for HA). - isRatisEnabled = configuration.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, - OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); if (isRatisEnabled) { if (omRatisServer == null) { omRatisServer = OzoneManagerRatisServer.newOMRatisServer( @@ -1277,12 +1280,6 @@ private void startRatisServer() throws IOException { * Creates an instance of ratis client. */ private void startRatisClient() throws IOException { - // This is a temporary check. Once fully implemented, all OM state change - // should go through Ratis - be it standalone (for non-HA) or replicated - // (for HA). - isRatisEnabled = configuration.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, - OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); if (isRatisEnabled) { if (omRatisClient == null) { omRatisClient = OzoneManagerRatisClient.newOzoneManagerRatisClient( From f9ee97de98e1371a2760286070e339a1fd7c5fde Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Tue, 18 Jun 2019 21:58:37 -0700 Subject: [PATCH 0232/1308] HDFS-14398. Update HAState.java to fix typos. Contributed by Nikhil Navadiya. 
--- .../org/apache/hadoop/hdfs/server/namenode/ha/HAState.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java index f56974864d6a2..6684e8e2f6f64 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java @@ -109,7 +109,7 @@ public abstract void enterState(final HAContext context) * that are going on. It can also be used to check any preconditions * for the state transition. * - * This method should not make any destructuve changes to the state + * This method should not make any destructive changes to the state * (eg stopping threads) since {@link #prepareToEnterState(HAContext)} * may subsequently cancel the state transition. * @param context HA context @@ -134,10 +134,10 @@ public abstract void exitState(final HAContext context) * @throws ServiceFailedException on failure to transition to new state. */ public void setState(HAContext context, HAState s) throws ServiceFailedException { - if (this == s) { // Aleady in the new state + if (this == s) { // Already in the new state return; } - throw new ServiceFailedException("Transtion from state " + this + " to " + throw new ServiceFailedException("Transition from state " + this + " to " + s + " is not allowed."); } From a1c3868c4f027adcb814b30d842e60d1f94326ea Mon Sep 17 00:00:00 2001 From: Virajith Jalaparti Date: Tue, 18 Jun 2019 22:13:07 -0700 Subject: [PATCH 0233/1308] HDFS-13287. TestINodeFile#testGetBlockType results in NPE when run alone. Contributed by Virajith Jalaparti. Signed-off-by: Wei-Chiu Chuang --- .../org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 1392f9d9eb239..4674bd7308313 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -296,6 +296,7 @@ public void testGetBlockType() { preferredBlockSize = 128*1024*1024; INodeFile inf = createINodeFile(replication, preferredBlockSize); assertEquals(inf.getBlockType(), CONTIGUOUS); + ErasureCodingPolicyManager.getInstance().init(new Configuration()); INodeFile striped = createStripedINodeFile(preferredBlockSize); assertEquals(striped.getBlockType(), STRIPED); } From 48e564f7e2f9223ba8521c36431358adb47d8bf6 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Tue, 18 Jun 2019 22:28:21 -0700 Subject: [PATCH 0234/1308] HDFS-14537. Journaled Edits Cache is not cleared when formatting the JN. Contributed by Ranith Sardar. 
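In outline, the fix below makes the cache field non-final, moves its construction into a createCache() helper, and invokes that helper again from format(), so a re-formatted JournalNode starts with an empty JournaledEditsCache instead of serving edits buffered before the format. A condensed sketch of the resulting methods (logging and unrelated state elided; see the full hunks that follow):

    private JournaledEditsCache createCache() {
      // The cache is only kept when in-progress edit log tailing is enabled.
      if (conf.getBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY,
          DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_DEFAULT)) {
        return new JournaledEditsCache(conf);
      }
      return null;
    }

    void format(NamespaceInfo nsInfo, boolean force) throws IOException {
      storage.format(nsInfo, force);
      this.cache = createCache();  // rebuild, dropping edits cached pre-format
      refreshCachedData();
    }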
--- .../hadoop/hdfs/qjournal/server/Journal.java | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 17c09fee84eab..f684f30c8f990 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -138,12 +138,14 @@ public class Journal implements Closeable { private final FileJournalManager fjm; - private final JournaledEditsCache cache; + private JournaledEditsCache cache; private final JournalMetrics metrics; private long lastJournalTimestamp = 0; + private Configuration conf = null; + // This variable tracks, have we tried to start journalsyncer // with nameServiceId. This will help not to start the journalsyncer // on each rpc call, if it has failed to start @@ -157,6 +159,7 @@ public class Journal implements Closeable { Journal(Configuration conf, File logDir, String journalId, StartupOption startOpt, StorageErrorReporter errorReporter) throws IOException { + this.conf = conf; storage = new JNStorage(conf, logDir, startOpt, errorReporter); this.journalId = journalId; @@ -164,13 +167,8 @@ public class Journal implements Closeable { this.fjm = storage.getJournalManager(); - if (conf.getBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, - DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_DEFAULT)) { - this.cache = new JournaledEditsCache(conf); - } else { - this.cache = null; - } - + this.cache = createCache(); + this.metrics = JournalMetrics.create(this); EditLogFile latest = scanStorageForLatestEdits(); @@ -179,6 +177,15 @@ public class Journal implements Closeable { } } + private JournaledEditsCache createCache() { + if (conf.getBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, + DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_DEFAULT)) { + return new JournaledEditsCache(conf); + } else { + return null; + } + } + public void setTriedJournalSyncerStartedwithnsId(boolean started) { this.triedJournalSyncerStartedwithnsId = started; } @@ -248,6 +255,7 @@ void format(NamespaceInfo nsInfo, boolean force) throws IOException { LOG.info("Formatting journal id : " + journalId + " with namespace info: " + nsInfo + " and force: " + force); storage.format(nsInfo, force); + this.cache = createCache(); refreshCachedData(); } From d3ac516665b551ff0f9b55b668e2c9fca9a3fde1 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Wed, 19 Jun 2019 06:27:02 -0700 Subject: [PATCH 0235/1308] Revert "HDFS-13287. TestINodeFile#testGetBlockType results in NPE when run alone. Contributed by Virajith Jalaparti." This reverts commit a1c3868c4f027adcb814b30d842e60d1f94326ea. 
--- .../org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 4674bd7308313..1392f9d9eb239 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -296,7 +296,6 @@ public void testGetBlockType() { preferredBlockSize = 128*1024*1024; INodeFile inf = createINodeFile(replication, preferredBlockSize); assertEquals(inf.getBlockType(), CONTIGUOUS); - ErasureCodingPolicyManager.getInstance().init(new Configuration()); INodeFile striped = createStripedINodeFile(preferredBlockSize); assertEquals(striped.getBlockType(), STRIPED); } From 9d6842501c88304ca24062d2463480bc7fbe5e57 Mon Sep 17 00:00:00 2001 From: supratimdeka <46919641+supratimdeka@users.noreply.github.com> Date: Wed, 19 Jun 2019 20:11:16 +0530 Subject: [PATCH 0236/1308] HDDS-1454. GC other system pause events can trigger pipeline destroy for all the nodes in the cluster. Contributed by Supratim Deka (#852) --- .../hdds/scm/node/NodeStateManager.java | 167 ++++++++++++++---- .../hadoop/hdds/scm/node/SCMNodeManager.java | 29 +++ .../hdds/scm/node/TestSCMNodeManager.java | 92 ++++++++++ 3 files changed, 258 insertions(+), 30 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index c54944bc3553e..08a68be2e033e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.node; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.conf.Configuration; @@ -43,6 +44,7 @@ import java.io.Closeable; import java.util.*; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; @@ -116,6 +118,26 @@ private enum NodeLifeCycleEvent { */ private final long deadNodeIntervalMs; + /** + * The future is used to pause/unpause the scheduled checks. + */ + private ScheduledFuture healthCheckFuture; + + /** + * Test utility - tracks if health check has been paused (unit tests). + */ + private boolean checkPaused; + + /** + * timestamp of the latest heartbeat check process. + */ + private long lastHealthCheck; + + /** + * number of times the heart beat check was skipped. + */ + private long skippedHealthChecks; + /** * Constructs a NodeStateManager instance with the given configuration. * @@ -143,10 +165,11 @@ public NodeStateManager(Configuration conf, EventPublisher eventPublisher) { executorService = HadoopExecutors.newScheduledThreadPool(1, new ThreadFactoryBuilder().setDaemon(true) .setNameFormat("SCM Heartbeat Processing Thread - %d").build()); - //BUG:BUG TODO: The return value is ignored, if an exception is thrown in - // the executing funtion, it will be ignored. 
- executorService.schedule(this, heartbeatCheckerIntervalMs, - TimeUnit.MILLISECONDS); + + skippedHealthChecks = 0; + checkPaused = false; // accessed only from test functions + + scheduleNextHealthCheck(); } /** @@ -464,6 +487,42 @@ public Set getContainers(UUID uuid) @Override public void run() { + if (shouldSkipCheck()) { + skippedHealthChecks++; + LOG.info("Detected long delay in scheduling HB processing thread. " + + "Skipping heartbeat checks for one iteration."); + } else { + checkNodesHealth(); + } + + // we purposefully make this non-deterministic. Instead of using a + // scheduleAtFixedFrequency we will just go to sleep + // and wake up at the next rendezvous point, which is currentTime + + // heartbeatCheckerIntervalMs. This leads to the issue that we are now + // heart beating not at a fixed cadence, but clock tick + time taken to + // work. + // + // This time taken to work can skew the heartbeat processor thread. + // The reason why we don't care is because of the following reasons. + // + // 1. checkerInterval is general many magnitudes faster than datanode HB + // frequency. + // + // 2. if we have too much nodes, the SCM would be doing only HB + // processing, this could lead to SCM's CPU starvation. With this + // approach we always guarantee that HB thread sleeps for a little while. + // + // 3. It is possible that we will never finish processing the HB's in the + // thread. But that means we have a mis-configured system. We will warn + // the users by logging that information. + // + // 4. And the most important reason, heartbeats are not blocked even if + // this thread does not run, they will go into the processing queue. + scheduleNextHealthCheck(); + } + + private void checkNodesHealth() { + /* * * staleNodeDeadline healthyNodeDeadline @@ -558,41 +617,36 @@ public void run() { heartbeatCheckerIntervalMs); } - // we purposefully make this non-deterministic. Instead of using a - // scheduleAtFixedFrequency we will just go to sleep - // and wake up at the next rendezvous point, which is currentTime + - // heartbeatCheckerIntervalMs. This leads to the issue that we are now - // heart beating not at a fixed cadence, but clock tick + time taken to - // work. - // - // This time taken to work can skew the heartbeat processor thread. - // The reason why we don't care is because of the following reasons. - // - // 1. checkerInterval is general many magnitudes faster than datanode HB - // frequency. - // - // 2. if we have too much nodes, the SCM would be doing only HB - // processing, this could lead to SCM's CPU starvation. With this - // approach we always guarantee that HB thread sleeps for a little while. - // - // 3. It is possible that we will never finish processing the HB's in the - // thread. But that means we have a mis-configured system. We will warn - // the users by logging that information. - // - // 4. And the most important reason, heartbeats are not blocked even if - // this thread does not run, they will go into the processing queue. + } + + private void scheduleNextHealthCheck() { if (!Thread.currentThread().isInterrupted() && !executorService.isShutdown()) { //BUGBUG: The return future needs to checked here to make sure the // exceptions are handled correctly. 
- executorService.schedule(this, heartbeatCheckerIntervalMs, - TimeUnit.MILLISECONDS); + healthCheckFuture = executorService.schedule(this, + heartbeatCheckerIntervalMs, TimeUnit.MILLISECONDS); } else { - LOG.info("Current Thread is interrupted, shutting down HB processing " + + LOG.warn("Current Thread is interrupted, shutting down HB processing " + "thread for Node Manager."); } + lastHealthCheck = Time.monotonicNow(); + } + + /** + * if the time since last check exceeds the stale|dead node interval, skip. + * such long delays might be caused by a JVM pause. SCM cannot make reliable + * conclusions about datanode health in such situations. + * @return : true indicates skip HB checks + */ + private boolean shouldSkipCheck() { + + long currentTime = Time.monotonicNow(); + long minInterval = Math.min(staleNodeIntervalMs, deadNodeIntervalMs); + + return ((currentTime - lastHealthCheck) >= minInterval); } /** @@ -640,4 +694,57 @@ public void close() { Thread.currentThread().interrupt(); } } + + /** + * Test Utility : return number of times heartbeat check was skipped. + * @return : count of times HB process was skipped + */ + @VisibleForTesting + long getSkippedHealthChecks() { + return skippedHealthChecks; + } + + /** + * Test Utility : Pause the periodic node hb check. + * @return ScheduledFuture for the scheduled check that got cancelled. + */ + @VisibleForTesting + ScheduledFuture pause() { + + if (executorService.isShutdown() || checkPaused) { + return null; + } + + checkPaused = healthCheckFuture.cancel(false); + + return healthCheckFuture; + } + + /** + * Test utility : unpause the periodic node hb check. + * @return ScheduledFuture for the next scheduled check + */ + @VisibleForTesting + ScheduledFuture unpause() { + + if (executorService.isShutdown()) { + return null; + } + + if (checkPaused) { + Preconditions.checkState(((healthCheckFuture == null) + || healthCheckFuture.isCancelled() + || healthCheckFuture.isDone())); + + checkPaused = false; + /** + * We do not call scheduleNextHealthCheck because we are + * not updating the lastHealthCheck timestamp. + */ + healthCheckFuture = executorService.schedule(this, + heartbeatCheckerIntervalMs, TimeUnit.MILLISECONDS); + } + + return healthCheckFuture; + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index eaa2255cb0db3..a85271e270c4c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.node; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos; @@ -72,6 +73,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ScheduledFuture; import java.util.stream.Collectors; /** @@ -580,4 +582,31 @@ private String nodeResolve(String hostname) { return null; } } + + /** + * Test utility to stop heartbeat check process. + * @return ScheduledFuture of next scheduled check that got cancelled. + */ + @VisibleForTesting + ScheduledFuture pauseHealthCheck() { + return nodeStateManager.pause(); + } + + /** + * Test utility to resume the paused heartbeat check process. 
+ * @return ScheduledFuture of the next scheduled check + */ + @VisibleForTesting + ScheduledFuture unpauseHealthCheck() { + return nodeStateManager.unpause(); + } + + /** + * Test utility to get the count of skipped heartbeat check iterations. + * @return count of skipped heartbeat check iterations + */ + @VisibleForTesting + long getSkippedHealthChecks() { + return nodeStateManager.getSkippedHealthChecks(); + } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 60fc2045b022c..ae810716dab4f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -53,6 +53,8 @@ import java.util.ArrayList; import java.util.List; import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -345,6 +347,96 @@ public void testScmDetectStaleAndDeadNode() } } + /** + * Simulate a JVM Pause by pausing the health check process + * Ensure that none of the nodes with heartbeats become Dead or Stale. + * @throws IOException + * @throws InterruptedException + * @throws AuthenticationException + */ + @Test + public void testScmHandleJvmPause() + throws IOException, InterruptedException, AuthenticationException { + final int healthCheckInterval = 200; // milliseconds + final int heartbeatInterval = 1; // seconds + final int staleNodeInterval = 3; // seconds + final int deadNodeInterval = 6; // seconds + ScheduledFuture schedFuture; + + OzoneConfiguration conf = getConf(); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, + healthCheckInterval, MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, + heartbeatInterval, SECONDS); + conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, + staleNodeInterval, SECONDS); + conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, + deadNodeInterval, SECONDS); + + try (SCMNodeManager nodeManager = createNodeManager(conf)) { + DatanodeDetails node1 = + TestUtils.createRandomDatanodeAndRegister(nodeManager); + DatanodeDetails node2 = + TestUtils.createRandomDatanodeAndRegister(nodeManager); + + nodeManager.processHeartbeat(node1); + nodeManager.processHeartbeat(node2); + + // Sleep so that heartbeat processing thread gets to run. + Thread.sleep(1000); + + //Assert all nodes are healthy. + assertEquals(2, nodeManager.getAllNodes().size()); + assertEquals(2, nodeManager.getNodeCount(HEALTHY)); + + /** + * Simulate a JVM Pause and subsequent handling in following steps: + * Step 1 : stop heartbeat check process for stale node interval + * Step 2 : resume heartbeat check + * Step 3 : wait for 1 iteration of heartbeat check thread + * Step 4 : retrieve the state of all nodes - assert all are HEALTHY + * Step 5 : heartbeat for node1 + * [TODO : what if there is scheduling delay of test thread in Step 5?] 
+ * Step 6 : wait for some time to allow iterations of check process + * Step 7 : retrieve the state of all nodes - assert node2 is STALE + * and node1 is HEALTHY + */ + + // Step 1 : stop health check process (simulate JVM pause) + nodeManager.pauseHealthCheck(); + Thread.sleep(MILLISECONDS.convert(staleNodeInterval, SECONDS)); + + // Step 2 : resume health check + assertTrue("Unexpected, already skipped heartbeat checks", + (nodeManager.getSkippedHealthChecks() == 0)); + schedFuture = nodeManager.unpauseHealthCheck(); + + // Step 3 : wait for 1 iteration of health check + try { + schedFuture.get(); + assertTrue("We did not skip any heartbeat checks", + nodeManager.getSkippedHealthChecks() > 0); + } catch (ExecutionException e) { + assertEquals("Unexpected exception waiting for Scheduled Health Check", + 0, 1); + } + + // Step 4 : all nodes should still be HEALTHY + assertEquals(2, nodeManager.getAllNodes().size()); + assertEquals(2, nodeManager.getNodeCount(HEALTHY)); + + // Step 5 : heartbeat for node1 + nodeManager.processHeartbeat(node1); + + // Step 6 : wait for health check process to run + Thread.sleep(1000); + + // Step 7 : node2 should transition to STALE + assertEquals(1, nodeManager.getNodeCount(HEALTHY)); + assertEquals(1, nodeManager.getNodeCount(STALE)); + } + } + /** * Check for NPE when datanodeDetails is passed null for sendHeartbeat. * From 450c070a8fb61910f4cd3cf263ce85091509a336 Mon Sep 17 00:00:00 2001 From: Robert Levas Date: Wed, 19 Jun 2019 17:43:14 +0100 Subject: [PATCH 0237/1308] HADOOP-16340. ABFS driver continues to retry on IOException responses from REST operations. Contributed by Robert Levas. This makes the HttpException constructor protected rather than public, so it is possible to implement custom subclasses of this exception -exceptions which will not be retried. Change-Id: Ie8aaa23a707233c2db35948784908b6778ff3a8f --- .../hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java index 39fba83f8acf4..6204a957e395e 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java @@ -165,6 +165,8 @@ public static AzureADToken getTokenUsingRefreshToken(String clientId, * requestId and error message, it is thrown when AzureADAuthenticator * failed to get the Azure Active Directory token. */ + @InterfaceAudience.LimitedPrivate("authorization-subsystems") + @InterfaceStability.Unstable public static class HttpException extends IOException { private final int httpErrorCode; private final String requestId; @@ -191,7 +193,7 @@ public String getRequestId() { return this.requestId; } - HttpException( + protected HttpException( final int httpErrorCode, final String requestId, final String message, From f5ecc0bc080cb8a64c6d4632fc1c121f93d95c5e Mon Sep 17 00:00:00 2001 From: David Mollitor Date: Wed, 19 Jun 2019 10:00:03 -0700 Subject: [PATCH 0238/1308] HDFS-14103. Review Logging of BlockPlacementPolicyDefault. Contributed by David Mollitor. 
Signed-off-by: Wei-Chiu Chuang --- .../BlockPlacementPolicyDefault.java | 51 ++++++++----------- 1 file changed, 20 insertions(+), 31 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index 1320c80cde3eb..b50d479c786c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -220,10 +220,8 @@ DatanodeStorageInfo[] chooseTarget(String src, return getPipeline(writer, results.toArray(new DatanodeStorageInfo[results.size()])); } catch (NotEnoughReplicasException nr) { - if (LOG.isDebugEnabled()) { - LOG.debug("Failed to choose with favored nodes (=" + favoredNodes - + "), disregard favored nodes hint and retry.", nr); - } + LOG.debug("Failed to choose with favored nodes (={}), disregard favored" + + " nodes hint and retry.", favoredNodes, nr); // Fall back to regular block placement disregarding favored nodes hint return chooseTarget(src, numOfReplicas, writer, new ArrayList(numOfReplicas), false, @@ -428,9 +426,7 @@ private Node chooseTarget(int numOfReplicas, if (storageTypes == null) { storageTypes = getRequiredStorageTypes(requiredStorageTypes); } - if (LOG.isTraceEnabled()) { - LOG.trace("storageTypes=" + storageTypes); - } + LOG.trace("storageTypes={}", storageTypes); try { if ((numOfReplicas = requiredStorageTypes.size()) == 0) { @@ -449,11 +445,8 @@ private Node chooseTarget(int numOfReplicas, + ", storagePolicy=" + storagePolicy + ", newBlock=" + newBlock + ")"; - if (LOG.isTraceEnabled()) { - LOG.trace(message, e); - } else { - LOG.warn(message + " " + e.getMessage()); - } + LOG.trace(message, e); + LOG.warn(message + " " + e.getMessage()); if (avoidStaleNodes) { // Retry chooseTarget again, this time not avoiding stale nodes. 
@@ -666,10 +659,9 @@ protected DatanodeStorageInfo chooseLocalRack(Node localMachine, } } - if (LOG.isDebugEnabled()) { - LOG.debug("Failed to choose from local rack (location = " + localRack - + "); the second replica is not found, retry choosing randomly", e); - } + LOG.debug("Failed to choose from local rack (location = {}); the second" + + " replica is not found, retry choosing randomly", localRack, e); + //the second replica is not found, randomly choose one from the network return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); @@ -687,12 +679,10 @@ private DatanodeStorageInfo chooseFromNextRack(Node next, try { return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); - } catch(NotEnoughReplicasException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Failed to choose from the next rack (location = " + nextRack - + "), retry choosing randomly", e); - } - //otherwise randomly choose one from the network + } catch (NotEnoughReplicasException e) { + LOG.debug("Failed to choose from the next rack (location = {}), " + + "retry choosing randomly", nextRack, e); + // otherwise randomly choose one from the network return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } @@ -790,7 +780,7 @@ protected DatanodeStorageInfo chooseRandom(int numOfReplicas, } Preconditions.checkState(excludedNodes.add(chosenNode), "chosenNode " + chosenNode + " is already in excludedNodes " + excludedNodes); - if (LOG.isDebugEnabled() && builder != null) { + if (LOG.isDebugEnabled()) { builder.append("\nNode ").append(NodeBase.getPath(chosenNode)) .append(" ["); } @@ -826,7 +816,7 @@ protected DatanodeStorageInfo chooseRandom(int numOfReplicas, } } - if (LOG.isDebugEnabled() && builder != null) { + if (LOG.isDebugEnabled()) { builder.append("\n]"); } @@ -836,7 +826,7 @@ protected DatanodeStorageInfo chooseRandom(int numOfReplicas, } if (numOfReplicas>0) { String detail = enableDebugLogging; - if (LOG.isDebugEnabled() && builder != null) { + if (LOG.isDebugEnabled()) { detail = builder.toString(); if (badTarget) { builder.setLength(0); @@ -852,7 +842,7 @@ protected DatanodeStorageInfo chooseRandom(int numOfReplicas, final HashMap reasonMap = CHOOSE_RANDOM_REASONS.get(); if (!reasonMap.isEmpty()) { - LOG.info("Not enough replicas was chosen. Reason:{}", reasonMap); + LOG.info("Not enough replicas was chosen. Reason: {}", reasonMap); } throw new NotEnoughReplicasException(detail); } @@ -1166,11 +1156,10 @@ public List chooseReplicasToDelete( } firstOne = false; if (cur == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("No excess replica can be found. excessTypes: {}." + - " moreThanOne: {}. exactlyOne: {}.", excessTypes, - moreThanOne, exactlyOne); - } + LOG.debug( + "No excess replica can be found. excessTypes: {}. " + + "moreThanOne: {}. exactlyOne: {}.", + excessTypes, moreThanOne, exactlyOne); break; } From 71ecd2e41129670b2aecdfd859b8110806b95281 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Wed, 19 Jun 2019 10:27:53 -0700 Subject: [PATCH 0239/1308] HDFS-14303. check block directory logic not correct when there is only meta file, print no meaning warn log. Contributed by qiang Liu. 
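Judging by the one-line change below, the old call site handed verifyFileLocation() the parent directory of an orphaned meta file rather than the file itself, so the check compared the wrong directory level and the "found in invalid directory" warning was printed even though the meta file was correctly placed. The fix passes the meta file, matching how block files are already handled; condensed:

    // Before: the extra getParentFile() made the comparison use the wrong
    // directory level, producing a meaningless warning on every scan.
    verifyFileLocation(file.getParentFile(), bpFinalizedDir, blockId);

    // After: check the meta file itself.
    verifyFileLocation(file, bpFinalizedDir, blockId);

The accompanying test deletes a block file while leaving its meta file behind, runs a directory scan, and asserts that the log contains the deleted-metadata-file message but not the invalid-directory warning.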
--- .../datanode/fsdataset/impl/FsVolumeImpl.java | 2 +- .../server/datanode/TestDirectoryScanner.java | 68 +++++++++++++++++++ 2 files changed, 69 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index 9ffced138438f..0f240cf83e19f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -1374,7 +1374,7 @@ private void compileReport(File bpFinalizedDir, File dir, if (!Block.isBlockFilename(file)) { if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, file.getName())) { long blockId = Block.getBlockId(file.getName()); - verifyFileLocation(file.getParentFile(), bpFinalizedDir, + verifyFileLocation(file, bpFinalizedDir, blockId); report.add(new ScanInfo(blockId, null, file, this)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index 8ea45c45f8787..a6fb1abf00ee3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -26,6 +26,7 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; @@ -72,6 +73,9 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.util.Time; +import org.apache.log4j.Level; +import org.apache.log4j.SimpleLayout; +import org.apache.log4j.WriterAppender; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; @@ -394,6 +398,70 @@ public void testRetainBlockOnPersistentStorage() throws Exception { } } + /** + * test scan only meta file NOT generate wrong folder structure warn log. + */ + @Test(timeout=600000) + public void testScanDirectoryStructureWarn() throws Exception { + + //add a logger stream to check what has printed to log + ByteArrayOutputStream loggerStream = new ByteArrayOutputStream(); + org.apache.log4j.Logger rootLogger = + org.apache.log4j.Logger.getRootLogger(); + rootLogger.setLevel(Level.INFO); + WriterAppender writerAppender = + new WriterAppender(new SimpleLayout(), loggerStream); + rootLogger.addAppender(writerAppender); + + cluster = new MiniDFSCluster + .Builder(CONF) + .storageTypes(new StorageType[] { + StorageType.RAM_DISK, StorageType.DEFAULT }) + .numDataNodes(1) + .build(); + try { + cluster.waitActive(); + bpid = cluster.getNamesystem().getBlockPoolId(); + fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0)); + client = cluster.getFileSystem().getClient(); + scanner = new DirectoryScanner(fds, CONF); + scanner.setRetainDiffs(true); + FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0)); + + // Create a file file on RAM_DISK + createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH, true); + + // Ensure no difference between volumeMap and disk. 
+ scan(1, 0, 0, 0, 0, 0); + + //delete thre block file , left the meta file alone + deleteBlockFile(); + + //scan to ensure log warn not printed + scan(1, 1, 0, 1, 0, 0, 0); + + //ensure the warn log not appear and missing block log do appear + String logContent = new String(loggerStream.toByteArray()); + String missingBlockWarn = "Deleted a metadata file" + + " for the deleted block"; + String dirStructureWarnLog = " found in invalid directory." + + " Expected directory: "; + assertFalse("directory check print meaningless warning message", + logContent.contains(dirStructureWarnLog)); + assertTrue("missing block warn log not appear", + logContent.contains(missingBlockWarn)); + LOG.info("check pass"); + + } finally { + if (scanner != null) { + scanner.shutdown(); + scanner = null; + } + cluster.shutdown(); + cluster = null; + } + } + @Test(timeout = 300000) public void testDeleteBlockOnTransientStorage() throws Exception { cluster = new MiniDFSCluster.Builder(CONF) From 5bfdf62614735e09b67d6c70a0db4e0dbd2743b2 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Wed, 19 Jun 2019 18:45:23 -0400 Subject: [PATCH 0240/1308] YARN-9631. Added ability to select JavaScript test or skip JavaScript tests for YARN application catalog. Contributed by Eric Yang (cherry picked from commit 6002b0c5c6994965d3f7231330248c093869dba2) --- .../pom.xml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml index 273379d3d0816..8e716f81ebc76 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml @@ -36,6 +36,7 @@ target/generated-sources/vendor UTF-8 false + *Spec @@ -365,6 +366,7 @@ + ${skipTests} org.openqa.selenium.phantomjs.PhantomJSDriver @@ -381,7 +383,7 @@ src/main/javascript src/test/javascript - *Spec.js + ${javascript.test}.js @@ -458,6 +460,17 @@ + + test-selector + + + test + + + + ${test} + + rest-docs From 28291a9e8ade5dc4ebcebe1c9fbe9e92535c9333 Mon Sep 17 00:00:00 2001 From: Sahil Takiar Date: Thu, 20 Jun 2019 09:41:58 +0100 Subject: [PATCH 0241/1308] HADOOP-16379: S3AInputStream.unbuffer should merge input stream stats into fs-wide stats Contributed by Sahil Takiar Change-Id: I2bcfaaea00d12c633757069402dcd0b91a5f5c05 --- .../apache/hadoop/fs/s3a/S3AInputStream.java | 10 ++- .../hadoop/fs/s3a/S3AInstrumentation.java | 85 ++++++++++++++++++- .../hadoop/fs/s3a/ITestS3AUnbuffer.java | 79 +++++++++++++++-- 3 files changed, 164 insertions(+), 10 deletions(-) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java index 6e7a2511f6749..60221263cf3a0 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java @@ -823,9 +823,17 @@ public static long validateReadahead(@Nullable Long readahead) { } } + /** + * Closes the underlying S3 stream, and merges the {@link #streamStatistics} + * instance associated with the stream. 
+ */ @Override public synchronized void unbuffer() { - closeStream("unbuffer()", contentRangeFinish, false); + try { + closeStream("unbuffer()", contentRangeFinish, false); + } finally { + streamStatistics.merge(false); + } } @Override diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java index 2bb8f682d8d42..fd7893f1bc405 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java @@ -647,6 +647,7 @@ public final class InputStreamStatistics implements AutoCloseable { public long inputPolicy; /** This is atomic so that it can be passed as a reference. */ private final AtomicLong versionMismatches = new AtomicLong(0); + private InputStreamStatistics mergedStats; private InputStreamStatistics() { } @@ -759,7 +760,7 @@ public void readOperationCompleted(int requested, int actual) { */ @Override public void close() { - mergeInputStreamStatistics(this); + merge(true); } /** @@ -816,6 +817,88 @@ public String toString() { sb.append('}'); return sb.toString(); } + + /** + * Merge the statistics into the filesystem's instrumentation instance. + * Takes a diff between the current version of the stats and the + * version of the stats when merge was last called, and merges the diff + * into the instrumentation instance. Used to periodically merge the + * stats into the fs-wide stats. Behavior is undefined if called on a + * closed instance. + */ + void merge(boolean isClosed) { + if (mergedStats != null) { + mergeInputStreamStatistics(diff(mergedStats)); + } else { + mergeInputStreamStatistics(this); + } + // If stats are closed, no need to create another copy + if (!isClosed) { + mergedStats = copy(); + } + } + + /** + * Returns a diff between this {@link InputStreamStatistics} instance and + * the given {@link InputStreamStatistics} instance. 
+ */ + private InputStreamStatistics diff(InputStreamStatistics inputStats) { + InputStreamStatistics diff = new InputStreamStatistics(); + diff.openOperations = openOperations - inputStats.openOperations; + diff.closeOperations = closeOperations - inputStats.closeOperations; + diff.closed = closed - inputStats.closed; + diff.aborted = aborted - inputStats.aborted; + diff.seekOperations = seekOperations - inputStats.seekOperations; + diff.readExceptions = readExceptions - inputStats.readExceptions; + diff.forwardSeekOperations = + forwardSeekOperations - inputStats.forwardSeekOperations; + diff.backwardSeekOperations = + backwardSeekOperations - inputStats.backwardSeekOperations; + diff.bytesRead = bytesRead - inputStats.bytesRead; + diff.bytesSkippedOnSeek = + bytesSkippedOnSeek - inputStats.bytesSkippedOnSeek; + diff.bytesBackwardsOnSeek = + bytesBackwardsOnSeek - inputStats.bytesBackwardsOnSeek; + diff.readOperations = readOperations - inputStats.readOperations; + diff.readFullyOperations = + readFullyOperations - inputStats.readFullyOperations; + diff.readsIncomplete = readsIncomplete - inputStats.readsIncomplete; + diff.bytesReadInClose = bytesReadInClose - inputStats.bytesReadInClose; + diff.bytesDiscardedInAbort = + bytesDiscardedInAbort - inputStats.bytesDiscardedInAbort; + diff.policySetCount = policySetCount - inputStats.policySetCount; + diff.inputPolicy = inputPolicy - inputStats.inputPolicy; + diff.versionMismatches.set(versionMismatches.longValue() - + inputStats.versionMismatches.longValue()); + return diff; + } + + /** + * Returns a new {@link InputStreamStatistics} instance with all the same + * values as this {@link InputStreamStatistics}. + */ + private InputStreamStatistics copy() { + InputStreamStatistics copy = new InputStreamStatistics(); + copy.openOperations = openOperations; + copy.closeOperations = closeOperations; + copy.closed = closed; + copy.aborted = aborted; + copy.seekOperations = seekOperations; + copy.readExceptions = readExceptions; + copy.forwardSeekOperations = forwardSeekOperations; + copy.backwardSeekOperations = backwardSeekOperations; + copy.bytesRead = bytesRead; + copy.bytesSkippedOnSeek = bytesSkippedOnSeek; + copy.bytesBackwardsOnSeek = bytesBackwardsOnSeek; + copy.readOperations = readOperations; + copy.readFullyOperations = readFullyOperations; + copy.readsIncomplete = readsIncomplete; + copy.bytesReadInClose = bytesReadInClose; + copy.bytesDiscardedInAbort = bytesDiscardedInAbort; + copy.policySetCount = policySetCount; + copy.inputPolicy = inputPolicy; + return copy; + } } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java index b04b9da486379..2ba3fd7a65cde 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java @@ -19,14 +19,16 @@ package org.apache.hadoop.fs.s3a; import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.io.IOUtils; import org.junit.Test; import java.io.IOException; +import static org.apache.hadoop.fs.s3a.Statistic.STREAM_SEEK_BYTES_READ; + /** * Integration test for calling * {@link org.apache.hadoop.fs.CanUnbuffer#unbuffer} on {@link S3AInputStream}. 
@@ -38,20 +40,27 @@ */ public class ITestS3AUnbuffer extends AbstractS3ATestBase { + private Path dest; + + @Override + public void setup() throws Exception { + super.setup(); + dest = path("ITestS3AUnbuffer"); + describe("ITestS3AUnbuffer"); + + byte[] data = ContractTestUtils.dataset(16, 'a', 26); + ContractTestUtils.writeDataset(getFileSystem(), dest, data, data.length, + 16, true); + } + @Test public void testUnbuffer() throws IOException { - // Setup test file - Path dest = path("testUnbuffer"); describe("testUnbuffer"); - try (FSDataOutputStream outputStream = getFileSystem().create(dest, true)) { - byte[] data = ContractTestUtils.dataset(16, 'a', 26); - outputStream.write(data); - } // Open file, read half the data, and then call unbuffer try (FSDataInputStream inputStream = getFileSystem().open(dest)) { assertTrue(inputStream.getWrappedStream() instanceof S3AInputStream); - assertEquals(8, inputStream.read(new byte[8])); + readAndAssertBytesRead(inputStream, 8); assertTrue(isObjectStreamOpen(inputStream)); inputStream.unbuffer(); @@ -60,7 +69,61 @@ public void testUnbuffer() throws IOException { } } + /** + * Test that calling {@link S3AInputStream#unbuffer()} merges a stream's + * {@link org.apache.hadoop.fs.s3a.S3AInstrumentation.InputStreamStatistics} + * into the {@link S3AFileSystem}'s {@link S3AInstrumentation} instance. + */ + @Test + public void testUnbufferStreamStatistics() throws IOException { + describe("testUnbufferStreamStatistics"); + + // Validate bytesRead is updated correctly + S3ATestUtils.MetricDiff bytesRead = new S3ATestUtils.MetricDiff( + getFileSystem(), STREAM_SEEK_BYTES_READ); + + // Open file, read half the data, and then call unbuffer + FSDataInputStream inputStream = null; + try { + inputStream = getFileSystem().open(dest); + + readAndAssertBytesRead(inputStream, 8); + inputStream.unbuffer(); + + // Validate that calling unbuffer updates the input stream statistics + bytesRead.assertDiffEquals(8); + + // Validate that calling unbuffer twice in a row updates the statistics + // correctly + readAndAssertBytesRead(inputStream, 4); + inputStream.unbuffer(); + bytesRead.assertDiffEquals(12); + } finally { + IOUtils.closeStream(inputStream); + } + + // Validate that closing the file does not further change the statistics + bytesRead.assertDiffEquals(12); + + // Validate that the input stream stats are correct when the file is closed + assertEquals("S3AInputStream statistics were not updated properly", 12, + ((S3AInputStream) inputStream.getWrappedStream()) + .getS3AStreamStatistics().bytesRead); + } + private boolean isObjectStreamOpen(FSDataInputStream inputStream) { return ((S3AInputStream) inputStream.getWrappedStream()).isObjectStreamOpen(); } + + /** + * Read the specified number of bytes from the given + * {@link FSDataInputStream} and assert that + * {@link FSDataInputStream#read(byte[])} read the specified number of bytes. + */ + private static void readAndAssertBytesRead(FSDataInputStream inputStream, + int bytesToRead) throws IOException { + assertEquals("S3AInputStream#read did not read the correct number of " + + "bytes", bytesToRead, + inputStream.read(new byte[bytesToRead])); + } } From e02eb24e0a9139418120027b694492e0738df20a Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Thu, 20 Jun 2019 09:56:40 +0100 Subject: [PATCH 0242/1308] HADOOP-15183. S3Guard store becomes inconsistent after partial failure of rename. Contributed by Steve Loughran. 
Change-Id: I825b0bc36be960475d2d259b1cdab45ae1bb78eb --- .../hadoop/fs/impl/FunctionsRaisingIOE.java | 69 ++ .../hadoop/fs/impl/FutureIOSupport.java | 48 +- .../src/main/resources/core-default.xml | 31 +- .../contract/AbstractContractRenameTest.java | 2 +- hadoop-tools/hadoop-aws/pom.xml | 5 + .../org/apache/hadoop/fs/s3a/Constants.java | 28 +- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 685 +++++++++----- .../apache/hadoop/fs/s3a/S3AInputStream.java | 9 +- .../hadoop/fs/s3a/S3AInstrumentation.java | 31 +- .../org/apache/hadoop/fs/s3a/S3AUtils.java | 45 +- .../hadoop/fs/s3a/S3ObjectAttributes.java | 52 +- .../org/apache/hadoop/fs/s3a/Statistic.java | 11 + .../hadoop/fs/s3a/WriteOperationHelper.java | 62 +- .../fs/s3a/commit/AbstractS3ACommitter.java | 42 +- .../fs/s3a/commit/CommitOperations.java | 155 +++- .../commit/magic/MagicS3GuardCommitter.java | 7 +- .../s3a/commit/staging/StagingCommitter.java | 12 +- .../fs/s3a/impl/AbstractStoreOperation.java | 49 + .../hadoop/fs/s3a/impl/CallableSupplier.java | 126 +++ .../hadoop/fs/s3a/impl/ContextAccessors.java | 74 ++ .../hadoop/fs/s3a/impl/InternalConstants.java | 60 ++ .../fs/s3a/impl/MultiObjectDeleteSupport.java | 244 +++++ .../hadoop/fs/s3a/impl/RenameOperation.java | 634 +++++++++++++ .../hadoop/fs/s3a/impl/StoreContext.java | 335 +++++++ .../fs/s3a/s3guard/BulkOperationState.java | 82 ++ .../s3guard/DelayedUpdateRenameTracker.java | 192 ++++ .../fs/s3a/s3guard/DynamoDBMetadataStore.java | 688 ++++++++++++-- .../fs/s3a/s3guard/LocalMetadataStore.java | 79 +- .../hadoop/fs/s3a/s3guard/MetadataStore.java | 89 +- .../fs/s3a/s3guard/NullMetadataStore.java | 57 +- .../PathMetadataDynamoDBTranslation.java | 22 +- .../fs/s3a/s3guard/PathOrderComparators.java | 133 +++ .../s3a/s3guard/ProgressiveRenameTracker.java | 252 +++++ .../hadoop/fs/s3a/s3guard/RenameTracker.java | 275 ++++++ .../apache/hadoop/fs/s3a/s3guard/S3Guard.java | 197 +++- .../hadoop/fs/s3a/s3guard/S3GuardTool.java | 37 +- .../s3guard/TableDeleteTimeoutException.java | 34 + .../site/markdown/tools/hadoop-aws/s3guard.md | 12 + .../contract/s3a/ITestS3AContractRename.java | 71 +- .../fs/s3a/ITestS3AFailureHandling.java | 102 +- .../ITestS3AMetadataPersistenceException.java | 10 +- .../fs/s3a/ITestS3GuardListConsistency.java | 13 +- .../hadoop/fs/s3a/MockS3AFileSystem.java | 4 +- .../fs/s3a/TestStreamChangeTracker.java | 7 +- .../hadoop/fs/s3a/auth/ITestAssumeRole.java | 219 +---- .../hadoop/fs/s3a/auth/RoleTestUtils.java | 46 +- .../fs/s3a/commit/AbstractCommitITest.java | 2 + .../fs/s3a/commit/AbstractITCommitMRJob.java | 4 + .../s3a/commit/AbstractYarnClusterITest.java | 8 +- .../fs/s3a/commit/ITestCommitOperations.java | 127 ++- .../commit/magic/ITestMagicCommitMRJob.java | 26 +- .../magic/ITestS3AHugeMagicCommits.java | 9 +- .../ITestDirectoryCommitMRJob.java | 2 +- .../ITestPartitionCommitMRJob.java | 2 +- .../integration/ITestStagingCommitMRJob.java | 2 +- .../ITestStagingCommitMRJobBadDest.java | 2 +- .../terasort/AbstractCommitTerasortIT.java | 5 + .../s3a/impl/ITestPartialRenamesDeletes.java | 871 ++++++++++++++++++ .../s3a/impl/TestPartialDeleteFailures.java | 393 ++++++++ .../s3guard/AbstractS3GuardToolTestBase.java | 44 +- .../s3guard/ITestDynamoDBMetadataStore.java | 631 ++++++++++--- .../ITestDynamoDBMetadataStoreScale.java | 227 +++-- .../s3a/s3guard/ITestS3GuardToolDynamoDB.java | 5 +- .../fs/s3a/s3guard/ITestS3GuardToolLocal.java | 3 +- .../fs/s3a/s3guard/MetadataStoreTestBase.java | 246 +++-- .../s3guard/TestDynamoDBMiscOperations.java | 69 ++ 
.../s3a/s3guard/TestPathOrderComparators.java | 197 ++++ .../hadoop/fs/s3a/s3guard/TestS3Guard.java | 12 +- .../fs/s3a/s3guard/ThrottleTracker.java | 134 +++ .../AbstractITestS3AMetadataStoreScale.java | 19 +- .../hadoop/fs/s3a/test/ExtraAssertions.java | 138 +++ 71 files changed, 7564 insertions(+), 1051 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FunctionsRaisingIOE.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DelayedUpdateRenameTracker.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathOrderComparators.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ProgressiveRenameTracker.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RenameTracker.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/TableDeleteTimeoutException.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ThrottleTracker.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/ExtraAssertions.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FunctionsRaisingIOE.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FunctionsRaisingIOE.java new file mode 100644 index 0000000000000..7bbb34622647d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FunctionsRaisingIOE.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.impl; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Evolving support for functional programming/lambda-expressions. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public final class FunctionsRaisingIOE { + + private FunctionsRaisingIOE() { + } + + /** + * Function of arity 1 which may raise an IOException. + * @param type of arg1 + * @param type of return value. + */ + @FunctionalInterface + public interface FunctionRaisingIOE { + + R apply(T t) throws IOException; + } + + /** + * Function of arity 2 which may raise an IOException. + * @param type of arg1 + * @param type of arg2 + * @param type of return value. + */ + @FunctionalInterface + public interface BiFunctionRaisingIOE { + + R apply(T t, U u) throws IOException; + } + + /** + * This is a callable which only raises an IOException. + * @param return type + */ + @FunctionalInterface + public interface CallableRaisingIOE { + + R apply() throws IOException; + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java index 9d5f2bf4b6ed1..9fe402366c5df 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.InterruptedIOException; import java.util.Map; +import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; @@ -108,20 +109,55 @@ public static T awaitFuture(final Future future, */ public static T raiseInnerCause(final ExecutionException e) throws IOException { + throw unwrapInnerException(e); + } + + /** + * Extract the cause of a completion failure and rethrow it if an IOE + * or RTE. + * @param e exception. + * @param type of return value. + * @return nothing, ever. + * @throws IOException either the inner IOException, or a wrapper around + * any non-Runtime-Exception + * @throws RuntimeException if that is the inner cause. + */ + public static T raiseInnerCause(final CompletionException e) + throws IOException { + throw unwrapInnerException(e); + } + + /** + * From the inner cause of an execution exception, extract the inner cause. + * If it is an RTE: throw immediately. + * If it is an IOE: Return. + * If it is a WrappedIOException: Unwrap and return + * Else: create a new IOException. + * + * Recursively handles wrapped Execution and Completion Exceptions in + * case something very complicated has happened. + * @param e exception. + * @return an IOException extracted or built from the cause. + * @throws RuntimeException if that is the inner cause. 
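+   * Callers typically reach this through {@code raiseInnerCause()}; a
+   * simplified sketch of the intended call pattern, with illustrative
+   * variable names only:
+   * <pre>
+   *   try {
+   *     return future.join();
+   *   } catch (CompletionException e) {
+   *     return raiseInnerCause(e);   // rethrows the unwrapped IOException/RTE
+   *   }
+   * </pre>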
+ */ + private static IOException unwrapInnerException(final Throwable e) { Throwable cause = e.getCause(); if (cause instanceof IOException) { - throw (IOException) cause; + return (IOException) cause; } else if (cause instanceof WrappedIOException){ - throw ((WrappedIOException) cause).getCause(); + return ((WrappedIOException) cause).getCause(); + } else if (cause instanceof CompletionException){ + return unwrapInnerException(cause); + } else if (cause instanceof ExecutionException){ + return unwrapInnerException(cause); } else if (cause instanceof RuntimeException){ throw (RuntimeException) cause; } else if (cause != null) { // other type: wrap with a new IOE - throw new IOException(cause); + return new IOException(cause); } else { - // this only happens if somebody deliberately raises - // an ExecutionException - throw new IOException(e); + // this only happens if there was no cause. + return new IOException(e); } } diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 7ffc2adb461a6..4e22e0a8eb3d9 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1213,8 +1213,12 @@ fs.s3a.connection.maximum - 15 - Controls the maximum number of simultaneous connections to S3. + 48 + Controls the maximum number of simultaneous connections to S3. + This must be bigger than the value of fs.s3a.threads.max so as to stop + threads being blocked waiting for new HTTPS connections. + Why not equal? The AWS SDK transfer manager also uses these connections. + @@ -1312,7 +1316,7 @@ fs.s3a.threads.max - 10 + 64 The total number of threads available in the filesystem for data uploads *or any other queued filesystem operation*. @@ -1326,8 +1330,25 @@ fs.s3a.max.total.tasks - 5 - The number of operations which can be queued for execution + 32 + The number of operations which can be queued for execution. + This is in addition to the number of active threads in fs.s3a.threads.max. + + + + + fs.s3a.executor.capacity + 16 + The maximum number of submitted tasks which is a single + operation (e.g. rename(), delete()) may submit simultaneously for + execution -excluding the IO-heavy block uploads, whose capacity + is set in "fs.s3a.fast.upload.active.blocks" + + All tasks are submitted to the shared thread pool whose size is + set in "fs.s3a.threads.max"; the value of capacity should be less than that + of the thread pool itself, as the goal is to stop a single operation + from overloading that thread pool. 
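Taken together, fs.s3a.threads.max, fs.s3a.connection.maximum,
fs.s3a.max.total.tasks and fs.s3a.executor.capacity bound how much concurrent
work a single S3A filesystem instance will generate. A minimal sketch of
overriding them programmatically; the values and bucket name are illustrative
only, not recommendations:

    // Illustrative only: tune the S3A concurrency settings described above.
    Configuration conf = new Configuration();
    conf.setInt("fs.s3a.threads.max", 64);         // shared pool for uploads and queued operations
    conf.setInt("fs.s3a.connection.maximum", 96);  // keep the HTTPS pool larger than the thread pool
    conf.setInt("fs.s3a.max.total.tasks", 32);     // extra work queued beyond the active threads
    conf.setInt("fs.s3a.executor.capacity", 16);   // cap on tasks one rename()/delete() submits at once
    FileSystem fs = FileSystem.newInstance(java.net.URI.create("s3a://example-bucket/"), conf);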
+ diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java index 5b76a753de170..2751294beb92c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java @@ -268,7 +268,7 @@ public void testRenamePopulatesFileAncestors() throws IOException { * @param dst the destination root to move * @param nestedPath the nested path to move */ - private void validateAncestorsMoved(Path src, Path dst, String nestedPath) + protected void validateAncestorsMoved(Path src, Path dst, String nestedPath) throws IOException { assertIsDirectory(dst); assertPathDoesNotExist("src path should not exist", path(src + nestedPath)); diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index 9dc0acc2d2d96..dbe593d437670 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -417,6 +417,11 @@ aws-java-sdk-bundle compile + + org.assertj + assertj-core + test + junit junit diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index 7334506367a1e..7dc38db6aa534 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -138,9 +138,15 @@ private Constants() { public static final String ASSUMED_ROLE_CREDENTIALS_DEFAULT = SimpleAWSCredentialsProvider.NAME; + + // the maximum number of tasks cached if all threads are already uploading + public static final String MAX_TOTAL_TASKS = "fs.s3a.max.total.tasks"; + + public static final int DEFAULT_MAX_TOTAL_TASKS = 32; + // number of simultaneous connections to s3 public static final String MAXIMUM_CONNECTIONS = "fs.s3a.connection.maximum"; - public static final int DEFAULT_MAXIMUM_CONNECTIONS = 15; + public static final int DEFAULT_MAXIMUM_CONNECTIONS = 48; // connect to s3 over ssl? public static final String SECURE_CONNECTIONS = @@ -194,10 +200,6 @@ private Constants() { public static final String KEEPALIVE_TIME = "fs.s3a.threads.keepalivetime"; public static final int DEFAULT_KEEPALIVE_TIME = 60; - // the maximum number of tasks cached if all threads are already uploading - public static final String MAX_TOTAL_TASKS = "fs.s3a.max.total.tasks"; - public static final int DEFAULT_MAX_TOTAL_TASKS = 5; - // size of each of or multipart pieces in bytes public static final String MULTIPART_SIZE = "fs.s3a.multipart.size"; public static final long DEFAULT_MULTIPART_SIZE = 104857600; // 100 MB @@ -283,6 +285,22 @@ private Constants() { @InterfaceStability.Unstable public static final int DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS = 4; + /** + * The capacity of executor queues for operations other than block + * upload, where {@link #FAST_UPLOAD_ACTIVE_BLOCKS} is used instead. + * This should be less than {@link #MAX_THREADS} for fair + * submission. + * Value: {@value}. + */ + public static final String EXECUTOR_CAPACITY = "fs.s3a.executor.capacity"; + + /** + * The capacity of executor queues for operations other than block + * upload, where {@link #FAST_UPLOAD_ACTIVE_BLOCKS} is used instead. 
+ * Value: {@value} + */ + public static final int DEFAULT_EXECUTOR_CAPACITY = 16; + // Private | PublicRead | PublicReadWrite | AuthenticatedRead | // LogDeliveryWrite | BucketOwnerRead | BucketOwnerFullControl public static final String CANNED_ACL = "fs.s3a.acl.default"; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 4bd58d5136860..874e2db952703 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -27,12 +27,14 @@ import java.nio.file.AccessDeniedException; import java.text.DateFormat; import java.text.SimpleDateFormat; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.EnumSet; -import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -84,6 +86,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -91,8 +95,15 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy; +import org.apache.hadoop.fs.s3a.impl.ContextAccessors; import org.apache.hadoop.fs.s3a.impl.CopyOutcome; +import org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport; +import org.apache.hadoop.fs.s3a.impl.RenameOperation; +import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; import org.apache.hadoop.fs.s3a.select.InternalSelectConstants; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.LambdaUtils; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; @@ -203,6 +214,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities, private TransferManager transfers; private ListeningExecutorService boundedThreadPool; private ExecutorService unboundedThreadPool; + private int executorCapacity; private long multiPartThreshold; public static final Logger LOG = LoggerFactory.getLogger(S3AFileSystem.class); private static final Logger PROGRESS = @@ -380,6 +392,9 @@ public void initialize(URI name, Configuration originalConf) LOG.debug("Using S3ABlockOutputStream with buffer = {}; block={};" + " queue limit={}", blockOutputBuffer, partSize, blockOutputActiveBlocks); + long authDirTtl = conf.getTimeDuration(METADATASTORE_METADATA_TTL, + DEFAULT_METADATASTORE_METADATA_TTL, TimeUnit.MILLISECONDS); + ttlTimeProvider = new S3Guard.TtlTimeProvider(authDirTtl); setMetadataStore(S3Guard.getMetadataStore(this)); allowAuthoritative = conf.getBoolean(METADATASTORE_AUTHORITATIVE, @@ -389,17 +404,18 @@ public void initialize(URI name, Configuration originalConf) getMetadataStore(), allowAuthoritative); } initMultipartUploads(conf); - if (hasMetadataStore()) { - long authDirTtl = conf.getTimeDuration(METADATASTORE_METADATA_TTL, - DEFAULT_METADATASTORE_METADATA_TTL, TimeUnit.MILLISECONDS); - ttlTimeProvider = new 
S3Guard.TtlTimeProvider(authDirTtl); - } } catch (AmazonClientException e) { throw translateException("initializing ", new Path(name), e); } } + /** + * Initialize the thread pool. + * This must be re-invoked after replacing the S3Client during test + * runs. + * @param conf configuration. + */ private void initThreadPools(Configuration conf) { int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS); if (maxThreads < 2) { @@ -418,9 +434,11 @@ private void initThreadPools(Configuration conf) { unboundedThreadPool = new ThreadPoolExecutor( maxThreads, Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS, - new LinkedBlockingQueue(), + new LinkedBlockingQueue<>(), BlockingThreadPoolExecutorService.newDaemonThreadFactory( "s3a-transfer-unbounded")); + executorCapacity = intOption(conf, + EXECUTOR_CAPACITY, DEFAULT_EXECUTOR_CAPACITY, 1); } /** @@ -689,6 +707,7 @@ public String getBucketLocation() throws IOException { * @return the region in which a bucket is located * @throws IOException on any failure. */ + @VisibleForTesting @Retries.RetryTranslated public String getBucketLocation(String bucketName) throws IOException { return invoker.retry("getBucketLocation()", bucketName, true, @@ -733,21 +752,29 @@ public S3AEncryptionMethods getServerSideEncryptionAlgorithm() { /** * Demand create the directory allocator, then create a temporary file. + * This does not mark the file for deletion when a process exits. * {@link LocalDirAllocator#createTmpFileForWrite(String, long, Configuration)}. - * @param pathStr prefix for the temporary file - * @param size the size of the file that is going to be written - * @param conf the Configuration object - * @return a unique temporary file - * @throws IOException IO problems + * @param pathStr prefix for the temporary file + * @param size the size of the file that is going to be written + * @param conf the Configuration object + * @return a unique temporary file + * @throws IOException IO problems */ - synchronized File createTmpFileForWrite(String pathStr, long size, + File createTmpFileForWrite(String pathStr, long size, Configuration conf) throws IOException { if (directoryAllocator == null) { - String bufferDir = conf.get(BUFFER_DIR) != null - ? BUFFER_DIR : HADOOP_TMP_DIR; - directoryAllocator = new LocalDirAllocator(bufferDir); + synchronized (this) { + String bufferDir = conf.get(BUFFER_DIR) != null + ? BUFFER_DIR : HADOOP_TMP_DIR; + directoryAllocator = new LocalDirAllocator(bufferDir); + } } - return directoryAllocator.createTmpFileForWrite(pathStr, size, conf); + Path path = directoryAllocator.getLocalPathForWrite(pathStr, + size, conf); + File dir = new File(path.getParent().toUri().getPath()); + String prefix = path.getName(); + // create a temp file on this directory + return File.createTempFile(prefix, null, dir); } /** @@ -929,11 +956,7 @@ private FSDataInputStream open( return new FSDataInputStream( new S3AInputStream( readContext, - createObjectAttributes( - path, - fileStatus.getETag(), - fileStatus.getVersionId()), - fileStatus.getLen(), + createObjectAttributes(fileStatus), s3)); } @@ -963,22 +986,40 @@ private S3AReadOpContext createReadContext( } /** - * Create the attributes of an object for a get/select request. + * Create the attributes of an object for subsequent use. * @param f path path of the request. * @param eTag the eTag of the S3 object * @param versionId S3 object version ID + * @param len length of the file * @return attributes to use when building the query. 
*/ private S3ObjectAttributes createObjectAttributes( final Path f, final String eTag, - final String versionId) { + final String versionId, + final long len) { return new S3ObjectAttributes(bucket, + f, pathToKey(f), getServerSideEncryptionAlgorithm(), encryptionSecrets.getEncryptionKey(), eTag, - versionId); + versionId, + len); + } + + /** + * Create the attributes of an object for subsequent use. + * @param fileStatus file status to build from. + * @return attributes to use when building the query. + */ + private S3ObjectAttributes createObjectAttributes( + final S3AFileStatus fileStatus) { + return createObjectAttributes( + fileStatus.getPath(), + fileStatus.getETag(), + fileStatus.getVersionId(), + fileStatus.getLen()); } /** @@ -1117,9 +1158,13 @@ public FSDataOutputStream append(Path f, int bufferSize, * @throws IOException on IO failure * @return true if rename is successful */ + @Retries.RetryTranslated public boolean rename(Path src, Path dst) throws IOException { - try { - return innerRename(src, dst); + try (DurationInfo ignored = new DurationInfo(LOG, false, + "rename(%s, %s", src, dst)) { + long bytesCopied = innerRename(src, dst); + LOG.debug("Copied {} bytes", bytesCopied); + return true; } catch (AmazonClientException e) { throw translateException("rename(" + src +", " + dst + ")", src, e); } catch (RenameFailedException e) { @@ -1132,33 +1177,22 @@ public boolean rename(Path src, Path dst) throws IOException { } /** - * The inner rename operation. See {@link #rename(Path, Path)} for - * the description of the operation. - * This operation throws an exception on any failure which needs to be - * reported and downgraded to a failure. - * Retries: retry translated, assuming all operations it is called do - * so. For safely, consider catch and handle AmazonClientException - * because this is such a complex method there's a risk it could surface. - * @param source path to be renamed - * @param dest new path after rename + * Validate the rename parameters and status of the filesystem; + * returns the source and any destination File Status. + * @param src qualified path to be renamed + * @param dst qualified path after rename + * @return the source and (possibly null) destination status entries. * @throws RenameFailedException if some criteria for a state changing * rename was not met. This means work didn't happen; it's not something * which is reported upstream to the FileSystem APIs, for which the semantics * of "false" are pretty vague. * @throws FileNotFoundException there's no source file. * @throws IOException on IO failure. - * @throws AmazonClientException on failures inside the AWS SDK */ - @Retries.RetryMixed - private boolean innerRename(Path source, Path dest) - throws RenameFailedException, FileNotFoundException, IOException, - AmazonClientException { - Path src = qualify(source); - Path dst = qualify(dest); - - LOG.debug("Rename path {} to {}", src, dst); - entryPoint(INVOCATION_RENAME); - + @Retries.RetryTranslated + private Pair initiateRename( + final Path src, + final Path dst) throws IOException { String srcKey = pathToKey(src); String dstKey = pathToKey(dst); @@ -1227,131 +1261,126 @@ private boolean innerRename(Path source, Path dest) } } } + return Pair.of(srcStatus, dstStatus); + } - // If we have a MetadataStore, track deletions/creations. 
- Collection srcPaths = null; - List dstMetas = null; - if (hasMetadataStore()) { - srcPaths = new HashSet<>(); // srcPaths need fast look up before put - dstMetas = new ArrayList<>(); - } - // TODO S3Guard HADOOP-13761: retries when source paths are not visible yet - // TODO S3Guard: performance: mark destination dirs as authoritative - - // Ok! Time to start - if (srcStatus.isFile()) { - LOG.debug("rename: renaming file {} to {}", src, dst); - long length = srcStatus.getLen(); - S3ObjectAttributes objectAttributes = - createObjectAttributes(srcStatus.getPath(), - srcStatus.getETag(), srcStatus.getVersionId()); - S3AReadOpContext readContext = createReadContext(srcStatus, inputPolicy, - changeDetectionPolicy, readAhead); - if (dstStatus != null && dstStatus.isDirectory()) { - String newDstKey = maybeAddTrailingSlash(dstKey); - String filename = - srcKey.substring(pathToKey(src.getParent()).length()+1); - newDstKey = newDstKey + filename; - CopyResult copyResult = copyFile(srcKey, newDstKey, length, - objectAttributes, readContext); - S3Guard.addMoveFile(metadataStore, srcPaths, dstMetas, src, - keyToQualifiedPath(newDstKey), length, getDefaultBlockSize(dst), - username, copyResult.getETag(), copyResult.getVersionId()); - } else { - CopyResult copyResult = copyFile(srcKey, dstKey, srcStatus.getLen(), - objectAttributes, readContext); - S3Guard.addMoveFile(metadataStore, srcPaths, dstMetas, src, dst, - length, getDefaultBlockSize(dst), username, - copyResult.getETag(), copyResult.getVersionId()); - } - innerDelete(srcStatus, false); - } else { - LOG.debug("rename: renaming directory {} to {}", src, dst); + /** + * The inner rename operation. See {@link #rename(Path, Path)} for + * the description of the operation. + * This operation throws an exception on any failure which needs to be + * reported and downgraded to a failure. + * Retries: retry translated, assuming all operations it is called do + * so. For safely, consider catch and handle AmazonClientException + * because this is such a complex method there's a risk it could surface. + * @param source path to be renamed + * @param dest new path after rename + * @throws RenameFailedException if some criteria for a state changing + * rename was not met. This means work didn't happen; it's not something + * which is reported upstream to the FileSystem APIs, for which the semantics + * of "false" are pretty vague. + * @return the number of bytes copied. + * @throws FileNotFoundException there's no source file. + * @throws IOException on IO failure. + * @throws AmazonClientException on failures inside the AWS SDK + */ + @Retries.RetryMixed + private long innerRename(Path source, Path dest) + throws RenameFailedException, FileNotFoundException, IOException, + AmazonClientException { + Path src = qualify(source); + Path dst = qualify(dest); - // This is a directory to directory copy - dstKey = maybeAddTrailingSlash(dstKey); - srcKey = maybeAddTrailingSlash(srcKey); + LOG.debug("Rename path {} to {}", src, dst); + entryPoint(INVOCATION_RENAME); - //Verify dest is not a child of the source directory - if (dstKey.startsWith(srcKey)) { - throw new RenameFailedException(srcKey, dstKey, - "cannot rename a directory to a subdirectory of itself "); - } + String srcKey = pathToKey(src); + String dstKey = pathToKey(dst); - List keysToDelete = new ArrayList<>(); - if (dstStatus != null && dstStatus.isEmptyDirectory() == Tristate.TRUE) { - // delete unnecessary fake directory. 
- keysToDelete.add(new DeleteObjectsRequest.KeyVersion(dstKey)); - } + Pair p = initiateRename(src, dst); - Path parentPath = keyToQualifiedPath(srcKey); - RemoteIterator iterator = - listFilesAndEmptyDirectories(parentPath, true); - while (iterator.hasNext()) { - S3ALocatedFileStatus status = iterator.next(); - long length = status.getLen(); - String key = pathToKey(status.getPath()); - if (status.isDirectory() && !key.endsWith("/")) { - key += "/"; - } - keysToDelete - .add(new DeleteObjectsRequest.KeyVersion(key)); - String newDstKey = - dstKey + key.substring(srcKey.length()); - S3ObjectAttributes objectAttributes = - createObjectAttributes(status.getPath(), - status.getETag(), status.getVersionId()); - S3AReadOpContext readContext = createReadContext(status, inputPolicy, - changeDetectionPolicy, readAhead); - CopyResult copyResult = copyFile(key, newDstKey, length, - objectAttributes, readContext); - - if (hasMetadataStore()) { - // with a metadata store, the object entries need to be updated, - // including, potentially, the ancestors - Path childSrc = keyToQualifiedPath(key); - Path childDst = keyToQualifiedPath(newDstKey); - if (objectRepresentsDirectory(key, length)) { - S3Guard.addMoveDir(metadataStore, srcPaths, dstMetas, childSrc, - childDst, username); - } else { - S3Guard.addMoveFile(metadataStore, srcPaths, dstMetas, childSrc, - childDst, length, getDefaultBlockSize(childDst), username, - copyResult.getETag(), copyResult.getVersionId()); - } - // Ancestor directories may not be listed, so we explicitly add them - S3Guard.addMoveAncestors(metadataStore, srcPaths, dstMetas, - keyToQualifiedPath(srcKey), childSrc, childDst, username); - } + // Initiate the rename. + // this will call back into this class via the rename callbacks + // and interact directly with any metastore. + RenameOperation renameOperation = new RenameOperation( + createStoreContext(), + src, srcKey, p.getLeft(), + dst, dstKey, p.getRight(), + new RenameOperationCallbacksImpl()); + return renameOperation.executeRename(); + } - if (keysToDelete.size() == MAX_ENTRIES_TO_DELETE) { - removeKeys(keysToDelete, true, false); - } - } - if (!keysToDelete.isEmpty()) { - removeKeys(keysToDelete, false, false); - } + /** + * All the callbacks made by the rename operation of the filesystem. + * This separation allows the operation to be factored out and + * still avoid knowledge of the S3AFilesystem implementation. 
+ */ + private class RenameOperationCallbacksImpl implements + RenameOperation.RenameOperationCallbacks { - // We moved all the children, now move the top-level dir - // Empty directory should have been added as the object summary - if (hasMetadataStore() - && srcPaths != null - && !srcPaths.contains(src)) { - LOG.debug("To move the non-empty top-level dir src={} and dst={}", - src, dst); - S3Guard.addMoveDir(metadataStore, srcPaths, dstMetas, src, dst, - username); - } + @Override + public S3ObjectAttributes createObjectAttributes(final Path path, + final String eTag, + final String versionId, + final long len) { + return S3AFileSystem.this.createObjectAttributes(path, eTag, versionId, + len); } - metadataStore.move(srcPaths, dstMetas, ttlTimeProvider); + @Override + public S3ObjectAttributes createObjectAttributes(final S3AFileStatus fileStatus) { + return S3AFileSystem.this.createObjectAttributes(fileStatus); + } - if (!src.getParent().equals(dst.getParent())) { - LOG.debug("source & dest parents are different; fix up dir markers"); - deleteUnnecessaryFakeDirectories(dst.getParent()); - maybeCreateFakeParentDirectory(src); + @Override + public S3AReadOpContext createReadContext(final FileStatus fileStatus) { + return S3AFileSystem.this.createReadContext(fileStatus, + inputPolicy, + changeDetectionPolicy, readAhead); + } + + @Override + public void deleteObjectAtPath(final Path path, + final String key, + final boolean isFile) + throws IOException { + S3AFileSystem.this.deleteObjectAtPath(path, key, isFile); + } + + @Override + @Retries.RetryTranslated + public RemoteIterator listFilesAndEmptyDirectories( + final Path path) throws IOException { + return S3AFileSystem.this.listFilesAndEmptyDirectories(path, true); + } + + @Override + public CopyResult copyFile(final String srcKey, + final String destKey, + final S3ObjectAttributes srcAttributes, + final S3AReadOpContext readContext) throws IOException { + return S3AFileSystem.this.copyFile(srcKey, destKey, + srcAttributes.getLen(), srcAttributes, readContext); + } + + @Override + public void removeKeys(final List keysToDelete, + final boolean deleteFakeDir, + final List undeletedObjectsOnFailure) + throws MultiObjectDeleteException, AmazonClientException, IOException { + S3AFileSystem.this.removeKeys(keysToDelete, deleteFakeDir, + undeletedObjectsOnFailure); + } + + @Override + public void finishRename(final Path sourceRenamed, final Path destCreated) + throws IOException { + Path destParent = destCreated.getParent(); + if (!sourceRenamed.getParent().equals(destParent)) { + LOG.debug("source & dest parents are different; fix up dir markers"); + deleteUnnecessaryFakeDirectories(destParent); + maybeCreateFakeParentDirectory(sourceRenamed); + } } - return true; } /** @@ -1380,6 +1409,7 @@ public ObjectMetadata getObjectMetadata(Path path) throws IOException { public ObjectMetadata getObjectMetadata(Path path, ChangeTracker changeTracker, Invoker changeInvoker, String operation) throws IOException { + checkNotClosed(); return once("getObjectMetadata", path.toString(), () -> // this always does a full HEAD to the object @@ -1609,16 +1639,19 @@ protected S3ListResult listObjects(S3ListRequest request) throws IOException { incrementReadOperations(); incrementStatistic(OBJECT_LIST_REQUESTS); validateListArguments(request); - return invoker.retryUntranslated( - request.toString(), - true, - () -> { - if (useListV1) { - return S3ListResult.v1(s3.listObjects(request.getV1())); - } else { - return S3ListResult.v2(s3.listObjectsV2(request.getV2())); - 
} - }); + try(DurationInfo ignored = + new DurationInfo(LOG, false, "LIST")) { + return invoker.retryUntranslated( + request.toString(), + true, + () -> { + if (useListV1) { + return S3ListResult.v1(s3.listObjects(request.getV1())); + } else { + return S3ListResult.v2(s3.listObjectsV2(request.getV2())); + } + }); + } } /** @@ -1646,20 +1679,23 @@ protected S3ListResult continueListObjects(S3ListRequest request, S3ListResult prevResult) throws IOException { incrementReadOperations(); validateListArguments(request); - return invoker.retryUntranslated( - request.toString(), - true, - () -> { - incrementStatistic(OBJECT_CONTINUE_LIST_REQUESTS); - if (useListV1) { - return S3ListResult.v1( - s3.listNextBatchOfObjects(prevResult.getV1())); - } else { - request.getV2().setContinuationToken(prevResult.getV2() - .getNextContinuationToken()); - return S3ListResult.v2(s3.listObjectsV2(request.getV2())); - } - }); + try(DurationInfo ignored = + new DurationInfo(LOG, false, "LIST (continued)")) { + return invoker.retryUntranslated( + request.toString(), + true, + () -> { + incrementStatistic(OBJECT_CONTINUE_LIST_REQUESTS); + if (useListV1) { + return S3ListResult.v1( + s3.listNextBatchOfObjects(prevResult.getV1())); + } else { + request.getV2().setContinuationToken(prevResult.getV2() + .getNextContinuationToken()); + return S3ListResult.v2(s3.listObjectsV2(request.getV2())); + } + }); + } } /** @@ -1697,6 +1733,7 @@ protected void deleteObject(String key) throws AmazonClientException, IOException { blockRootDelete(key); incrementWriteOperations(); + LOG.debug("DELETE {}", key); invoker.retryUntranslated("Delete "+ bucket + ":/" + key, DELETE_CONSIDERED_IDEMPOTENT, ()-> { @@ -1714,9 +1751,9 @@ protected void deleteObject(String key) * @param key key of entry * @param isFile is the path a file (used for instrumentation only) * @throws AmazonClientException problems working with S3 - * @throws IOException IO failure + * @throws IOException IO failure in the metastore */ - @Retries.RetryRaw + @Retries.RetryMixed void deleteObjectAtPath(Path f, String key, boolean isFile) throws AmazonClientException, IOException { if (isFile) { @@ -1755,7 +1792,9 @@ private void blockRootDelete(String key) throws InvalidRequestException { private void deleteObjects(DeleteObjectsRequest deleteRequest) throws MultiObjectDeleteException, AmazonClientException, IOException { incrementWriteOperations(); - try { + try(DurationInfo ignored = + new DurationInfo(LOG, false, "DELETE %d keys", + deleteRequest.getKeys().size())) { invoker.retryUntranslated("delete", DELETE_CONSIDERED_IDEMPOTENT, () -> { @@ -1892,7 +1931,7 @@ PutObjectResult putObjectDirect(PutObjectRequest putObjectRequest) incrementPutCompletedStatistics(true, len); // update metadata finishedWrite(putObjectRequest.getKey(), len, - result.getETag(), result.getVersionId()); + result.getETag(), result.getVersionId(), null); return result; } catch (AmazonClientException e) { incrementPutCompletedStatistics(false, len); @@ -1993,23 +2032,23 @@ public void incrementPutProgressStatistics(String key, long bytes) { } /** - * A helper method to delete a list of keys on a s3-backend. + * Delete a list of keys on a s3-backend. + * This does not update the metastore. * Retry policy: retry untranslated; delete considered idempotent. * @param keysToDelete collection of keys to delete on the s3-backend. * if empty, no request is made of the object store. 
- * @param clearKeys clears the keysToDelete-list after processing the list - * when set to true * @param deleteFakeDir indicates whether this is for deleting fake dirs * @throws InvalidRequestException if the request was rejected due to * a mistaken attempt to delete the root directory. * @throws MultiObjectDeleteException one or more of the keys could not * be deleted in a multiple object delete operation. - * @throws AmazonClientException amazon-layer failure. + * The number of rejected objects will be added to the metric + * {@link Statistic#FILES_DELETE_REJECTED}. + * @throws AmazonClientException other amazon-layer failure. */ - @VisibleForTesting @Retries.RetryRaw - void removeKeys(List keysToDelete, - boolean clearKeys, boolean deleteFakeDir) + private void removeKeysS3(List keysToDelete, + boolean deleteFakeDir) throws MultiObjectDeleteException, AmazonClientException, IOException { if (keysToDelete.isEmpty()) { @@ -2019,22 +2058,118 @@ void removeKeys(List keysToDelete, for (DeleteObjectsRequest.KeyVersion keyVersion : keysToDelete) { blockRootDelete(keyVersion.getKey()); } - if (enableMultiObjectsDelete) { - deleteObjects(new DeleteObjectsRequest(bucket) - .withKeys(keysToDelete) - .withQuiet(true)); - } else { - for (DeleteObjectsRequest.KeyVersion keyVersion : keysToDelete) { - deleteObject(keyVersion.getKey()); + try { + if (enableMultiObjectsDelete) { + deleteObjects(new DeleteObjectsRequest(bucket) + .withKeys(keysToDelete) + .withQuiet(true)); + } else { + for (DeleteObjectsRequest.KeyVersion keyVersion : keysToDelete) { + deleteObject(keyVersion.getKey()); + } } + } catch (MultiObjectDeleteException ex) { + // partial delete. + // Update the stats with the count of the actual number of successful + // deletions. + int rejected = ex.getErrors().size(); + noteDeleted(keysToDelete.size() - rejected, deleteFakeDir); + incrementStatistic(FILES_DELETE_REJECTED, rejected); + throw ex; } + noteDeleted(keysToDelete.size(), deleteFakeDir); + } + + /** + * Note the deletion of files or fake directories deleted. + * @param count count of keys deleted. + * @param deleteFakeDir are the deletions fake directories? + */ + private void noteDeleted(final int count, final boolean deleteFakeDir) { if (!deleteFakeDir) { - instrumentation.fileDeleted(keysToDelete.size()); + instrumentation.fileDeleted(count); } else { - instrumentation.fakeDirsDeleted(keysToDelete.size()); + instrumentation.fakeDirsDeleted(count); } - if (clearKeys) { - keysToDelete.clear(); + } + + /** + * Invoke {@link #removeKeysS3(List, boolean)} with handling of + * {@code MultiObjectDeleteException}. + * + * @param keysToDelete collection of keys to delete on the s3-backend. + * if empty, no request is made of the object store. + * @param deleteFakeDir indicates whether this is for deleting fake dirs + * @throws InvalidRequestException if the request was rejected due to + * a mistaken attempt to delete the root directory. + * @throws MultiObjectDeleteException one or more of the keys could not + * be deleted in a multiple object delete operation. + * @throws AmazonClientException amazon-layer failure. + * @throws IOException other IO Exception. 
+ */ + @VisibleForTesting + @Retries.RetryMixed + void removeKeys( + final List keysToDelete, + final boolean deleteFakeDir) + throws MultiObjectDeleteException, AmazonClientException, + IOException { + removeKeys(keysToDelete, deleteFakeDir, new ArrayList<>()); + } + + /** + * Invoke {@link #removeKeysS3(List, boolean)} with handling of + * {@code MultiObjectDeleteException} before the exception is rethrown. + * Specifically: + *
+   * <ol>
+   *   <li>Failure and !deleteFakeDir: S3Guard is updated with all
+   *    deleted entries</li>
+   *   <li>Failure where deleteFakeDir == true: do nothing with S3Guard</li>
+   *   <li>Success: do nothing with S3Guard</li>
+   * </ol>
    + * @param keysToDelete collection of keys to delete on the s3-backend. + * if empty, no request is made of the object store. + * @param deleteFakeDir indicates whether this is for deleting fake dirs. + * @param undeletedObjectsOnFailure List which will be built up of all + * files that were not deleted. This happens even as an exception + * is raised. + * @throws InvalidRequestException if the request was rejected due to + * a mistaken attempt to delete the root directory. + * @throws MultiObjectDeleteException one or more of the keys could not + * be deleted in a multiple object delete operation. + * @throws AmazonClientException amazon-layer failure. + * @throws IOException other IO Exception. + */ + @VisibleForTesting + @Retries.RetryMixed + void removeKeys( + final List keysToDelete, + final boolean deleteFakeDir, + final List undeletedObjectsOnFailure) + throws MultiObjectDeleteException, AmazonClientException, + IOException { + undeletedObjectsOnFailure.clear(); + try(DurationInfo ignored = new DurationInfo(LOG, false, "Deleting")) { + removeKeysS3(keysToDelete, deleteFakeDir); + } catch (MultiObjectDeleteException ex) { + LOG.debug("Partial delete failure"); + // what to do if an IOE was raised? Given an exception was being + // raised anyway, and the failures are logged, do nothing. + if (!deleteFakeDir) { + // when deleting fake directories we don't want to delete metastore + // entries so we only process these failures on "real" deletes. + Triple, List, List>> results = + new MultiObjectDeleteSupport(createStoreContext()) + .processDeleteFailure(ex, keysToDelete); + undeletedObjectsOnFailure.addAll(results.getMiddle()); + } + throw ex; + } catch (AmazonClientException | IOException ex) { + List paths = new MultiObjectDeleteSupport(createStoreContext()) + .processDeleteFailureGenericException(ex, keysToDelete); + // other failures. Assume nothing was deleted + undeletedObjectsOnFailure.addAll(paths); + throw ex; } } @@ -2067,7 +2202,7 @@ public boolean delete(Path f, boolean recursive) throws IOException { } return outcome; } catch (FileNotFoundException e) { - LOG.debug("Couldn't delete {} - does not exist", f); + LOG.debug("Couldn't delete {} - does not exist: {}", f, e.toString()); instrumentation.errorIgnored(); return false; } catch (AmazonClientException e) { @@ -2131,22 +2266,25 @@ private boolean innerDelete(S3AFileStatus status, boolean recursive) LOG.debug("Got object to delete {}", summary.getKey()); if (keys.size() == MAX_ENTRIES_TO_DELETE) { - removeKeys(keys, true, false); + // delete a single page of keys + removeKeys(keys, false); + keys.clear(); } } if (objects.isTruncated()) { objects = continueListObjects(request, objects); } else { - if (!keys.isEmpty()) { - // TODO: HADOOP-13761 S3Guard: retries - removeKeys(keys, false, false); - } + // there is no more data: delete the final set of entries. + removeKeys(keys, false); break; } } } - metadataStore.deleteSubtree(f, ttlTimeProvider); + try(DurationInfo ignored = + new DurationInfo(LOG, false, "Delete metastore")) { + metadataStore.deleteSubtree(f, ttlTimeProvider); + } } else { LOG.debug("delete: Path is a file"); deleteObjectAtPath(f, key, true); @@ -2276,6 +2414,7 @@ public FileStatus[] innerListStatus(Path f) throws FileNotFoundException, while (files.hasNext()) { result.add(files.next()); } + // merge the results. 
This will update the store as needed return S3Guard.dirListingUnion(metadataStore, path, result, dirMeta, allowAuthoritative, ttlTimeProvider); } else { @@ -2464,6 +2603,7 @@ public FileStatus getFileStatus(final Path f) throws IOException { S3AFileStatus innerGetFileStatus(final Path f, boolean needEmptyDirectoryFlag) throws IOException { entryPoint(INVOCATION_GET_FILE_STATUS); + checkNotClosed(); final Path path = qualify(f); String key = pathToKey(path); LOG.debug("Getting path status for {} ({})", path, key); @@ -2476,8 +2616,11 @@ S3AFileStatus innerGetFileStatus(final Path f, Set tombstones = Collections.emptySet(); if (pm != null) { if (pm.isDeleted()) { + OffsetDateTime deletedAt = OffsetDateTime.ofInstant( + Instant.ofEpochMilli(pm.getFileStatus().getModificationTime()), + ZoneOffset.UTC); throw new FileNotFoundException("Path " + f + " is recorded as " + - "deleted by S3Guard"); + "deleted by S3Guard at " + deletedAt); } // if ms is not authoritative, check S3 if there's any recent @@ -2504,8 +2647,9 @@ S3AFileStatus innerGetFileStatus(final Path f, final long s3ModTime = s3AFileStatus.getModificationTime(); if(s3ModTime > msModTime) { - LOG.debug("S3Guard metadata for {} is outdated, updating it", - path); + LOG.debug("S3Guard metadata for {} is outdated;" + + " s3modtime={}; msModTime={} updating metastore", + path, s3ModTime, msModTime); return S3Guard.putAndReturn(metadataStore, s3AFileStatus, instrumentation, ttlTimeProvider); } @@ -2835,7 +2979,7 @@ UploadResult executePut(PutObjectRequest putObjectRequest, listener.uploadCompleted(); // post-write actions finishedWrite(key, info.getLength(), - result.getETag(), result.getVersionId()); + result.getETag(), result.getVersionId(), null); return result; } @@ -3168,7 +3312,7 @@ private Optional generateSSECustomerKey() { /** * Perform post-write actions. * Calls {@link #deleteUnnecessaryFakeDirectories(Path)} and then - * {@link S3Guard#addAncestors(MetadataStore, Path, String)}}. + * updates any metastore. * This operation MUST be called after any PUT/multipart PUT completes * successfully. * @@ -3182,6 +3326,7 @@ private Optional generateSSECustomerKey() { * @param length total length of file written * @param eTag eTag of the written object * @param versionId S3 object versionId of the written object + * @param operationState state of any ongoing bulk operation. * @throws MetadataPersistenceException if metadata about the write could * not be saved to the metadata store and * fs.s3a.metadatastore.fail.on.write.error=true @@ -3189,22 +3334,39 @@ private Optional generateSSECustomerKey() { @InterfaceAudience.Private @Retries.RetryTranslated("Except if failOnMetadataWriteError=false, in which" + " case RetryExceptionsSwallowed") - void finishedWrite(String key, long length, String eTag, String versionId) + void finishedWrite(String key, long length, String eTag, String versionId, + @Nullable final BulkOperationState operationState) throws MetadataPersistenceException { - LOG.debug("Finished write to {}, len {}", key, length); + LOG.debug("Finished write to {}, len {}. etag {}, version {}", + key, length, eTag, versionId); Path p = keyToQualifiedPath(key); Preconditions.checkArgument(length >= 0, "content length is negative"); deleteUnnecessaryFakeDirectories(p.getParent()); + // this is only set if there is a metastore to update and the + // operationState parameter passed in was null. 
+ BulkOperationState stateToClose = null; // See note about failure semantics in S3Guard documentation try { if (hasMetadataStore()) { - S3Guard.addAncestors(metadataStore, p, username, ttlTimeProvider); + BulkOperationState activeState = operationState; + if (activeState == null) { + // create an operation state if there was none, so that the + // information gleaned from addAncestors is preserved into the + // subsequent put. + stateToClose = S3Guard.initiateBulkWrite(metadataStore, + BulkOperationState.OperationType.Put, + keyToPath(key)); + activeState = stateToClose; + } + S3Guard.addAncestors(metadataStore, p, ttlTimeProvider, activeState); S3AFileStatus status = createUploadFileStatus(p, S3AUtils.objectRepresentsDirectory(key, length), length, getDefaultBlockSize(p), username, eTag, versionId); - S3Guard.putAndReturn(metadataStore, status, instrumentation, - ttlTimeProvider); + S3Guard.putAndReturn(metadataStore, status, + instrumentation, + ttlTimeProvider, + activeState); } } catch (IOException e) { if (failOnMetadataWriteError) { @@ -3214,6 +3376,9 @@ void finishedWrite(String key, long length, String eTag, String versionId) p, e); } instrumentation.errorIgnored(); + } finally { + // if a new operation state was created, close it. + IOUtils.cleanupWithLogger(LOG, stateToClose); } } @@ -3233,7 +3398,7 @@ private void deleteUnnecessaryFakeDirectories(Path path) { path = path.getParent(); } try { - removeKeys(keysToRemove, false, true); + removeKeys(keysToRemove, true); } catch(AmazonClientException | IOException e) { instrumentation.errorIgnored(); if (LOG.isDebugEnabled()) { @@ -3594,11 +3759,17 @@ public LocatedFileStatus next() throws IOException { }; } - @Retries.OnceTranslated + /** + * Recursive List of files and empty directories. + * @param f path to list from + * @return an iterator. + * @throws IOException failure + */ + @Retries.RetryTranslated public RemoteIterator listFilesAndEmptyDirectories( Path f, boolean recursive) throws IOException { - return innerListFiles(f, recursive, - new Listing.AcceptAllButS3nDirs()); + return invoker.retry("list", f.toString(), true, + () -> innerListFiles(f, recursive, new Listing.AcceptAllButS3nDirs())); } @Retries.OnceTranslated @@ -3904,8 +4075,7 @@ private FSDataInputStream select(final Path source, // readahead range can be dynamically set long ra = options.getLong(READAHEAD_RANGE, readAhead); - S3ObjectAttributes objectAttributes = createObjectAttributes( - path, fileStatus.getETag(), fileStatus.getVersionId()); + S3ObjectAttributes objectAttributes = createObjectAttributes(fileStatus); S3AReadOpContext readContext = createReadContext(fileStatus, inputPolicy, changeDetectionPolicy, ra); @@ -3998,4 +4168,59 @@ public CompletableFuture openFileWithOptions( return result; } + /** + * Build an immutable store context. + * If called while the FS is being initialized, + * some of the context will be incomplete. + * new store context instances should be created as appropriate. + * @return the store context of this FS. + */ + @InterfaceAudience.Private + public StoreContext createStoreContext() { + return new StoreContext( + getUri(), + getBucket(), + getConf(), + getUsername(), + owner, + boundedThreadPool, + executorCapacity, + invoker, + getInstrumentation(), + getStorageStatistics(), + getInputPolicy(), + changeDetectionPolicy, + enableMultiObjectsDelete, + metadataStore, + useListV1, + new ContextAccessorsImpl(), + getTtlTimeProvider()); + } + + /** + * The implementation of context accessors. 
+ */ + private class ContextAccessorsImpl implements ContextAccessors { + + @Override + public Path keyToPath(final String key) { + return keyToQualifiedPath(key); + } + + @Override + public String pathToKey(final Path path) { + return S3AFileSystem.this.pathToKey(path); + } + + @Override + public File createTempFile(final String prefix, final long size) + throws IOException { + return createTmpFileForWrite(prefix, size, getConf()); + } + + @Override + public String getBucketLocation() throws IOException { + return S3AFileSystem.this.getBucketLocation(); + } + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java index 60221263cf3a0..c92a85ea57e9e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java @@ -128,23 +128,22 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead, * This does not attempt to open it; that is only done on the first * actual read() operation. * @param ctx operation context - * @param s3Attributes object attributes from a HEAD request - * @param contentLength length of content + * @param s3Attributes object attributes * @param client S3 client to use */ public S3AInputStream(S3AReadOpContext ctx, S3ObjectAttributes s3Attributes, - long contentLength, AmazonS3 client) { Preconditions.checkArgument(isNotEmpty(s3Attributes.getBucket()), "No Bucket"); Preconditions.checkArgument(isNotEmpty(s3Attributes.getKey()), "No Key"); - Preconditions.checkArgument(contentLength >= 0, "Negative content length"); + long l = s3Attributes.getLen(); + Preconditions.checkArgument(l >= 0, "Negative content length"); this.context = ctx; this.bucket = s3Attributes.getBucket(); this.key = s3Attributes.getKey(); this.pathStr = ctx.dstFileStatus.getPath().toString(); - this.contentLength = contentLength; + this.contentLength = l; this.client = client; this.uri = "s3a://" + this.bucket + "/" + this.key; this.streamStatistics = ctx.instrumentation.newInputStreamStatistics(); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java index fd7893f1bc405..e9ed972c6a16e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java @@ -183,10 +183,14 @@ public class S3AInstrumentation implements Closeable, MetricsSource { COMMITTER_MAGIC_FILES_CREATED, S3GUARD_METADATASTORE_PUT_PATH_REQUEST, S3GUARD_METADATASTORE_INITIALIZATION, + S3GUARD_METADATASTORE_RECORD_DELETES, + S3GUARD_METADATASTORE_RECORD_READS, + S3GUARD_METADATASTORE_RECORD_WRITES, S3GUARD_METADATASTORE_RETRY, S3GUARD_METADATASTORE_THROTTLED, STORE_IO_THROTTLED, - DELEGATION_TOKENS_ISSUED + DELEGATION_TOKENS_ISSUED, + FILES_DELETE_REJECTED }; private static final Statistic[] GAUGES_TO_CREATE = { @@ -1144,6 +1148,31 @@ public void throttled() { public void retrying() { // counters are incremented by owner. } + + /** + * Records have been read. + * @param count the number of records read + */ + public void recordsDeleted(int count) { + incrementCounter(S3GUARD_METADATASTORE_RECORD_DELETES, count); + } + + /** + * Records have been read. 
+ * @param count the number of records read + */ + public void recordsRead(int count) { + incrementCounter(S3GUARD_METADATASTORE_RECORD_READS, count); + } + + /** + * records have been written (including deleted). + * @param count number of records written. + */ + public void recordsWritten(int count) { + incrementCounter(S3GUARD_METADATASTORE_RECORD_WRITES, count); + } + } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java index fe4fd0ffd6a70..7e30fa6011250 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java @@ -84,6 +84,7 @@ import static org.apache.commons.lang3.StringUtils.isEmpty; import static org.apache.hadoop.fs.s3a.Constants.*; +import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.translateDeleteException; /** * Utility methods for S3A code. @@ -284,7 +285,7 @@ public static IOException translateException(@Nullable String operation, case 200: if (exception instanceof MultiObjectDeleteException) { // failure during a bulk delete - return translateMultiObjectDeleteException(message, + return translateDeleteException(message, (MultiObjectDeleteException) exception); } // other 200: FALL THROUGH @@ -448,40 +449,6 @@ public static IOException translateDynamoDBException(final String path, return result; } - /** - * A MultiObjectDeleteException is raised if one or more delete objects - * listed in a bulk DELETE operation failed. - * The top-level exception is therefore just "something wasn't deleted", - * but doesn't include the what or the why. - * This translation will extract an AccessDeniedException if that's one of - * the causes, otherwise grabs the status code and uses it in the - * returned exception. - * @param message text for the exception - * @param ex exception to translate - * @return an IOE with more detail. - */ - public static IOException translateMultiObjectDeleteException(String message, - MultiObjectDeleteException ex) { - List keys; - StringBuffer result = new StringBuffer(ex.getErrors().size() * 100); - result.append(message).append(": "); - String exitCode = ""; - for (MultiObjectDeleteException.DeleteError error : ex.getErrors()) { - String code = error.getCode(); - result.append(String.format("%s: %s: %s%n", code, error.getKey(), - error.getMessage())); - if (exitCode.isEmpty() || "AccessDenied".equals(code)) { - exitCode = code; - } - } - if ("AccessDenied".equals(exitCode)) { - return (IOException) new AccessDeniedException(result.toString()) - .initCause(ex); - } else { - return new AWSS3IOException(result.toString(), ex); - } - } - /** * Get low level details of an amazon exception for logging; multi-line. 
* @param e exception @@ -1004,7 +971,7 @@ public static String stringify(S3ObjectSummary summary) { * @return the value * @throws IllegalArgumentException if the value is below the minimum */ - static int intOption(Configuration conf, String key, int defVal, int min) { + public static int intOption(Configuration conf, String key, int defVal, int min) { int v = conf.getInt(key, defVal); Preconditions.checkArgument(v >= min, String.format("Value of %s: %d is below the minimum value %d", @@ -1022,7 +989,7 @@ static int intOption(Configuration conf, String key, int defVal, int min) { * @return the value * @throws IllegalArgumentException if the value is below the minimum */ - static long longOption(Configuration conf, + public static long longOption(Configuration conf, String key, long defVal, long min) { @@ -1384,7 +1351,7 @@ public interface LocatedFileStatusMap { * @throws IOException anything in the closure, or iteration logic. */ public static long applyLocatedFiles( - RemoteIterator iterator, + RemoteIterator iterator, CallOnLocatedFileStatus eval) throws IOException { long count = 0; while (iterator.hasNext()) { @@ -1404,7 +1371,7 @@ public static long applyLocatedFiles( * @throws IOException anything in the closure, or iteration logic. */ public static List mapLocatedFiles( - RemoteIterator iterator, + RemoteIterator iterator, LocatedFileStatusMap eval) throws IOException { final List results = new ArrayList<>(); applyLocatedFiles(iterator, diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ObjectAttributes.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ObjectAttributes.java index 2e62ff6728206..5a8dfc7bc8c0e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ObjectAttributes.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ObjectAttributes.java @@ -18,38 +18,74 @@ package org.apache.hadoop.fs.s3a; +import com.amazonaws.services.s3.transfer.model.CopyResult; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.Path; /** - * This class is only a holder for bucket, key, SSE Algorithm and SSE key - * attributes. It is used in {@link S3AInputStream} and the select equivalent. + * This class holds attributed of an object independent of the + * file status type. + * It is used in {@link S3AInputStream} and the select equivalent. * as a way to reduce parameters being passed - * to the constructor of such class. + * to the constructor of such class, + * and elsewhere to be a source-neutral representation of a file status. 
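+ * A simplified, illustrative construction from a completed server-side copy;
+ * the destPath, copyResult and sourceLength variables are examples only:
+ * <pre>
+ *   S3ObjectAttributes destAttributes = new S3ObjectAttributes(
+ *       destPath,                      // qualified destination path
+ *       copyResult,                    // CopyResult from the transfer manager
+ *       S3AEncryptionMethods.SSE_S3,   // current encryption algorithm
+ *       null,                          // no SSE-C key in this sketch
+ *       sourceLength);                 // length is not part of the copy response
+ * </pre>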
*/ @InterfaceAudience.Private @InterfaceStability.Evolving public class S3ObjectAttributes { private final String bucket; + private final Path path; private final String key; private final S3AEncryptionMethods serverSideEncryptionAlgorithm; private final String serverSideEncryptionKey; private final String eTag; private final String versionId; + private final long len; public S3ObjectAttributes( String bucket, + Path path, String key, S3AEncryptionMethods serverSideEncryptionAlgorithm, String serverSideEncryptionKey, String eTag, - String versionId) { + String versionId, + long len) { this.bucket = bucket; + this.path = path; this.key = key; this.serverSideEncryptionAlgorithm = serverSideEncryptionAlgorithm; this.serverSideEncryptionKey = serverSideEncryptionKey; this.eTag = eTag; this.versionId = versionId; + this.len = len; + } + + /** + * Construct from the result of a copy and those parameters + * which aren't included in an AWS SDK response. + * @param path + * @param copyResult copy result. + * @param serverSideEncryptionAlgorithm current encryption algorithm + * @param serverSideEncryptionKey any server side encryption key? + * @param len + */ + public S3ObjectAttributes( + final Path path, + final CopyResult copyResult, + final S3AEncryptionMethods serverSideEncryptionAlgorithm, + final String serverSideEncryptionKey, + final long len) { + this.bucket = copyResult.getDestinationBucketName(); + this.key = copyResult.getDestinationKey(); + this.path = path; + this.serverSideEncryptionAlgorithm = serverSideEncryptionAlgorithm; + this.serverSideEncryptionKey = serverSideEncryptionKey; + this.eTag = copyResult.getETag(); + this.versionId = copyResult.getVersionId(); + this.len = len; } public String getBucket() { @@ -75,4 +111,12 @@ public String getETag() { public String getVersionId() { return versionId; } + + public long getLen() { + return len; + } + + public Path getPath() { + return path; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java index 54a2c60254167..42322bc89656a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java @@ -42,6 +42,8 @@ public enum Statistic { "Total number of files created through the object store."), FILES_DELETED("files_deleted", "Total number of files deleted from the object store."), + FILES_DELETE_REJECTED("files_delete_rejected", + "Total number of files whose delete request was rejected"), FAKE_DIRECTORIES_CREATED("fake_directories_created", "Total number of fake directory entries created in the object store."), FAKE_DIRECTORIES_DELETED("fake_directories_deleted", @@ -207,6 +209,15 @@ public enum Statistic { "S3Guard metadata store put one metadata path latency"), S3GUARD_METADATASTORE_INITIALIZATION("s3guard_metadatastore_initialization", "S3Guard metadata store initialization times"), + S3GUARD_METADATASTORE_RECORD_DELETES( + "s3guard_metadatastore_record_deletes", + "S3Guard metadata store records deleted"), + S3GUARD_METADATASTORE_RECORD_READS( + "s3guard_metadatastore_record_reads", + "S3Guard metadata store records read"), + S3GUARD_METADATASTORE_RECORD_WRITES( + "s3guard_metadatastore_record_writes", + "S3Guard metadata store records written"), S3GUARD_METADATASTORE_RETRY("s3guard_metadatastore_retry", "S3Guard metadata store retry events"), 
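The CopyResult-based constructor added to S3ObjectAttributes above lets the copy/rename path describe the object it has just created without another HEAD request. A minimal sketch of that usage, assuming the CopyResult comes from the AWS TransferManager and that the path, encryption settings and length are already known to the caller (the wrapper class is hypothetical):

import com.amazonaws.services.s3.transfer.model.CopyResult;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.S3AEncryptionMethods;
import org.apache.hadoop.fs.s3a.S3ObjectAttributes;

/** Sketch: build attributes for the destination of a completed copy. */
final class CopyAttributesExample {

  private CopyAttributesExample() {
  }

  static S3ObjectAttributes forCopyDestination(
      Path destPath,
      CopyResult copyResult,
      S3AEncryptionMethods algorithm,
      String encryptionKey,
      long length) {
    // bucket, key, eTag and versionId all come from the CopyResult;
    // everything else is supplied by the caller.
    return new S3ObjectAttributes(destPath, copyResult, algorithm,
        encryptionKey, length);
  }
}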
S3GUARD_METADATASTORE_THROTTLED("s3guard_metadatastore_throttled", diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java index 54386addd7970..8cdce7b71d928 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.s3a; +import javax.annotation.Nullable; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; @@ -41,13 +42,15 @@ import com.amazonaws.services.s3.model.UploadPartResult; import com.amazonaws.services.s3.transfer.model.UploadResult; import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; +import org.apache.hadoop.fs.s3a.s3guard.S3Guard; import org.apache.hadoop.fs.s3a.select.SelectBinding; import org.apache.hadoop.util.DurationInfo; @@ -226,7 +229,7 @@ public String initiateMultiPartUpload(String destKey) throws IOException { /** * Finalize a multipart PUT operation. * This completes the upload, and, if that works, calls - * {@link S3AFileSystem#finishedWrite(String, long, String, String)} + * {@link S3AFileSystem#finishedWrite(String, long, String, String, BulkOperationState)} * to update the filesystem. * Retry policy: retrying, translated. * @param destKey destination of the commit @@ -234,6 +237,7 @@ public String initiateMultiPartUpload(String destKey) throws IOException { * @param partETags list of partial uploads * @param length length of the upload * @param retrying retrying callback + * @param operationState (nullable) operational state for a bulk update * @return the result of the operation. * @throws IOException on problems. */ @@ -243,7 +247,8 @@ private CompleteMultipartUploadResult finalizeMultipartUpload( String uploadId, List partETags, long length, - Retried retrying) throws IOException { + Retried retrying, + @Nullable BulkOperationState operationState) throws IOException { if (partETags.isEmpty()) { throw new IOException( "No upload parts in multipart upload to " + destKey); @@ -263,7 +268,7 @@ private CompleteMultipartUploadResult finalizeMultipartUpload( } ); owner.finishedWrite(destKey, length, uploadResult.getETag(), - uploadResult.getVersionId()); + uploadResult.getVersionId(), operationState); return uploadResult; } @@ -298,7 +303,8 @@ public CompleteMultipartUploadResult completeMPUwithRetries( uploadId, partETags, length, - (text, e, r, i) -> errorCount.incrementAndGet()); + (text, e, r, i) -> errorCount.incrementAndGet(), + null); } /** @@ -489,6 +495,52 @@ public void revertCommit(String destKey) throws IOException { ); } + /** + * This completes a multipart upload to the destination key via + * {@code finalizeMultipartUpload()}. + * Retry policy: retrying, translated. + * Retries increment the {@code errorCount} counter. 
+ * @param destKey destination + * @param uploadId multipart operation Id + * @param partETags list of partial uploads + * @param length length of the upload + * @param operationState operational state for a bulk update + * @return the result of the operation. + * @throws IOException if problems arose which could not be retried, or + * the retry count was exceeded + */ + @Retries.RetryTranslated + public CompleteMultipartUploadResult commitUpload( + String destKey, + String uploadId, + List partETags, + long length, + @Nullable BulkOperationState operationState) + throws IOException { + checkNotNull(uploadId); + checkNotNull(partETags); + LOG.debug("Completing multipart upload {} with {} parts", + uploadId, partETags.size()); + return finalizeMultipartUpload(destKey, + uploadId, + partETags, + length, + Invoker.NO_OP, + operationState); + } + + /** + * Initiate a commit operation through any metastore. + * @param path path under which the writes will all take place. + * @return an possibly null operation state from the metastore. + * @throws IOException failure to instantiate. + */ + public BulkOperationState initiateCommitOperation( + Path path) throws IOException { + return S3Guard.initiateBulkWrite(owner.getMetadataStore(), + BulkOperationState.OperationType.Put, path); + } + /** * Upload part of a multi-partition file. * @param request request diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java index ed608cb983186..a49ab52b1ffd2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java @@ -442,14 +442,27 @@ protected void commitPendingUploads(JobContext context, } LOG.debug("{}: committing the output of {} task(s)", getRole(), pending.size()); - Tasks.foreach(pending) - .stopOnFailure() - .executeWith(buildThreadPool(context)) - .onFailure((commit, exception) -> - getCommitOperations().abortSingleCommit(commit)) - .abortWith(commit -> getCommitOperations().abortSingleCommit(commit)) - .revertWith(commit -> getCommitOperations().revertCommit(commit)) - .run(commit -> getCommitOperations().commitOrFail(commit)); + try(CommitOperations.CommitContext commitContext + = initiateCommitOperation()) { + Tasks.foreach(pending) + .stopOnFailure() + .executeWith(buildThreadPool(context)) + .onFailure((commit, exception) -> + commitContext.abortSingleCommit(commit)) + .abortWith(commitContext::abortSingleCommit) + .revertWith(commitContext::revertCommit) + .run(commitContext::commitOrFail); + } + } + + /** + * Start the final commit/abort commit operations. + * @return a commit context through which the operations can be invoked. + * @throws IOException failure. 
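For context, a minimal sketch of the new commitUpload()/initiateCommitOperation() pairing in WriteOperationHelper. The wrapper method is hypothetical; in the real code the committer obtains and manages the state through CommitOperations rather than calling these directly.

import java.io.IOException;
import java.util.List;

import com.amazonaws.services.s3.model.PartETag;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.WriteOperationHelper;
import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState;

/** Sketch: commit one multipart upload under a bulk operation state. */
final class CommitUploadExample {

  private CommitUploadExample() {
  }

  static void commitOne(WriteOperationHelper helper,
      Path destDir,
      String destKey,
      String uploadId,
      List<PartETag> parts,
      long length) throws IOException {
    // may be null if there is no metastore, or it declines to provide state
    BulkOperationState state = helper.initiateCommitOperation(destDir);
    try {
      // normally many commits under destDir would share the same state
      helper.commitUpload(destKey, uploadId, parts, length, state);
    } finally {
      if (state != null) {
        state.close();
      }
    }
  }
}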
+ */ + protected CommitOperations.CommitContext initiateCommitOperation() + throws IOException { + return getCommitOperations().initiateCommitOperation(getOutputPath()); } /** @@ -531,7 +544,9 @@ protected void abortPendingUploadsInCleanup( Path dest = getOutputPath(); try (DurationInfo d = new DurationInfo(LOG, "Aborting all pending commits under %s", - dest)) { + dest); + CommitOperations.CommitContext commitContext + = initiateCommitOperation()) { CommitOperations ops = getCommitOperations(); List pending; try { @@ -544,7 +559,8 @@ protected void abortPendingUploadsInCleanup( Tasks.foreach(pending) .executeWith(buildThreadPool(getJobContext())) .suppressExceptions(suppressExceptions) - .run(u -> ops.abortMultipartCommit(u.getKey(), u.getUploadId())); + .run(u -> commitContext.abortMultipartCommit( + u.getKey(), u.getUploadId())); } } @@ -752,11 +768,13 @@ protected void abortPendingUploads(JobContext context, LOG.info("{}: no pending commits to abort", getRole()); } else { try (DurationInfo d = new DurationInfo(LOG, - "Aborting %s uploads", pending.size())) { + "Aborting %s uploads", pending.size()); + CommitOperations.CommitContext commitContext + = initiateCommitOperation()) { Tasks.foreach(pending) .executeWith(buildThreadPool(context)) .suppressExceptions(suppressExceptions) - .run(commit -> getCommitOperations().abortSingleCommit(commit)); + .run(commitContext::abortSingleCommit); } } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java index 55ace17b8a21e..7bf263ef8849c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java @@ -18,13 +18,14 @@ package org.apache.hadoop.fs.s3a.commit; +import javax.annotation.Nullable; +import java.io.Closeable; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -49,6 +50,9 @@ import org.apache.hadoop.fs.s3a.commit.files.PendingSet; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; import org.apache.hadoop.fs.s3a.commit.files.SuccessData; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.DurationInfo; import static org.apache.hadoop.fs.s3a.S3AUtils.*; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; @@ -128,10 +132,13 @@ protected S3AInstrumentation.CommitterStatistics getStatistics() { /** * Commit the operation, throwing an exception on any failure. * @param commit commit to execute + * @param operationState S3Guard state of ongoing operation. * @throws IOException on a failure */ - public void commitOrFail(SinglePendingCommit commit) throws IOException { - commit(commit, commit.getFilename()).maybeRethrow(); + private void commitOrFail( + final SinglePendingCommit commit, + final BulkOperationState operationState) throws IOException { + commit(commit, commit.getFilename(), operationState).maybeRethrow(); } /** @@ -139,16 +146,20 @@ public void commitOrFail(SinglePendingCommit commit) throws IOException { * and converted to an outcome. 
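The committer changes above route every commit, abort and revert through a single CommitContext held in a try-with-resources block. A standalone sketch of the same pattern (the helper method is hypothetical; the CommitContext API is the one introduced below in CommitOperations):

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.commit.CommitOperations;
import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit;

/** Sketch: commit pending uploads, aborting any commit which fails. */
final class CommitContextExample {

  private CommitContextExample() {
  }

  static void commitAll(CommitOperations operations,
      Path outputPath,
      Iterable<SinglePendingCommit> pending) throws IOException {
    try (CommitOperations.CommitContext context =
             operations.initiateCommitOperation(outputPath)) {
      for (SinglePendingCommit commit : pending) {
        try {
          context.commitOrFail(commit);
        } catch (IOException e) {
          context.abortSingleCommit(commit);
          throw e;
        }
      }
    }
  }
}

Closing the context releases whatever BulkOperationState the metastore provided, which is why the committers wrap it in try-with-resources.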
* @param commit entry to commit * @param origin origin path/string for outcome text + * @param operationState S3Guard state of ongoing operation. * @return the outcome */ - public MaybeIOE commit(SinglePendingCommit commit, String origin) { + private MaybeIOE commit( + final SinglePendingCommit commit, + final String origin, + final BulkOperationState operationState) { LOG.debug("Committing single commit {}", commit); MaybeIOE outcome; String destKey = "unknown destination"; try { commit.validate(); destKey = commit.getDestinationKey(); - long l = innerCommit(commit); + long l = innerCommit(commit, operationState); LOG.debug("Successful commit of file length {}", l); outcome = MaybeIOE.NONE; statistics.commitCompleted(commit.getLength()); @@ -171,17 +182,20 @@ public MaybeIOE commit(SinglePendingCommit commit, String origin) { /** * Inner commit operation. * @param commit entry to commit + * @param operationState S3Guard state of ongoing operation. * @return bytes committed. * @throws IOException failure */ - private long innerCommit(SinglePendingCommit commit) throws IOException { + private long innerCommit( + final SinglePendingCommit commit, + final BulkOperationState operationState) throws IOException { // finalize the commit - writeOperations.completeMPUwithRetries( + writeOperations.commitUpload( commit.getDestinationKey(), commit.getUploadId(), toPartEtags(commit.getEtags()), commit.getLength(), - new AtomicInteger(0)); + operationState); return commit.getLength(); } @@ -249,7 +263,7 @@ public IOException makeIOE(String key, Exception ex) { * @throws FileNotFoundException if the abort ID is unknown * @throws IOException on any failure */ - public void abortSingleCommit(SinglePendingCommit commit) + private void abortSingleCommit(SinglePendingCommit commit) throws IOException { String destKey = commit.getDestinationKey(); String origin = commit.getFilename() != null @@ -268,7 +282,7 @@ public void abortSingleCommit(SinglePendingCommit commit) * @throws FileNotFoundException if the abort ID is unknown * @throws IOException on any failure */ - public void abortMultipartCommit(String destKey, String uploadId) + private void abortMultipartCommit(String destKey, String uploadId) throws IOException { try { writeOperations.abortMultipartCommit(destKey, uploadId); @@ -392,7 +406,10 @@ public void createSuccessMarker(Path outputPath, Path markerPath = new Path(outputPath, _SUCCESS); LOG.debug("Touching success marker for job {}: {}", markerPath, successData); - successData.save(fs, markerPath, true); + try (DurationInfo ignored = new DurationInfo(LOG, + "Writing success file %s", markerPath)) { + successData.save(fs, markerPath, true); + } } /** @@ -401,7 +418,7 @@ public void createSuccessMarker(Path outputPath, * @throws IOException failure */ public void revertCommit(SinglePendingCommit commit) throws IOException { - LOG.warn("Revert {}", commit); + LOG.info("Revert {}", commit); try { writeOperations.revertCommit(commit.getDestinationKey()); } finally { @@ -520,6 +537,120 @@ public void jobCompleted(boolean success) { statistics.jobCompleted(success); } + /** + * Begin the final commit. + * @param path path for all work. + * @return the commit context to pass in. + * @throws IOException failure. + */ + public CommitContext initiateCommitOperation(Path path) throws IOException { + return new CommitContext(writeOperations.initiateCommitOperation(path)); + } + + /** + * Commit context. + * + * It is used to manage the final commit sequence where files become + * visible. 
It contains a {@link BulkOperationState} field, which, if + * there is a metastore, will be requested from the store so that it + * can track multiple creation operations within the same overall operation. + * This will be null if there is no metastore, or the store chooses not + * to provide one. + * + * This can only be created through {@link #initiateCommitOperation(Path)}. + * + * Once the commit operation has completed, it must be closed. + * It must not be reused. + */ + public final class CommitContext implements Closeable { + + /** + * State of any metastore. + */ + private final BulkOperationState operationState; + + /** + * Create. + * @param operationState any S3Guard bulk state. + */ + private CommitContext(@Nullable final BulkOperationState operationState) { + this.operationState = operationState; + } + + /** + * Commit the operation, throwing an exception on any failure. + * See {@link CommitOperations#commitOrFail(SinglePendingCommit, BulkOperationState)}. + * @param commit commit to execute + * @throws IOException on a failure + */ + public void commitOrFail(SinglePendingCommit commit) throws IOException { + CommitOperations.this.commitOrFail(commit, operationState); + } + + /** + * Commit a single pending commit; exceptions are caught + * and converted to an outcome. + * See {@link CommitOperations#commit(SinglePendingCommit, String, BulkOperationState)}. + * @param commit entry to commit + * @param origin origin path/string for outcome text + * @return the outcome + */ + public MaybeIOE commit(SinglePendingCommit commit, + String origin) { + return CommitOperations.this.commit(commit, origin, operationState); + } + + /** + * See {@link CommitOperations#abortSingleCommit(SinglePendingCommit)}. + * @param commit pending commit to abort + * @throws FileNotFoundException if the abort ID is unknown + * @throws IOException on any failure + */ + public void abortSingleCommit(final SinglePendingCommit commit) + throws IOException { + CommitOperations.this.abortSingleCommit(commit); + } + + /** + * See {@link CommitOperations#revertCommit(SinglePendingCommit)}. + * @param commit pending commit + * @throws IOException failure + */ + public void revertCommit(final SinglePendingCommit commit) + throws IOException { + CommitOperations.this.revertCommit(commit); + } + + /** + * See {@link CommitOperations#abortMultipartCommit(String, String)}.. 
+ * @param destKey destination key + * @param uploadId upload to cancel + * @throws FileNotFoundException if the abort ID is unknown + * @throws IOException on any failure + */ + public void abortMultipartCommit( + final String destKey, + final String uploadId) + throws IOException { + CommitOperations.this.abortMultipartCommit(destKey, uploadId); + } + + @Override + public void close() throws IOException { + IOUtils.cleanupWithLogger(LOG, operationState); + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "CommitContext{"); + sb.append("operationState=").append(operationState); + sb.append('}'); + return sb.toString(); + } + + } + /** * A holder for a possible IOException; the call {@link #maybeRethrow()} * will throw any exception passed into the constructor, and be a no-op diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicS3GuardCommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicS3GuardCommitter.java index 813b9a77460e3..969286e4d8eb0 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicS3GuardCommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicS3GuardCommitter.java @@ -123,8 +123,11 @@ protected List listPendingUploadsToCommit( */ public void cleanupStagingDirs() { Path path = magicSubdir(getOutputPath()); - Invoker.ignoreIOExceptions(LOG, "cleanup magic directory", path.toString(), - () -> deleteWithWarning(getDestFS(), path, true)); + try(DurationInfo ignored = new DurationInfo(LOG, true, + "Deleting magic directory %s", path)) { + Invoker.ignoreIOExceptions(LOG, "cleanup magic directory", path.toString(), + () -> deleteWithWarning(getDestFS(), path, true)); + } } @Override diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java index f26384de49e83..518d789718a80 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitter; import org.apache.hadoop.fs.s3a.commit.CommitConstants; +import org.apache.hadoop.fs.s3a.commit.CommitOperations; import org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants; import org.apache.hadoop.fs.s3a.commit.Tasks; import org.apache.hadoop.fs.s3a.commit.files.PendingSet; @@ -729,9 +730,14 @@ protected int commitTaskInternal(final TaskAttemptContext context, LOG.error( "{}: Exception during commit process, aborting {} commit(s)", getRole(), commits.size()); - Tasks.foreach(commits) - .suppressExceptions() - .run(commit -> getCommitOperations().abortSingleCommit(commit)); + try(CommitOperations.CommitContext commitContext + = initiateCommitOperation(); + DurationInfo ignored = new DurationInfo(LOG, + "Aborting %s uploads", commits.size())) { + Tasks.foreach(commits) + .suppressExceptions() + .run(commitContext::abortSingleCommit); + } deleteTaskAttemptPathQuietly(context); } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java new file mode 
100644 index 0000000000000..904d5f750dbee --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Base class of operations in the store. + * An operation is something which executes against the context to + * perform a single function. + * It is expected to have a limited lifespan. + */ +public abstract class AbstractStoreOperation { + + private final StoreContext storeContext; + + /** + * constructor. + * @param storeContext store context. + */ + protected AbstractStoreOperation(final StoreContext storeContext) { + this.storeContext = checkNotNull(storeContext); + } + + /** + * Get the store context. + * @return the context. + */ + public final StoreContext getStoreContext() { + return storeContext; + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java new file mode 100644 index 0000000000000..609eecee643c3 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
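AbstractStoreOperation above is deliberately thin: a concrete operation holds the StoreContext (defined elsewhere in this patch) and reaches the store only through it. A hypothetical subclass, for illustration only:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.impl.AbstractStoreOperation;
import org.apache.hadoop.fs.s3a.impl.StoreContext;

/** Hypothetical operation: qualify a key through the store context. */
final class QualifyKeyOperation extends AbstractStoreOperation {

  private final String key;

  QualifyKeyOperation(StoreContext storeContext, String key) {
    super(storeContext);
    this.key = key;
  }

  Path execute() {
    // all store access goes through the context, never a raw S3AFileSystem
    return getStoreContext().keyToPath(key);
  }
}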
+ */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.Executor; +import java.util.function.Supplier; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.impl.WrappedIOException; +import org.apache.hadoop.util.DurationInfo; + +import static org.apache.hadoop.fs.impl.FutureIOSupport.raiseInnerCause; + +/** + * A bridge from Callable to Supplier; catching exceptions + * raised by the callable and wrapping them as appropriate. + * @param return type. + */ +public final class CallableSupplier implements Supplier { + + private static final Logger LOG = + LoggerFactory.getLogger(CallableSupplier.class); + + private final Callable call; + + /** + * Create. + * @param call call to invoke. + */ + public CallableSupplier(final Callable call) { + this.call = call; + } + + @Override + public Object get() { + try { + return call.call(); + } catch (RuntimeException e) { + throw e; + } catch (IOException e) { + throw new WrappedIOException(e); + } catch (Exception e) { + throw new WrappedIOException(new IOException(e)); + } + } + + /** + * Submit a callable into a completable future. + * RTEs are rethrown. + * Non RTEs are caught and wrapped; IOExceptions to + * {@link WrappedIOException} instances. + * @param executor executor. + * @param call call to invoke + * @param type + * @return the future to wait for + */ + @SuppressWarnings("unchecked") + public static CompletableFuture submit( + final Executor executor, + final Callable call) { + return CompletableFuture.supplyAsync( + new CallableSupplier(call), executor); + } + + /** + * Wait for a list of futures to complete. If the list is empty, + * return immediately. + * @param futures list of futures. + * @throws IOException if one of the called futures raised an IOE. + * @throws RuntimeException if one of the futures raised one. + */ + public static void waitForCompletion( + final List> futures) + throws IOException { + if (futures.isEmpty()) { + return; + } + // await completion + waitForCompletion(CompletableFuture.allOf( + futures.toArray(new CompletableFuture[0]))); + } + + /** + * Wait for a single of future to complete, extracting IOEs afterwards. + * @param future future to wait for. + * @throws IOException if one of the called futures raised an IOE. + * @throws RuntimeException if one of the futures raised one. + */ + public static void waitForCompletion( + final CompletableFuture future) + throws IOException { + try (DurationInfo ignore = + new DurationInfo(LOG, false, "Waiting for task completion")) { + future.join(); + } catch (CancellationException e) { + throw new IOException(e); + } catch (CompletionException e) { + raiseInnerCause(e); + } + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java new file mode 100644 index 0000000000000..1ca3a42686ab3 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
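CallableSupplier above is the bridge that lets IOException-throwing work run inside CompletableFuture pipelines. A minimal sketch of the submit()/waitForCompletion() pairing, with a placeholder task standing in for the real copy call (the example class, its thread pool size and copyOneObject() are all assumptions):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.fs.s3a.impl.CallableSupplier;

/** Sketch: run a batch of IO tasks in parallel, rethrowing any IOE. */
final class ParallelTaskExample {

  private ParallelTaskExample() {
  }

  static void copyAll(List<String> keys) throws IOException {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    try {
      List<CompletableFuture<Void>> futures = new ArrayList<>();
      for (String key : keys) {
        futures.add(CallableSupplier.submit(pool, () -> {
          copyOneObject(key);   // stand-in for the real copy call
          return null;
        }));
      }
      // rethrows any IOException raised inside the callables
      CallableSupplier.waitForCompletion(futures);
    } finally {
      pool.shutdown();
    }
  }

  private static void copyOneObject(String key) throws IOException {
    // placeholder for an actual S3 operation
  }
}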
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.File; +import java.io.IOException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.Retries; + +/** + * An interface to implement for providing accessors to + * S3AFileSystem-level API calls. + *

+ * This is used to avoid giving any explicit reference to the owning
+ * FS in the store context; there are enough calls that using lambda-expressions
+ * gets over-complex.
+ * <ol>
+ *   <li>Test suites are free to provide their own implementation, using
+ *   the S3AFileSystem methods as the normative reference.</li>
+ *   <li>All implementations MUST translate exceptions.</li>
+ * </ol>
    + */ +public interface ContextAccessors { + + /** + * Convert a key to a fully qualified path. + * @param key input key + * @return the fully qualified path including URI scheme and bucket name. + */ + Path keyToPath(String key); + + /** + * Turns a path (relative or otherwise) into an S3 key. + * + * @param path input path, may be relative to the working dir + * @return a key excluding the leading "/", or, if it is the root path, "" + */ + String pathToKey(Path path); + + /** + * Create a temporary file. + * @param prefix prefix for the temporary file + * @param size the size of the file that is going to be written + * @return a unique temporary file + * @throws IOException IO problems + */ + File createTempFile(String prefix, long size) throws IOException; + + /** + * Get the region of a bucket. This may be via an S3 API call if not + * already cached. + * @return the region in which a bucket is located + * @throws IOException on any failure. + */ + @Retries.RetryTranslated + String getBucketLocation() throws IOException; +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java new file mode 100644 index 0000000000000..1b2a430284c02 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +/** + * Internal constants private only to the S3A codebase. + */ +public final class InternalConstants { + + private InternalConstants() { + } + + /** + * This is an arbitrary value: {@value}. + * It declares how many parallel copy operations + * in a single rename can be queued before the operation pauses + * and awaits completion. + * A very large value wouldn't just starve other threads from + * performing work, there's a risk that the S3 store itself would + * throttle operations (which all go to the same shard). + * It is not currently configurable just to avoid people choosing values + * which work on a microbenchmark (single rename, no other work, ...) + * but don't scale well to execution in a large process against a common + * store, all while separate processes are working with the same shard + * of storage. + * + * It should be a factor of {@link #MAX_ENTRIES_TO_DELETE} so that + * all copies will have finished before deletion is contemplated. + * (There's always a block for that, it just makes more sense to + * perform the bulk delete after another block of copies have completed). 
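As the ContextAccessors javadoc above notes, test suites can supply their own implementation. A simplified, test-only sketch bound to a fixed bucket and region; every concrete value here is an assumption made for illustration:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.impl.ContextAccessors;

/** Test-only accessors: paths map to s3a://bucket/key, region is fixed. */
final class StubContextAccessors implements ContextAccessors {

  private final String bucket;

  StubContextAccessors(String bucket) {
    this.bucket = bucket;
  }

  @Override
  public Path keyToPath(String key) {
    return new Path("s3a://" + bucket + "/" + key);
  }

  @Override
  public String pathToKey(Path path) {
    // simplified: strip the leading "/"; the root path becomes ""
    return path.toUri().getPath().substring(1);
  }

  @Override
  public File createTempFile(String prefix, long size) throws IOException {
    // prefix must be at least three characters for createTempFile
    return File.createTempFile(prefix, ".tmp");
  }

  @Override
  public String getBucketLocation() throws IOException {
    return "us-west-2";   // fixed region for the stub
  }
}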
+ */ + public static final int RENAME_PARALLEL_LIMIT = 10; + + /** + * The maximum number of entries that can be deleted in any bulk delete + * call to S3: {@value}. + */ + public static final int MAX_ENTRIES_TO_DELETE = 1000; + + /** + * Default blocksize as used in blocksize and FS status queries: {@value}. + */ + public static final int DEFAULT_BLOCKSIZE = 32 * 1024 * 1024; +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java new file mode 100644 index 0000000000000..fce1780dd4137 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.IOException; +import java.nio.file.AccessDeniedException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; + +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.MultiObjectDeleteException; +import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.AWSS3IOException; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; + +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Support for Multi Object Deletion. + */ +public final class MultiObjectDeleteSupport extends AbstractStoreOperation { + + private static final Logger LOG = LoggerFactory.getLogger( + MultiObjectDeleteSupport.class); + + /** + * Initiate with a store context. + * @param context store context. + */ + public MultiObjectDeleteSupport(final StoreContext context) { + super(context); + } + + /** + * This is the exception exit code if access was denied on a delete. + * {@value}. + */ + public static final String ACCESS_DENIED = "AccessDenied"; + + /** + * A {@code MultiObjectDeleteException} is raised if one or more + * paths listed in a bulk DELETE operation failed. + * The top-level exception is therefore just "something wasn't deleted", + * but doesn't include the what or the why. + * This translation will extract an AccessDeniedException if that's one of + * the causes, otherwise grabs the status code and uses it in the + * returned exception. + * @param message text for the exception + * @param deleteException the delete exception. 
to translate + * @return an IOE with more detail. + */ + public static IOException translateDeleteException( + final String message, + final MultiObjectDeleteException deleteException) { + final StringBuilder result = new StringBuilder( + deleteException.getErrors().size() * 256); + result.append(message).append(": "); + String exitCode = ""; + for (MultiObjectDeleteException.DeleteError error : + deleteException.getErrors()) { + String code = error.getCode(); + result.append(String.format("%s: %s: %s%n", code, error.getKey(), + error.getMessage())); + if (exitCode.isEmpty() || ACCESS_DENIED.equals(code)) { + exitCode = code; + } + } + if (ACCESS_DENIED.equals(exitCode)) { + return (IOException) new AccessDeniedException(result.toString()) + .initCause(deleteException); + } else { + return new AWSS3IOException(result.toString(), deleteException); + } + } + + /** + * Process a multi object delete exception by building two paths from + * the delete request: one of all deleted files, one of all undeleted values. + * The latter are those rejected in the delete call. + * @param deleteException the delete exception. + * @param keysToDelete the keys in the delete request + * @return tuple of (undeleted, deleted) paths. + */ + public Pair, List> splitUndeletedKeys( + final MultiObjectDeleteException deleteException, + final Collection keysToDelete) { + LOG.debug("Processing delete failure; keys to delete count = {};" + + " errors in exception {}; successful deletions = {}", + keysToDelete.size(), + deleteException.getErrors().size(), + deleteException.getDeletedObjects().size()); + // convert the collection of keys being deleted into paths + final List pathsBeingDeleted = keysToPaths(keysToDelete); + // Take this is list of paths + // extract all undeleted entries contained in the exception and + // then removes them from the original list. + List undeleted = removeUndeletedPaths(deleteException, + pathsBeingDeleted, + getStoreContext()::keyToPath); + return Pair.of(undeleted, pathsBeingDeleted); + } + + /** + * Given a list of delete requests, convert them all to paths. + * @param keysToDelete list of keys for the delete operation. + * @return the paths. + */ + public List keysToPaths( + final Collection keysToDelete) { + return convertToPaths(keysToDelete, + getStoreContext()::keyToPath); + } + + /** + * Given a list of delete requests, convert them all to paths. + * @param keysToDelete list of keys for the delete operation. + * @param qualifier path qualifier + * @return the paths. + */ + public static List convertToPaths( + final Collection keysToDelete, + final Function qualifier) { + return keysToDelete.stream() + .map((keyVersion) -> + qualifier.apply(keyVersion.getKey())) + .collect(Collectors.toList()); + } + + /** + * Process a delete failure by removing from the metastore all entries + * which where deleted, as inferred from the delete failures exception + * and the original list of files to delete declares to have been deleted. + * @param deleteException the delete exception. + * @param keysToDelete collection of keys which had been requested. 
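A short sketch of how a caller might apply translateDeleteException() shown above around a raw bulk delete. The helper itself is hypothetical; inside S3A the translation is reached through translateException() on the 200-with-errors path.

import java.io.IOException;
import java.util.List;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.MultiObjectDeleteException;

import org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport;

/** Sketch: issue a bulk delete, converting partial failure to an IOE. */
final class BulkDeleteExample {

  private BulkDeleteExample() {
  }

  static void deleteKeys(AmazonS3 s3, String bucket,
      List<DeleteObjectsRequest.KeyVersion> keys) throws IOException {
    try {
      s3.deleteObjects(new DeleteObjectsRequest(bucket).withKeys(keys));
    } catch (MultiObjectDeleteException e) {
      // becomes AccessDeniedException if that was one of the per-key errors
      throw MultiObjectDeleteSupport.translateDeleteException(
          "delete under " + bucket, e);
    }
  }
}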
+ * @return a tuple of (undeleted, deleted, failures) + */ + public Triple, List, List>> + processDeleteFailure( + final MultiObjectDeleteException deleteException, + final List keysToDelete) { + final MetadataStore metadataStore = + checkNotNull(getStoreContext().getMetadataStore(), + "context metadatastore"); + final List> failures = new ArrayList<>(); + final Pair, List> outcome = + splitUndeletedKeys(deleteException, keysToDelete); + List deleted = outcome.getRight(); + List undeleted = outcome.getLeft(); + // delete the paths but recover + // TODO: handle the case where a parent path is deleted but not a child. + // TODO: in a fake object delete, we don't actually want to delete + // metastore entries + deleted.forEach(path -> { + try { + metadataStore.delete(path, getStoreContext().getTimeProvider()); + } catch (IOException e) { + // trouble: we failed to delete the far end entry + // try with the next one. + // if this is a big network failure, this is going to be noisy. + LOG.warn("Failed to update S3Guard store with deletion of {}", path); + failures.add(Pair.of(path, e)); + } + }); + if (LOG.isDebugEnabled()) { + undeleted.forEach(p -> LOG.debug("Deleted {}", p)); + } + return Triple.of(undeleted, deleted, failures); + } + + /** + * Build a list of undeleted paths from a {@code MultiObjectDeleteException}. + * Outside of unit tests, the qualifier function should be + * {@link S3AFileSystem#keyToQualifiedPath(String)}. + * @param deleteException the delete exception. + * @param qualifierFn function to qualify paths + * @return the possibly empty list of paths. + */ + @VisibleForTesting + public static List extractUndeletedPaths( + final MultiObjectDeleteException deleteException, + final Function qualifierFn) { + return deleteException.getErrors().stream() + .map((e) -> qualifierFn.apply(e.getKey())) + .collect(Collectors.toList()); + } + + /** + * Process a {@code MultiObjectDeleteException} by + * removing all undeleted paths from the list of paths being deleted. + * The original list is updated, and so becomes the list of successfully + * deleted paths. + * @param deleteException the delete exception. + * @param pathsBeingDeleted list of paths which were being deleted. + * This has all undeleted paths removed, leaving only those deleted. + * @return the list of undeleted entries + */ + @VisibleForTesting + static List removeUndeletedPaths( + final MultiObjectDeleteException deleteException, + final Collection pathsBeingDeleted, + final Function qualifier) { + List undeleted = extractUndeletedPaths(deleteException, qualifier); + pathsBeingDeleted.removeAll(undeleted); + return undeleted; + } + + /** + * A delete operation failed. + * Currently just returns the list of all paths. + * @param ex exception. + * @param keysToDelete the keys which were being deleted. + * @return all paths which were not deleted. + */ + public List processDeleteFailureGenericException(Exception ex, + final List keysToDelete) { + return keysToPaths(keysToDelete); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java new file mode 100644 index 0000000000000..04784fb6e65e8 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java @@ -0,0 +1,634 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
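The (undeleted, deleted, failures) triple returned by processDeleteFailure() can be consumed directly through the commons-lang3 accessors; a tiny illustrative helper (not part of the patch):

import java.io.IOException;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.lang3.tuple.Triple;

import org.apache.hadoop.fs.Path;

/** Sketch: summarize the outcome of a partial bulk-delete failure. */
final class DeleteFailureSummary {

  private DeleteFailureSummary() {
  }

  static String summarize(
      Triple<List<Path>, List<Path>, List<Pair<Path, IOException>>> outcome) {
    return String.format("undeleted=%d, deleted=%d, metastore failures=%d",
        outcome.getLeft().size(),
        outcome.getMiddle().size(),
        outcome.getRight().size());
  }
}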
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.MultiObjectDeleteException; +import com.amazonaws.services.s3.transfer.model.CopyResult; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.InvalidRequestException; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.s3a.RenameFailedException; +import org.apache.hadoop.fs.s3a.Retries; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.S3ALocatedFileStatus; +import org.apache.hadoop.fs.s3a.S3AReadOpContext; +import org.apache.hadoop.fs.s3a.S3ObjectAttributes; +import org.apache.hadoop.fs.s3a.Tristate; +import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; +import org.apache.hadoop.fs.s3a.s3guard.RenameTracker; +import org.apache.hadoop.util.DurationInfo; + +import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_BLOCK_SIZE; +import static org.apache.hadoop.fs.s3a.S3AUtils.objectRepresentsDirectory; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DEFAULT_BLOCKSIZE; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.MAX_ENTRIES_TO_DELETE; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.RENAME_PARALLEL_LIMIT; + +/** + * A parallelized rename operation which updates the metastore in the + * process, through whichever {@link RenameTracker} the store provides. + * The parallel execution is in groups of size + * {@link InternalConstants#RENAME_PARALLEL_LIMIT}; it is only + * after one group completes that the next group is initiated. + * Once enough files have been copied that they meet the + * {@link InternalConstants#MAX_ENTRIES_TO_DELETE} threshold, a delete + * is initiated. + * If it succeeds, the rename continues with the next group of files. + * + * The RenameTracker has the task of keeping the metastore up to date + * as the rename proceeds. + * + * The rename operation implements the classic HDFS rename policy of + * rename(file, dir) renames the file under the directory. + * + * There is no validation of input and output paths. 
+ * Callers are required to themselves verify that destination is not under + * the source, above the source, the source itself, etc, etc. + */ +public class RenameOperation extends AbstractStoreOperation { + + private static final Logger LOG = LoggerFactory.getLogger( + RenameOperation.class); + + /** + * Used to stop any re-entrancy of the rename. + * This is an execute-once operation. + */ + private final AtomicBoolean executed = new AtomicBoolean(false); + + private final Path sourcePath; + + private final String sourceKey; + + private final S3AFileStatus sourceStatus; + + private final Path destPath; + + private final String destKey; + + private final S3AFileStatus destStatus; + + /** + * Callbacks into the filesystem. + */ + private final RenameOperationCallbacks callbacks; + + /** + * Counter of bytes copied. + */ + private final AtomicLong bytesCopied = new AtomicLong(); + + /** + * Rename tracker. + */ + private RenameTracker renameTracker; + + /** + * List of active copies. + */ + private final List> activeCopies = + new ArrayList<>(RENAME_PARALLEL_LIMIT); + + /** + * list of keys to delete on the next (bulk) delete call. + */ + private final List keysToDelete = + new ArrayList<>(); + + /** + * List of paths to delete, which will be passed to the rename + * tracker after the deletion succeeds. + */ + private final List pathsToDelete = new ArrayList<>(); + + private final long blocksize; + + /** + * Initiate the rename. + * + * @param storeContext store context + * @param sourcePath source path + * @param sourceKey key of source + * @param sourceStatus pre-fetched source status + * @param destPath destination path. + * @param destKey destination key + * @param destStatus destination status. + * @param callbacks callback provider + */ + public RenameOperation( + final StoreContext storeContext, + final Path sourcePath, + final String sourceKey, + final S3AFileStatus sourceStatus, + final Path destPath, + final String destKey, + final S3AFileStatus destStatus, + final RenameOperationCallbacks callbacks) { + super(storeContext); + this.sourcePath = sourcePath; + this.sourceKey = sourceKey; + this.sourceStatus = sourceStatus; + this.destPath = destPath; + this.destKey = destKey; + this.destStatus = destStatus; + this.callbacks = callbacks; + blocksize = storeContext.getConfiguration() + .getLongBytes(FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE); + } + + /** + * Wait for the active copies to complete then reset the list. + * @param reason for messages + */ + private void completeActiveCopies(String reason) throws IOException { + LOG.debug("Waiting for {} active copies to complete: {}", + activeCopies.size(), reason); + waitForCompletion(activeCopies); + activeCopies.clear(); + } + + /** + * Queue and object for deletion. + * @param path path to the object + * @param key key of the object. + */ + private void queueToDelete(Path path, String key) { + pathsToDelete.add(path); + keysToDelete.add(new DeleteObjectsRequest.KeyVersion(key)); + } + + /** + * Block waiting for ay active copies to finish + * then delete all queued keys + paths to delete. + * @param reason reason for logs + * @throws IOException failure. + */ + private void completeActiveCopiesAndDeleteSources(String reason) + throws IOException { + completeActiveCopies(reason); + removeSourceObjects( + keysToDelete, + pathsToDelete); + // now reset the lists. 
+ keysToDelete.clear(); + pathsToDelete.clear(); + } + + @Retries.RetryMixed + public long executeRename() throws IOException { + Preconditions.checkState( + !executed.getAndSet(true), + "Rename attempted twice"); + final StoreContext storeContext = getStoreContext(); + final MetadataStore metadataStore = checkNotNull( + storeContext.getMetadataStore(), + "No metadata store in context"); + + // Validation completed: time to begin the operation. + // The store-specific rename tracker is used to keep the store + // to date with the in-progress operation. + // for the null store, these are all no-ops. + renameTracker = metadataStore.initiateRenameOperation( + storeContext, + sourcePath, sourceStatus, destPath); + + + // Ok! Time to start + try { + if (sourceStatus.isFile()) { + renameFileToDest(); + } else { + recursiveDirectoryRename(); + } + } catch (AmazonClientException | IOException ex) { + // rename failed. + // block for all ongoing copies to complete, successfully or not + try { + completeActiveCopies("failure handling"); + } catch (IOException e) { + // a failure to update the metastore after a rename failure is what + // we'd see on a network problem, expired credentials and other + // unrecoverable errors. + // Downgrading to warn because an exception is already + // about to be thrown. + LOG.warn("While completing all active copies", e); + } + // notify the rename tracker of the failure + throw renameTracker.renameFailed(ex); + } + + // At this point the rename has completed successfully in the S3 store. + // Tell the metastore this fact and let it complete its changes + renameTracker.completeRename(); + + callbacks.finishRename(sourcePath, destPath); + return bytesCopied.get(); + } + + /** + * The source is a file: rename it to the destination. + * @throws IOException failure + */ + protected void renameFileToDest() throws IOException { + final StoreContext storeContext = getStoreContext(); + // the source is a file. + Path copyDestinationPath = destPath; + String copyDestinationKey = destKey; + S3ObjectAttributes sourceAttributes = + callbacks.createObjectAttributes(sourceStatus); + S3AReadOpContext readContext = callbacks.createReadContext(sourceStatus); + if (destStatus != null && destStatus.isDirectory()) { + // destination is a directory: build the final destination underneath + String newDestKey = maybeAddTrailingSlash(destKey); + String filename = sourceKey.substring( + storeContext.pathToKey(sourcePath.getParent()).length() + 1); + newDestKey = newDestKey + filename; + copyDestinationKey = newDestKey; + copyDestinationPath = storeContext.keyToPath(newDestKey); + } + // destination either does not exist or is a file to overwrite. + LOG.debug("rename: renaming file {} to {}", sourcePath, + copyDestinationPath); + copySourceAndUpdateTracker( + sourcePath, + sourceKey, + sourceAttributes, + readContext, + copyDestinationPath, + copyDestinationKey, + false); + bytesCopied.addAndGet(sourceStatus.getLen()); + // delete the source + callbacks.deleteObjectAtPath(sourcePath, sourceKey, true); + // and update the tracker + renameTracker.sourceObjectsDeleted(Lists.newArrayList(sourcePath)); + } + + /** + * Execute a full recursive rename. + * The source is a file: rename it to the destination. 
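For completeness, a hedged sketch of how a filesystem-level caller might construct and run the operation using the constructor and executeRename() shown earlier in this file; in the real code S3AFileSystem supplies the context, the pre-fetched statuses and the callbacks.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.S3AFileStatus;
import org.apache.hadoop.fs.s3a.impl.RenameOperation;
import org.apache.hadoop.fs.s3a.impl.StoreContext;

/** Sketch: drive a single rename through the new operation class. */
final class RenameInvocationExample {

  private RenameInvocationExample() {
  }

  static long rename(StoreContext context,
      Path source, String sourceKey, S3AFileStatus sourceStatus,
      Path dest, String destKey, S3AFileStatus destStatus,
      RenameOperation.RenameOperationCallbacks callbacks) throws IOException {
    RenameOperation operation = new RenameOperation(context,
        source, sourceKey, sourceStatus,
        dest, destKey, destStatus,
        callbacks);
    // returns the number of bytes copied; a second call would fail fast
    return operation.executeRename();
  }
}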
+ * @throws IOException failure + */ + protected void recursiveDirectoryRename() throws IOException { + final StoreContext storeContext = getStoreContext(); + + LOG.debug("rename: renaming directory {} to {}", sourcePath, destPath); + + // This is a directory-to-directory copy + String dstKey = maybeAddTrailingSlash(destKey); + String srcKey = maybeAddTrailingSlash(sourceKey); + + // Verify dest is not a child of the source directory + if (dstKey.startsWith(srcKey)) { + throw new RenameFailedException(srcKey, dstKey, + "cannot rename a directory to a subdirectory of itself "); + } + + if (destStatus != null + && destStatus.isEmptyDirectory() == Tristate.TRUE) { + // delete unnecessary fake directory at the destination. + // this MUST be done before anything else so that + // rollback code doesn't get confused and insert a tombstone + // marker. + LOG.debug("Deleting fake directory marker at destination {}", + destStatus.getPath()); + callbacks.deleteObjectAtPath(destStatus.getPath(), dstKey, false); + } + + Path parentPath = storeContext.keyToPath(srcKey); + final RemoteIterator iterator = + callbacks.listFilesAndEmptyDirectories(parentPath); + while (iterator.hasNext()) { + // get the next entry in the listing. + S3ALocatedFileStatus child = iterator.next(); + // convert it to an S3 key. + String k = storeContext.pathToKey(child.getPath()); + // possibly adding a "/" if it represents directory and it does + // not have a trailing slash already. + String key = (child.isDirectory() && !k.endsWith("/")) + ? k + "/" + : k; + // the source object to copy as a path. + Path childSourcePath = storeContext.keyToPath(key); + + // mark for deletion on a successful copy. + queueToDelete(childSourcePath, key); + + // the destination key is that of the key under the source tree, + // remapped under the new destination path. + String newDestKey = + dstKey + key.substring(srcKey.length()); + Path childDestPath = storeContext.keyToPath(newDestKey); + + // now begin the single copy + CompletableFuture copy = initiateCopy(child, key, + childSourcePath, newDestKey, childDestPath); + activeCopies.add(copy); + bytesCopied.addAndGet(sourceStatus.getLen()); + + if (activeCopies.size() == RENAME_PARALLEL_LIMIT) { + // the limit of active copies has been reached; + // wait for completion or errors to surface. + LOG.debug("Waiting for active copies to complete"); + completeActiveCopies("batch threshold reached"); + } + if (keysToDelete.size() == MAX_ENTRIES_TO_DELETE) { + // finish ongoing copies then delete all queued keys. + // provided the parallel limit is a factor of the max entry + // constant, this will not need to block for the copy, and + // simply jump straight to the delete. + completeActiveCopiesAndDeleteSources("paged delete"); + } + } // end of iteration through the list + + // await the final set of copies and their deletion + // This will notify the renameTracker that these objects + // have been deleted. + completeActiveCopiesAndDeleteSources("final copy and delete"); + + // We moved all the children, now move the top-level dir + // Empty directory should have been added as the object summary + renameTracker.moveSourceDirectory(); + } + + /** + * Initiate a copy operation in the executor. + * @param source status of the source object. + * @param key source key + * @param childSourcePath source as a path. + * @param newDestKey destination key + * @param childDestPath destination path. + * @return the future. 
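The destination key computed in the directory-rename loop above is a pure string remapping of each child key from the source tree into the destination tree. Extracted as a standalone helper purely for clarity (not part of the patch):

/** Illustration of the key remapping used during a recursive rename. */
final class RenameKeyMapping {

  private RenameKeyMapping() {
  }

  /**
   * Remap a child key from the source tree to the destination tree,
   * e.g. ("src/", "dest/", "src/dir/file") becomes "dest/dir/file".
   */
  static String remap(String srcKey, String dstKey, String childKey) {
    return dstKey + childKey.substring(srcKey.length());
  }
}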
+ */ + protected CompletableFuture initiateCopy( + final S3ALocatedFileStatus source, + final String key, + final Path childSourcePath, + final String newDestKey, + final Path childDestPath) { + S3ObjectAttributes sourceAttributes = + callbacks.createObjectAttributes( + source.getPath(), + source.getETag(), + source.getVersionId(), + source.getLen()); + // queue the copy operation for execution in the thread pool + return submit(getStoreContext().getExecutor(), () -> + copySourceAndUpdateTracker( + childSourcePath, + key, + sourceAttributes, + callbacks.createReadContext(source), + childDestPath, + newDestKey, + true)); + } + + /** + * This invoked to copy a file or directory marker then update the + * rename operation on success. + * It may be called in its own thread. + * @param sourceFile source path of the copy; may have a trailing / on it. + * @param srcKey source key + * @param srcAttributes status of the source object + * @param destination destination as a qualified path. + * @param destinationKey destination key + * @param addAncestors should ancestors be added to the metastore? + * @return the destination path. + * @throws IOException failure + */ + @Retries.RetryTranslated + private Path copySourceAndUpdateTracker( + final Path sourceFile, + final String srcKey, + final S3ObjectAttributes srcAttributes, + final S3AReadOpContext readContext, + final Path destination, + final String destinationKey, + final boolean addAncestors) throws IOException { + long len = srcAttributes.getLen(); + CopyResult copyResult; + try (DurationInfo ignored = new DurationInfo(LOG, false, + "Copy file from %s to %s (length=%d)", srcKey, destinationKey, len)) { + copyResult = callbacks.copyFile(srcKey, destinationKey, + srcAttributes, readContext); + } + if (objectRepresentsDirectory(srcKey, len)) { + renameTracker.directoryMarkerCopied( + sourceFile, + destination, + addAncestors); + } else { + S3ObjectAttributes destAttributes = new S3ObjectAttributes( + destination, + copyResult, + srcAttributes.getServerSideEncryptionAlgorithm(), + srcAttributes.getServerSideEncryptionKey(), + len); + renameTracker.fileCopied( + sourceFile, + srcAttributes, + destAttributes, + destination, + blocksize, + addAncestors); + } + return destination; + } + + /** + * Remove source objects and update the metastore by way of + * the rename tracker. + * @param keys list of keys to delete + * @param paths list of paths matching the keys to delete 1:1. + * @throws IOException failure + */ + @Retries.RetryMixed + private void removeSourceObjects( + final List keys, + final List paths) + throws IOException { + List undeletedObjects = new ArrayList<>(); + try { + // remove the keys + // this will update the metastore on a failure, but on + // a successful operation leaves the store as is. + callbacks.removeKeys(keys, false, undeletedObjects); + // and clear the list. + } catch (AmazonClientException | IOException e) { + // Failed. + // Notify the rename operation. + // removeKeys will have already purged the metastore of + // all keys it has known to delete; this is just a final + // bit of housekeeping and a chance to tune exception + // reporting + throw renameTracker.deleteFailed(e, paths, undeletedObjects); + } + renameTracker.sourceObjectsDeleted(paths); + } + + /** + * Turns a path (relative or otherwise) into an S3 key, adding a trailing + * "/" if the path is not the root and does not already have a "/" + * at the end. 
+ * + * @param key s3 key or "" + * @return the with a trailing "/", or, if it is the root key, "", + */ + private String maybeAddTrailingSlash(String key) { + if (!key.isEmpty() && !key.endsWith("/")) { + return key + '/'; + } else { + return key; + } + } + + /** + * These are all the callbacks which the rename operation needs, + * derived from the appropriate S3AFileSystem methods. + */ + public interface RenameOperationCallbacks { + + /** + * Create the attributes of an object for subsequent use. + * @param path path path of the request. + * @param eTag the eTag of the S3 object + * @param versionId S3 object version ID + * @param len length of the file + * @return attributes to use when building the query. + */ + S3ObjectAttributes createObjectAttributes( + Path path, + String eTag, + String versionId, + long len); + + /** + * Create the attributes of an object for subsequent use. + * @param fileStatus file status to build from. + * @return attributes to use when building the query. + */ + S3ObjectAttributes createObjectAttributes( + S3AFileStatus fileStatus); + + /** + * Create the read context for reading from the referenced file, + * using FS state as well as the status. + * @param fileStatus file status. + * @return a context for read and select operations. + */ + S3AReadOpContext createReadContext( + FileStatus fileStatus); + + /** + * The rename has finished; perform any store cleanup operations + * such as creating/deleting directory markers. + * @param sourceRenamed renamed source + * @param destCreated destination file created. + * @throws IOException failure + */ + void finishRename(Path sourceRenamed, Path destCreated) throws IOException; + + /** + * Delete an object, also updating the metastore. + * This call does not create any mock parent entries. + * Retry policy: retry untranslated; delete considered idempotent. + * @param path path to delete + * @param key key of entry + * @param isFile is the path a file (used for instrumentation only) + * @throws AmazonClientException problems working with S3 + * @throws IOException IO failure in the metastore + */ + @Retries.RetryMixed + void deleteObjectAtPath(Path path, String key, boolean isFile) + throws IOException; + + /** + * Recursive list of files and empty directories. + * @param path path to list from + * @return an iterator. + * @throws IOException failure + */ + RemoteIterator listFilesAndEmptyDirectories( + Path path) throws IOException; + + /** + * Copy a single object in the bucket via a COPY operation. + * There's no update of metadata, directory markers, etc. + * Callers must implement. + * @param srcKey source object path + * @param srcAttributes S3 attributes of the source object + * @param readContext the read context + * @return the result of the copy + * @throws InterruptedIOException the operation was interrupted + * @throws IOException Other IO problems + */ + @Retries.RetryTranslated + CopyResult copyFile(String srcKey, + String destKey, + S3ObjectAttributes srcAttributes, + S3AReadOpContext readContext) + throws IOException; + + /** + * Remove keys from the store, updating the metastore on a + * partial delete represented as a MultiObjectDeleteException failure by + * deleting all those entries successfully deleted and then rethrowing + * the MultiObjectDeleteException. + * @param keysToDelete collection of keys to delete on the s3-backend. + * if empty, no request is made of the object store. + * @param deleteFakeDir indicates whether this is for deleting fake dirs. 
+ * @param undeletedObjectsOnFailure List which will be built up of all + * files that were not deleted. This happens even as an exception + * is raised. + * @throws InvalidRequestException if the request was rejected due to + * a mistaken attempt to delete the root directory. + * @throws MultiObjectDeleteException one or more of the keys could not + * be deleted in a multiple object delete operation. + * @throws AmazonClientException amazon-layer failure. + * @throws IOException other IO Exception. + */ + @Retries.RetryMixed + void removeKeys( + List keysToDelete, + boolean deleteFakeDir, + List undeletedObjectsOnFailure) + throws MultiObjectDeleteException, AmazonClientException, + IOException; + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java new file mode 100644 index 0000000000000..28300c2d0547d --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java @@ -0,0 +1,335 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.File; +import java.io.IOException; +import java.net.URI; + +import com.google.common.util.concurrent.ListeningExecutorService; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.Invoker; +import org.apache.hadoop.fs.s3a.S3AInputPolicy; +import org.apache.hadoop.fs.s3a.S3AInstrumentation; +import org.apache.hadoop.fs.s3a.S3AStorageStatistics; +import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; +import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.SemaphoredDelegatingExecutor; + +/** + * This class provides the core context of the S3A filesystem to subsidiary + * components, without exposing the entire parent class. + * This is eliminate explicit recursive coupling. + * + * Where methods on the FS are to be invoked, they are referenced + * via the {@link ContextAccessors} interface, so tests can implement + * their own. + * + * Warning: this really is private and unstable. Do not use + * outside the org.apache.hadoop.fs.s3a package. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class StoreContext { + + /** Filesystem URI. */ + private final URI fsURI; + + /** Bucket name. */ + private final String bucket; + + /** FS configuration after all per-bucket overrides applied. 
*/ + private final Configuration configuration; + + /** Username. */ + private final String username; + + /** Principal who created the FS. */ + private final UserGroupInformation owner; + + /** + * Bounded thread pool for async operations. + */ + private final ListeningExecutorService executor; + + /** + * Capacity of new executors created. + */ + private final int executorCapacity; + + /** Invoker of operations. */ + private final Invoker invoker; + + /** Instrumentation and statistics. */ + private final S3AInstrumentation instrumentation; + private final S3AStorageStatistics storageStatistics; + + /** Seek policy. */ + private final S3AInputPolicy inputPolicy; + + /** How to react to changes in etags and versions. */ + private final ChangeDetectionPolicy changeDetectionPolicy; + + /** Evaluated options. */ + private final boolean multiObjectDeleteEnabled; + + /** List algorithm. */ + private final boolean useListV1; + + /** + * To allow this context to be passed down to the metastore, this field + * wll be null until initialized. + */ + private final MetadataStore metadataStore; + + private final ContextAccessors contextAccessors; + + /** + * Source of time. + */ + private ITtlTimeProvider timeProvider; + + /** + * Instantiate. + * No attempt to use a builder here as outside tests + * this should only be created in the S3AFileSystem. + */ + public StoreContext( + final URI fsURI, + final String bucket, + final Configuration configuration, + final String username, + final UserGroupInformation owner, + final ListeningExecutorService executor, + final int executorCapacity, + final Invoker invoker, + final S3AInstrumentation instrumentation, + final S3AStorageStatistics storageStatistics, + final S3AInputPolicy inputPolicy, + final ChangeDetectionPolicy changeDetectionPolicy, + final boolean multiObjectDeleteEnabled, + final MetadataStore metadataStore, + final boolean useListV1, + final ContextAccessors contextAccessors, + final ITtlTimeProvider timeProvider) { + this.fsURI = fsURI; + this.bucket = bucket; + this.configuration = configuration; + this.username = username; + this.owner = owner; + this.executor = executor; + this.executorCapacity = executorCapacity; + this.invoker = invoker; + this.instrumentation = instrumentation; + this.storageStatistics = storageStatistics; + this.inputPolicy = inputPolicy; + this.changeDetectionPolicy = changeDetectionPolicy; + this.multiObjectDeleteEnabled = multiObjectDeleteEnabled; + this.metadataStore = metadataStore; + this.useListV1 = useListV1; + this.contextAccessors = contextAccessors; + this.timeProvider = timeProvider; + } + + @Override + protected Object clone() throws CloneNotSupportedException { + return super.clone(); + } + + public URI getFsURI() { + return fsURI; + } + + public String getBucket() { + return bucket; + } + + public Configuration getConfiguration() { + return configuration; + } + + public String getUsername() { + return username; + } + + public ListeningExecutorService getExecutor() { + return executor; + } + + public Invoker getInvoker() { + return invoker; + } + + public S3AInstrumentation getInstrumentation() { + return instrumentation; + } + + public S3AInputPolicy getInputPolicy() { + return inputPolicy; + } + + public ChangeDetectionPolicy getChangeDetectionPolicy() { + return changeDetectionPolicy; + } + + public boolean isMultiObjectDeleteEnabled() { + return multiObjectDeleteEnabled; + } + + public MetadataStore getMetadataStore() { + return metadataStore; + } + + public boolean isUseListV1() { + return useListV1; 
+ } + + /** + * Convert a key to a fully qualified path. + * @param key input key + * @return the fully qualified path including URI scheme and bucket name. + */ + public Path keyToPath(String key) { + return contextAccessors.keyToPath(key); + } + + /** + * Turns a path (relative or otherwise) into an S3 key. + * + * @param path input path, may be relative to the working dir + * @return a key excluding the leading "/", or, if it is the root path, "" + */ + public String pathToKey(Path path) { + return contextAccessors.pathToKey(path); + } + + /** + * Get the storage statistics of this filesystem. + * @return the storage statistics + */ + public S3AStorageStatistics getStorageStatistics() { + return storageStatistics; + } + + /** + * Increment a statistic by 1. + * This increments both the instrumentation and storage statistics. + * @param statistic The operation to increment + */ + public void incrementStatistic(Statistic statistic) { + incrementStatistic(statistic, 1); + } + + /** + * Increment a statistic by a specific value. + * This increments both the instrumentation and storage statistics. + * @param statistic The operation to increment + * @param count the count to increment + */ + public void incrementStatistic(Statistic statistic, long count) { + instrumentation.incrementCounter(statistic, count); + storageStatistics.incrementCounter(statistic, count); + } + + /** + * Decrement a gauge by a specific value. + * @param statistic The operation to decrement + * @param count the count to decrement + */ + public void decrementGauge(Statistic statistic, long count) { + instrumentation.decrementGauge(statistic, count); + } + + /** + * Increment a gauge by a specific value. + * @param statistic The operation to increment + * @param count the count to increment + */ + public void incrementGauge(Statistic statistic, long count) { + instrumentation.incrementGauge(statistic, count); + } + + /** + * Create a new executor service with a given capacity. + * This executor submits works to the {@link #executor}, using a + * {@link SemaphoredDelegatingExecutor} to limit the number + * of requests coming in from a specific client. + * + * Because this delegates to an existing thread pool, the cost of + * creating a new instance here is low. + * As the throttling is per instance, separate instances + * should be created for each operation which wishes to execute work in + * parallel without saturating the base executor. + * This is important if either the duration of each operation is long + * or the submission rate of work is high. + * @param capacity maximum capacity of this executor. + * @return an executor for submitting work. + */ + public ListeningExecutorService createThrottledExecutor(int capacity) { + return new SemaphoredDelegatingExecutor(executor, + capacity, true); + } + + /** + * Create a new executor with the capacity defined in + * {@link #executorCapacity}. + * @return a new executor for exclusive use by the caller. + */ + public ListeningExecutorService createThrottledExecutor() { + return createThrottledExecutor(executorCapacity); + } + + public UserGroupInformation getOwner() { + return owner; + } + + /** + * Create a temporary file somewhere. + * @param prefix prefix for the temporary file + * @param size expected size. + * @return a file reference. + * @throws IOException failure. + */ + public File createTempFile(String prefix, long size) throws IOException { + return contextAccessors.createTempFile(prefix, size); + } + + /** + * Get the location of the bucket. 
+ * @return the bucket location. + * @throws IOException failure. + */ + public String getBucketLocation() throws IOException { + return contextAccessors.getBucketLocation(); + } + + /** + * Get the time provider. + * @return the time source. + */ + public ITtlTimeProvider getTimeProvider() { + return timeProvider; + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java new file mode 100644 index 0000000000000..0fe05db833552 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.s3guard; + +import java.io.Closeable; +import java.io.IOException; + +/** + * This represents state which may be passed to bulk IO operations + * to enable them to store information about the state of the ongoing + * operation across invocations. + *

    + * A bulk operation state MUST only be used for the single store + * from which it was created, and MUST only be used for the duration + * of a single bulk update operation. + *

    + * Passing in the state is to allow the stores to maintain state about + * updates they have already made to their store during this single operation: + * a cache of what has happened. It is not a list of operations to be applied. + * If a list of operations to perform is built up (e.g. during rename) + * that is the duty of the caller, not this state. + *
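A minimal sketch of that lifecycle, written against the DynamoDBMetadataStore methods added later in this patch (initiateBulkWrite and the BulkOperationState-accepting put); store, destPath and entriesToWrite are assumed to be in scope, and the enclosing method is assumed to throw IOException:

    BulkOperationState state = store.initiateBulkWrite(
        BulkOperationState.OperationType.Put, destPath);
    try {
      for (PathMetadata meta : entriesToWrite) {
        // every put shares the same state, so ancestor entries created by an
        // earlier put are not re-created by a later one
        store.put(meta, state);
      }
    } finally {
      // release whatever state was held for the duration of the operation
      state.close();
    }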

    + * After the operation has completed, it MUST be closed so + * as to guarantee that all state is released. + */ +public class BulkOperationState implements Closeable { + + private final OperationType operation; + + /** + * Constructor. + * @param operation the type of the operation. + */ + public BulkOperationState(final OperationType operation) { + this.operation = operation; + } + + /** + * Get the operation type. + * @return the operation type. + */ + public OperationType getOperation() { + return operation; + } + + @Override + public void close() throws IOException { + + } + + /** + * Enumeration of operations which can be performed in bulk. + * This can be used by the stores however they want. + * One special aspect: renames are to be done through a {@link RenameTracker}. + * Callers will be blocked from initiating a rename through + * {@code S3Guard#initiateBulkWrite()} + */ + public enum OperationType { + /** Writing data. */ + Put, + /** Rename: add and delete. */ + Rename, + /** Pruning: deleting entries and updating parents. */ + Prune, + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DelayedUpdateRenameTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DelayedUpdateRenameTracker.java new file mode 100644 index 0000000000000..916714b47b89b --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DelayedUpdateRenameTracker.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.s3guard; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.amazonaws.SdkBaseException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3ObjectAttributes; +import org.apache.hadoop.fs.s3a.Tristate; +import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.util.DurationInfo; + +import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.addMoveAncestors; +import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.addMoveDir; +import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.addMoveFile; + +/** + * This is the rename updating strategy originally used: + * a collection of source paths and a list of destinations are created, + * then updated at the end (possibly slow). + *
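In outline, the tracker model means the rename operation earlier in this patch drives a sequence of callbacks against whichever RenameTracker the metastore returns; a rough sketch, with srcAttrs, destAttrs, dstPath, deletedPaths and blockSize as placeholder variables:

    RenameTracker tracker = metadataStore.initiateRenameOperation(
        storeContext, sourcePath, sourceStatus, destPath);
    try {
      // for each object copied to the destination:
      tracker.fileCopied(srcPath, srcAttrs, destAttrs, dstPath, blockSize, true);
      // for each batch of source objects deleted:
      tracker.sourceObjectsDeleted(deletedPaths);
      // once the source directory itself has been dealt with:
      tracker.moveSourceDirectory();
      // and on success:
      tracker.completeRename();
    } catch (IOException e) {
      // on failure the tracker chooses what to rethrow
      throw tracker.renameFailed(e);
    }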

    + * It is not currently instantiated by any of the active trackers, + * but is preserved to show that the original rename strategy + * can be implemented via the tracker model. + */ +public class DelayedUpdateRenameTracker extends RenameTracker { + + private final MetadataStore metadataStore; + + private final Collection sourcePaths = new HashSet<>(); + + private final List destMetas = new ArrayList<>(); + + private final List deletedPaths = new ArrayList<>(); + + public DelayedUpdateRenameTracker( + final StoreContext storeContext, + final MetadataStore metadataStore, + final Path sourceRoot, + final Path dest, + final BulkOperationState operationState) { + super("DelayedUpdateRenameTracker", storeContext, metadataStore, + sourceRoot, dest, operationState); + this.metadataStore = storeContext.getMetadataStore(); + } + + @Override + public synchronized void fileCopied( + final Path sourcePath, + final S3ObjectAttributes sourceAttributes, + final S3ObjectAttributes destAttributes, + final Path destPath, + final long blockSize, + final boolean addAncestors) throws IOException { + + addMoveFile(metadataStore, + sourcePaths, + destMetas, + sourcePath, + destPath, + sourceAttributes.getLen(), + blockSize, + getOwner(), + destAttributes.getETag(), + destAttributes.getVersionId()); + // Ancestor directories may not be listed, so we explicitly add them + if (addAncestors) { + addMoveAncestors(metadataStore, + sourcePaths, + destMetas, + getSourceRoot(), + sourcePath, + destPath, + getOwner()); + } + } + + @Override + public synchronized void directoryMarkerCopied(final Path sourcePath, + final Path destPath, + final boolean addAncestors) throws IOException { + addMoveDir(metadataStore, sourcePaths, destMetas, + sourcePath, + destPath, getOwner()); + // Ancestor directories may not be listed, so we explicitly add them + if (addAncestors) { + addMoveAncestors(metadataStore, + sourcePaths, + destMetas, + getSourceRoot(), + sourcePath, + destPath, + getOwner()); + } + } + + @Override + public synchronized void moveSourceDirectory() throws IOException { + if (!sourcePaths.contains(getSourceRoot())) { + addMoveDir(metadataStore, sourcePaths, destMetas, + getSourceRoot(), + getDest(), getOwner()); + } + } + + @Override + public synchronized void sourceObjectsDeleted( + final Collection paths) throws IOException { + // add to the list of deleted paths. + deletedPaths.addAll(paths); + } + + @Override + public void completeRename() throws IOException { + metadataStore.move(sourcePaths, destMetas, + getStoreContext().getTimeProvider(), + getOperationState()); + super.completeRename(); + } + + @Override + public IOException renameFailed(final Exception ex) { + LOG.warn("Rename has failed; updating s3guard with destination state"); + try (DurationInfo ignored = new DurationInfo(LOG, + "Cleaning up deleted paths")) { + // the destination paths are updated; the source is left alone. + metadataStore.move(new ArrayList<>(0), destMetas, + getStoreContext().getTimeProvider(), + getOperationState()); + for (Path deletedPath : deletedPaths) { + // this is not ideal in that it may leave parent stuff around. + metadataStore.delete(deletedPath, getStoreContext().getTimeProvider()); + } + deleteParentPaths(); + } catch (IOException | SdkBaseException e) { + LOG.warn("Ignoring error raised in AWS SDK ", e); + } + + return super.renameFailed(ex); + } + + /** + * Delete all the parent paths we know to be empty (by walking up the tree + * deleting as appropriate). 
+ * @throws IOException failure + */ + private void deleteParentPaths() throws IOException { + Set parentPaths = new HashSet<>(); + for (Path deletedPath : deletedPaths) { + Path parent = deletedPath.getParent(); + if (!parent.equals(getSourceRoot())) { + parentPaths.add(parent); + } + } + // now there's a set of parent paths. We now want to + // get them ordered by depth, so that deeper entries come first + // that way: when we check for a parent path existing we can + // see if it really is empty. + List parents = new ArrayList<>(parentPaths); + parents.sort(PathOrderComparators.TOPMOST_PATH_LAST); + for (Path parent : parents) { + PathMetadata md = metadataStore.get(parent, true); + if (md != null && md.isEmptyDirectory() == Tristate.TRUE) { + // if were confident that this is empty: delete it. + metadataStore.delete(parent, getStoreContext().getTimeProvider()); + } + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java index f668c6affdc92..9f0631309fd4d 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.s3a.s3guard; +import javax.annotation.Nullable; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InterruptedIOException; @@ -35,6 +36,7 @@ import java.util.Objects; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -59,6 +61,7 @@ import com.amazonaws.services.dynamodbv2.document.spec.GetItemSpec; import com.amazonaws.services.dynamodbv2.document.spec.QuerySpec; import com.amazonaws.services.dynamodbv2.document.utils.ValueMap; +import com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException; import com.amazonaws.services.dynamodbv2.model.BillingMode; import com.amazonaws.services.dynamodbv2.model.CreateTableRequest; import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughput; @@ -73,6 +76,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ListeningExecutorService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -83,6 +87,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.s3a.AWSClientIOException; import org.apache.hadoop.fs.s3a.AWSCredentialProviderList; import org.apache.hadoop.fs.s3a.AWSServiceThrottledException; @@ -97,15 +102,19 @@ import org.apache.hadoop.fs.s3a.auth.RoleModel; import org.apache.hadoop.fs.s3a.auth.RolePolicies; import org.apache.hadoop.fs.s3a.auth.delegation.AWSPolicyProvider; +import org.apache.hadoop.fs.s3a.impl.StoreContext; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.BlockingThreadPoolExecutorService; import org.apache.hadoop.util.ReflectionUtils; import static org.apache.hadoop.fs.s3a.Constants.*; import static 
org.apache.hadoop.fs.s3a.S3AUtils.*; import static org.apache.hadoop.fs.s3a.auth.RolePolicies.allowAllDynamoDBOperations; import static org.apache.hadoop.fs.s3a.auth.RolePolicies.allowS3GuardClientOperations; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion; import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.*; import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.*; @@ -191,7 +200,7 @@ * * Some mutating operations, notably * {@link MetadataStore#deleteSubtree(Path, ITtlTimeProvider)} and - * {@link MetadataStore#move(Collection, Collection, ITtlTimeProvider)}, + * {@link MetadataStore#move(Collection, Collection, ITtlTimeProvider, BulkOperationState)} * are less efficient with this schema. * They require mutating multiple items in the DynamoDB table. * @@ -256,9 +265,25 @@ public class DynamoDBMetadataStore implements MetadataStore, public static final String E_ON_DEMAND_NO_SET_CAPACITY = "Neither ReadCapacityUnits nor WriteCapacityUnits can be specified when BillingMode is PAY_PER_REQUEST"; + @VisibleForTesting + static final String E_INCONSISTENT_UPDATE + = "Duplicate and inconsistent entry in update operation"; + private static ValueMap deleteTrackingValueMap = new ValueMap().withBoolean(":false", false); + /** + * The maximum number of outstanding operations to submit + * before blocking to await completion of all the executors. + * Paging work like this is less efficient, but it ensures that + * failure (auth, network, etc) are picked up before many more + * operations are submitted. + * + * Arbitrary Choice. + * Value: {@value}. + */ + private static final int S3GUARD_DDB_SUBMITTED_TASK_LIMIT = 50; + private AmazonDynamoDB amazonDynamoDB; private DynamoDB dynamoDB; private AWSCredentialProviderList credentials; @@ -308,6 +333,17 @@ public class DynamoDBMetadataStore implements MetadataStore, */ private AtomicInteger throttleEventCount = new AtomicInteger(0); + /** + * Executor for submitting operations. + */ + private ListeningExecutorService executor; + + /** + * Time source. This is used during writes when parent + * entries need to be created. + */ + private ITtlTimeProvider timeProvider; + /** * A utility function to create DynamoDB instance. * @param conf the file system configuration @@ -383,6 +419,7 @@ public void initialize(FileSystem fs) throws IOException { this::retryEvent ); + timeProvider = new S3Guard.TtlTimeProvider(conf); initTable(); instrumentation.initialized(); @@ -398,8 +435,13 @@ public void initialize(FileSystem fs) throws IOException { void bindToOwnerFilesystem(final S3AFileSystem fs) { owner = fs; conf = owner.getConf(); - instrumentation = owner.getInstrumentation().getS3GuardInstrumentation(); - username = owner.getUsername(); + StoreContext context = owner.createStoreContext(); + instrumentation = context.getInstrumentation().getS3GuardInstrumentation(); + username = context.getUsername(); + executor = context.createThrottledExecutor(); + timeProvider = Preconditions.checkNotNull( + context.getTimeProvider(), + "ttlTimeProvider must not be null"); } /** @@ -444,8 +486,19 @@ public void initialize(Configuration config) throws IOException { dynamoDB = createDynamoDB(conf, region, null, credentials); username = UserGroupInformation.getCurrentUser().getShortUserName(); + // without an executor from the owner FS, create one using + // the executor capacity for work. 
+ int executorCapacity = intOption(conf, + EXECUTOR_CAPACITY, DEFAULT_EXECUTOR_CAPACITY, 1); + executor = BlockingThreadPoolExecutorService.newInstance( + executorCapacity, + executorCapacity * 2, + longOption(conf, KEEPALIVE_TIME, + DEFAULT_KEEPALIVE_TIME, 0), + TimeUnit.SECONDS, + "s3a-ddb-" + tableName); initDataAccessRetries(conf); - + timeProvider = new S3Guard.TtlTimeProvider(conf); initTable(); } @@ -481,6 +534,7 @@ public void delete(Path path, ITtlTimeProvider ttlTimeProvider) @Override @Retries.RetryTranslated public void forgetMetadata(Path path) throws IOException { + LOG.debug("Forget metadata for {}", path); innerDelete(path, false, null); } @@ -522,14 +576,23 @@ private void innerDelete(final Path path, boolean tombstone, "Put tombstone", path.toString(), idempotent, - () -> table.putItem(item)); + () -> { + LOG.debug("Adding tombstone to {}", path); + recordsWritten(1); + table.putItem(item); + }); } else { PrimaryKey key = pathToKey(path); writeOp.retry( "Delete key", path.toString(), idempotent, - () -> table.deleteItem(key)); + () -> { + // record the attempt so even on retry the counter goes up. + LOG.debug("Delete key {}", path); + recordsDeleted(1); + table.deleteItem(key); + }); } } @@ -547,16 +610,28 @@ public void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider) return; } + // Execute via the bounded threadpool. + final List> futures = new ArrayList<>(); for (DescendantsIterator desc = new DescendantsIterator(this, meta); desc.hasNext();) { - innerDelete(desc.next().getPath(), true, ttlTimeProvider); + final Path pathToDelete = desc.next().getPath(); + futures.add(submit(executor, () -> { + innerDelete(pathToDelete, true, ttlTimeProvider); + return null; + })); + if (futures.size() > S3GUARD_DDB_SUBMITTED_TASK_LIMIT) { + // first batch done; block for completion. + waitForCompletion(futures); + futures.clear(); + } } + // now wait for the final set. + waitForCompletion(futures); } /** * Get a consistent view of an item. * @param path path to look up in the database - * @param path entry * @return the result * @throws IOException failure */ @@ -569,7 +644,10 @@ private Item getConsistentItem(final Path path) throws IOException { return readOp.retry("get", path.toString(), true, - () -> table.getItem(spec)); + () -> { + recordsRead(1); + return table.getItem(spec); + }); } @Override @@ -583,8 +661,11 @@ public DDBPathMetadata get(Path path) throws IOException { public DDBPathMetadata get(Path path, boolean wantEmptyDirectoryFlag) throws IOException { checkPath(path); - LOG.debug("Get from table {} in region {}: {}", tableName, region, path); - return innerGet(path, wantEmptyDirectoryFlag); + LOG.debug("Get from table {} in region {}: {}. wantEmptyDirectory={}", + tableName, region, path, wantEmptyDirectoryFlag); + DDBPathMetadata result = innerGet(path, wantEmptyDirectoryFlag); + LOG.debug("result of get {} is: {}", path, result); + return result; } /** @@ -711,40 +792,194 @@ DirListingMetadata getDirListingMetadataFromDirMetaAndList(Path path, } /** - * build the list of all parent entries. + * Build the list of all parent entries. + *

    + * Thread safety: none. Callers must synchronize access. + *

    + * Callers are required to synchronize on ancestorState. * @param pathsToCreate paths to create + * @param ancestorState ongoing ancestor state. + * @param ttlTimeProvider Must not be null * @return the full ancestry paths */ - Collection completeAncestry( - Collection pathsToCreate) { - // Key on path to allow fast lookup - Map ancestry = new HashMap<>(); - - for (DDBPathMetadata meta : pathsToCreate) { + private Collection completeAncestry( + final Collection pathsToCreate, + final AncestorState ancestorState, + final ITtlTimeProvider ttlTimeProvider) throws PathIOException { + List ancestorsToAdd = new ArrayList<>(0); + LOG.debug("Completing ancestry for {} paths", pathsToCreate.size()); + // we sort the inputs to guarantee that the topmost entries come first. + // that way if the put request contains both parents and children + // then the existing parents will not be re-created -they will just + // be added to the ancestor list first. + List sortedPaths = new ArrayList<>(pathsToCreate); + sortedPaths.sort(PathOrderComparators.TOPMOST_PM_FIRST); + // iterate through the paths. + for (DDBPathMetadata meta : sortedPaths) { Preconditions.checkArgument(meta != null); Path path = meta.getFileStatus().getPath(); + LOG.debug("Adding entry {}", path); if (path.isRoot()) { + // this is a root entry: do not add it. break; } - ancestry.put(path, new DDBPathMetadata(meta)); + // create the new entry + DDBPathMetadata entry = new DDBPathMetadata(meta); + // add it to the ancestor state, failing if it is already there and + // of a different type. + DDBPathMetadata oldEntry = ancestorState.put(path, entry); + if (oldEntry != null) { + if (!oldEntry.getFileStatus().isDirectory() + || !entry.getFileStatus().isDirectory()) { + // check for and warn if the existing bulk operation overwrote it. + // this should never occur outside tests explicitly crating it + LOG.warn("Overwriting a S3Guard file created in the operation: {}", + oldEntry); + LOG.warn("With new entry: {}", entry); + // restore the old state + ancestorState.put(path, oldEntry); + // then raise an exception + throw new PathIOException(path.toString(), E_INCONSISTENT_UPDATE); + } else { + // a directory is already present. Log and continue. + LOG.debug("Directory at {} being updated with value {}", + path, entry); + } + } + ancestorsToAdd.add(entry); Path parent = path.getParent(); - while (!parent.isRoot() && !ancestry.containsKey(parent)) { - LOG.debug("auto-create ancestor path {} for child path {}", - parent, path); - final S3AFileStatus status = makeDirStatus(parent, username); - ancestry.put(parent, new DDBPathMetadata(status, Tristate.FALSE, - false)); + while (!parent.isRoot()) { + if (!ancestorState.findEntry(parent, true)) { + // don't add this entry, but carry on with the parents + LOG.debug("auto-create ancestor path {} for child path {}", + parent, path); + final S3AFileStatus status = makeDirStatus(parent, username); + DDBPathMetadata md = new DDBPathMetadata(status, Tristate.FALSE, + false, false, ttlTimeProvider.getNow()); + ancestorState.put(parent, md); + ancestorsToAdd.add(md); + } parent = parent.getParent(); } } - return ancestry.values(); + return ancestorsToAdd; } + /** + * {@inheritDoc} + *

    + * The implementation scans up the directory tree and does a get() + * for each entry; at each level where an entry is found, it is added + * to the ancestor state. + *

    + * The original implementation would stop on finding the first non-empty + * parent. This (re) implementation issues a GET for every parent entry + * and so detects and recovers from a tombstone marker further up the tree + * (i.e. an inconsistent store is corrected for). + *
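Stripped of the ancestor-state caching and synchronization shown in the method below, the walk reduces to roughly the following sketch (get() and PathIOException are the ones used in the real implementation):

    Path parent = qualifiedPath.getParent();
    while (!parent.isRoot()) {
      PathMetadata directory = get(parent);   // one GET per level
      if (directory == null || directory.isDeleted()) {
        // missing or tombstoned: queue a new directory entry for this parent
      } else if (directory.getFileStatus().isFile()) {
        // a file where a directory is expected: the store is inconsistent
        throw new PathIOException(parent.toString(), "parent is a file");
      }
      parent = parent.getParent();
    }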

    + * if {@code operationState} is not null, when this method returns the + * operation state will be updated with all new entries created. + * This ensures that subsequent operations with the same store will not + * trigger new updates. + * @param qualifiedPath path to update + * @param operationState (nullable) operational state for a bulk update + * @throws IOException on failure. + */ + @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") @Override @Retries.RetryTranslated - public void move(Collection pathsToDelete, - Collection pathsToCreate, ITtlTimeProvider ttlTimeProvider) - throws IOException { + public void addAncestors( + final Path qualifiedPath, + final ITtlTimeProvider ttlTimeProvider, + @Nullable final BulkOperationState operationState) throws IOException { + + Collection newDirs = new ArrayList<>(); + final AncestorState ancestorState = extractOrCreate(operationState, + BulkOperationState.OperationType.Rename); + Path parent = qualifiedPath.getParent(); + boolean entryFound = false; + + // Iterate up the parents. + // note that only ancestorState get/set operations are synchronized; + // the DDB read between them is not. As a result, more than one + // thread may probe the state, find the entry missing, do the database + // query and add the entry. + // This is done to avoid making the remote dynamo query part of the + // synchronized block. + // If a race does occur, the cost is simply one extra GET and potentially + // one extra PUT. + while (!parent.isRoot()) { + synchronized (ancestorState) { + if (ancestorState.contains(parent)) { + // the ancestry map contains the key, so no need to even look for it. + break; + } + } + // we don't worry about tombstone expiry here as expired or not, + // a directory entry will go in. + PathMetadata directory = get(parent); + if (directory == null || directory.isDeleted()) { + if (entryFound) { + LOG.warn("Inconsistent S3Guard table: adding directory {}", parent); + } + S3AFileStatus status = makeDirStatus(username, parent); + LOG.debug("Adding new ancestor entry {}", status); + DDBPathMetadata meta = new DDBPathMetadata(status, Tristate.FALSE, + false); + newDirs.add(meta); + // Do not update ancestor state here, as it + // will happen in the innerPut() call. Were we to add it + // here that put operation would actually (mistakenly) skip + // creating the entry. + } else { + // an entry was found. Check its type + entryFound = true; + if (directory.getFileStatus().isFile()) { + throw new PathIOException(parent.toString(), + "Cannot overwrite parent file: metadatstore is" + + " in an inconsistent state"); + } + // the directory exists. Add it to the ancestor state for next time. + synchronized (ancestorState) { + ancestorState.put(parent, new DDBPathMetadata(directory)); + } + } + parent = parent.getParent(); + } + // the listing of directories to put is all those parents which we know + // are not in the store or BulkOperationState. + if (!newDirs.isEmpty()) { + // patch up the time. + patchLastUpdated(newDirs, ttlTimeProvider); + innerPut(newDirs, operationState, ttlTimeProvider); + } + } + + /** + * {@inheritDoc}. + * + * The DDB implementation sorts all the paths such that new items + * are ordered highest level entry first; deleted items are ordered + * lowest entry first. + * + * This is to ensure that if a client failed partway through the update, + * there will no entries in the table which lack parent entries. 
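As a worked example of that ordering (a sketch, not patch code): renaming s3a://b/src/d to s3a://b/dest/d, where d holds a single file f, writes the new entries topmost first and the tombstones deepest first, using the comparators this patch introduces:

    // new entries, topmost first: /dest, /dest/d, /dest/d/f
    newItems.sort(PathOrderComparators.TOPMOST_PM_FIRST);
    // tombstones, deepest first: /src/d/f, /src/d
    tombstones.sort(PathOrderComparators.TOPMOST_PM_LAST);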
+ * @param pathsToDelete Collection of all paths that were removed from the + * source directory tree of the move. + * @param pathsToCreate Collection of all PathMetadata for the new paths + * that were created at the destination of the rename + * (). + * @param operationState Any ongoing state supplied to the rename tracker + * which is to be passed in with each move operation. + * @throws IOException if there is an error + */ + @Override + @Retries.RetryTranslated + public void move( + @Nullable Collection pathsToDelete, + @Nullable Collection pathsToCreate, + final ITtlTimeProvider ttlTimeProvider, + @Nullable final BulkOperationState operationState) throws IOException { if (pathsToDelete == null && pathsToCreate == null) { return; } @@ -761,18 +996,37 @@ public void move(Collection pathsToDelete, // Following code is to maintain this invariant by putting all ancestor // directories of the paths to create. // ancestor paths that are not explicitly added to paths to create - Collection newItems = new ArrayList<>(); + AncestorState ancestorState = extractOrCreate(operationState, + BulkOperationState.OperationType.Rename); + List newItems = new ArrayList<>(); if (pathsToCreate != null) { - newItems.addAll(completeAncestry(pathMetaToDDBPathMeta(pathsToCreate))); + // create all parent entries. + // this is synchronized on the move state so that across both serialized + // and parallelized renames, duplicate ancestor entries are not created. + synchronized (ancestorState) { + newItems.addAll( + completeAncestry( + pathMetaToDDBPathMeta(pathsToCreate), + ancestorState, + extractTimeProvider(ttlTimeProvider))); + } } + // sort all the new items topmost first. + newItems.sort(PathOrderComparators.TOPMOST_PM_FIRST); + + // now process the deletions. if (pathsToDelete != null) { + List tombstones = new ArrayList<>(pathsToDelete.size()); for (Path meta : pathsToDelete) { Preconditions.checkArgument(ttlTimeProvider != null, "ttlTimeProvider" + " must not be null"); final PathMetadata pmTombstone = PathMetadata.tombstone(meta); pmTombstone.setLastUpdated(ttlTimeProvider.getNow()); - newItems.add(new DDBPathMetadata(pmTombstone)); + tombstones.add(new DDBPathMetadata(pmTombstone)); } + // sort all the tombstones lowest first. + tombstones.sort(PathOrderComparators.TOPMOST_PM_LAST); + newItems.addAll(tombstones); } processBatchWriteRequest(null, pathMetadataToItem(newItems)); @@ -780,9 +1034,12 @@ public void move(Collection pathsToDelete, /** * Helper method to issue a batch write request to DynamoDB. - * + *

+   * <ol>
+   *   <li>Keys to delete are processed ahead of writing new items.</li>
+   *   <li>No attempt is made to sort the input: the caller must do that</li>
+   * </ol>
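The retry-until-done behaviour described in the rest of this javadoc follows the standard unprocessed-items loop of the AWS SDK v1 document API, which the method body already uses; trimmed of the throttling and backoff, the core of it is (writeItems assumed to be a populated TableWriteItems):

    BatchWriteItemOutcome res = dynamoDB.batchWriteItem(writeItems);
    Map<String, List<WriteRequest>> unprocessed = res.getUnprocessedItems();
    while (!unprocessed.isEmpty()) {
      // resubmit only the items DynamoDB did not process last time round
      res = dynamoDB.batchWriteItemUnprocessed(unprocessed);
      unprocessed = res.getUnprocessedItems();
    }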
    * As well as retrying on the operation invocation, incomplete - * batches are retried until all have been deleted. + * batches are retried until all have been processed.. * @param keysToDelete primary keys to be deleted; can be null * @param itemsToPut new items to be put; can be null * @return the number of iterations needed to complete the call. @@ -792,6 +1049,10 @@ private int processBatchWriteRequest(PrimaryKey[] keysToDelete, Item[] itemsToPut) throws IOException { final int totalToDelete = (keysToDelete == null ? 0 : keysToDelete.length); final int totalToPut = (itemsToPut == null ? 0 : itemsToPut.length); + if (totalToPut == 0 && totalToDelete == 0) { + LOG.debug("Ignoring empty batch write request"); + return 0; + } int count = 0; int batches = 0; while (count < totalToDelete + totalToPut) { @@ -843,6 +1104,12 @@ private int processBatchWriteRequest(PrimaryKey[] keysToDelete, unprocessed = res.getUnprocessedItems(); } } + if (itemsToPut != null) { + recordsWritten(itemsToPut.length); + } + if (keysToDelete != null) { + recordsDeleted(keysToDelete.length); + } return batches; } @@ -893,7 +1160,15 @@ private void retryBackoffOnBatchWrite(int retryCount) throws IOException { @Override @Retries.RetryTranslated - public void put(PathMetadata meta) throws IOException { + public void put(final PathMetadata meta) throws IOException { + put(meta, null); + } + + @Override + @Retries.RetryTranslated + public void put( + final PathMetadata meta, + @Nullable final BulkOperationState operationState) throws IOException { // For a deeply nested path, this method will automatically create the full // ancestry and save respective item in DynamoDB table. // So after put operation, we maintain the invariant that if a path exists, @@ -904,32 +1179,77 @@ public void put(PathMetadata meta) throws IOException { Collection wrapper = new ArrayList<>(1); wrapper.add(meta); - put(wrapper); + put(wrapper, operationState); } @Override @Retries.RetryTranslated - public void put(Collection metas) throws IOException { - innerPut(pathMetaToDDBPathMeta(metas)); + public void put( + final Collection metas, + @Nullable final BulkOperationState operationState) throws IOException { + innerPut(pathMetaToDDBPathMeta(metas), operationState, timeProvider); } - @Retries.OnceRaw - private void innerPut(Collection metas) throws IOException { - Item[] items = pathMetadataToItem(completeAncestry(metas)); + /** + * Internal put operation. + *

    + * The ancestors to all entries are added to the set of entries to write, + * provided they are not already stored in any supplied operation state. + * Both the supplied metadata entries and ancestor entries are sorted + * so that the topmost entries are written first. + * This is to ensure that a failure partway through the operation will not + * create entries in the table without parents. + * @param metas metadata entries to write. + * @param operationState (nullable) operational state for a bulk update + * @param ttlTimeProvider + * @throws IOException failure. + */ + @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") + @Retries.RetryTranslated + private void innerPut( + final Collection metas, + @Nullable final BulkOperationState operationState, + final ITtlTimeProvider ttlTimeProvider) throws IOException { + if (metas.isEmpty()) { + // Happens when someone calls put() with an empty list. + LOG.debug("Ignoring empty list of entries to put"); + return; + } + // always create or retrieve an ancestor state instance, so it can + // always be used for synchronization. + final AncestorState ancestorState = extractOrCreate(operationState, + BulkOperationState.OperationType.Put); + + Item[] items; + synchronized (ancestorState) { + items = pathMetadataToItem( + completeAncestry(metas, ancestorState, ttlTimeProvider)); + } LOG.debug("Saving batch of {} items to table {}, region {}", items.length, tableName, region); processBatchWriteRequest(null, items); } /** - * Helper method to get full path of ancestors that are nonexistent in table. + * Get full path of ancestors that are nonexistent in table. + * + * This queries DDB when looking for parents which are not in + * any supplied ongoing operation state. + * Updates the operation state with found entries to reduce further checks. + * + * @param meta metadata to put + * @param operationState ongoing bulk state + * @return a possibly empty list of entries to put. + * @throws IOException failure */ + @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") @VisibleForTesting @Retries.RetryTranslated - Collection fullPathsToPut(DDBPathMetadata meta) + List fullPathsToPut(DDBPathMetadata meta, + @Nullable BulkOperationState operationState) throws IOException { checkPathMetadata(meta); - final Collection metasToPut = new ArrayList<>(); + final List metasToPut = new ArrayList<>(); // root path is not persisted if (!meta.getFileStatus().getPath().isRoot()) { metasToPut.add(meta); @@ -937,8 +1257,15 @@ Collection fullPathsToPut(DDBPathMetadata meta) // put all its ancestors if not present; as an optimization we return at its // first existent ancestor + final AncestorState ancestorState = extractOrCreate(operationState, + BulkOperationState.OperationType.Put); Path path = meta.getFileStatus().getPath().getParent(); while (path != null && !path.isRoot()) { + synchronized (ancestorState) { + if (ancestorState.findEntry(path, true)) { + break; + } + } final Item item = getConsistentItem(path); if (!itemExists(item)) { final S3AFileStatus status = makeDirStatus(path, username); @@ -946,12 +1273,23 @@ Collection fullPathsToPut(DDBPathMetadata meta) meta.isAuthoritativeDir(), meta.getLastUpdated())); path = path.getParent(); } else { + // found the entry in the table, so add it to the ancestor state + synchronized (ancestorState) { + ancestorState.put(path, itemToPathMetadata(item, username)); + } + // then break out of the loop. break; } } return metasToPut; } + /** + * Does an item represent an object which exists? 
+ * @param item item retrieved in a query. + * @return true iff the item isn't null and, if there is an is_deleted + * column, that its value is false. + */ private boolean itemExists(Item item) { if (item == null) { return false; @@ -963,7 +1301,7 @@ private boolean itemExists(Item item) { return true; } - /** Create a directory FileStatus using current system time as mod time. */ + /** Create a directory FileStatus using 0 for the lastUpdated time. */ static S3AFileStatus makeDirStatus(Path f, String owner) { return new S3AFileStatus(Tristate.UNKNOWN, f, owner); } @@ -974,11 +1312,14 @@ static S3AFileStatus makeDirStatus(Path f, String owner) { * the call to {@link #processBatchWriteRequest(PrimaryKey[], Item[])} * is only tried once. * @param meta Directory listing metadata. + * @param operationState operational state for a bulk update * @throws IOException IO problem */ @Override @Retries.RetryTranslated - public void put(DirListingMetadata meta) throws IOException { + public void put( + final DirListingMetadata meta, + @Nullable final BulkOperationState operationState) throws IOException { LOG.debug("Saving to table {} in region {}: {}", tableName, region, meta); // directory path @@ -986,14 +1327,25 @@ public void put(DirListingMetadata meta) throws IOException { DDBPathMetadata ddbPathMeta = new DDBPathMetadata(makeDirStatus(path, username), meta.isEmpty(), false, meta.isAuthoritative(), meta.getLastUpdated()); - + // put all its ancestors if not present; as an optimization we return at its + // first existent ancestor + final AncestorState ancestorState = extractOrCreate(operationState, + BulkOperationState.OperationType.Put); // First add any missing ancestors... - final Collection metasToPut = fullPathsToPut(ddbPathMeta); + final List metasToPut = fullPathsToPut(ddbPathMeta, + ancestorState); // next add all children of the directory metasToPut.addAll(pathMetaToDDBPathMeta(meta.getListing())); + // sort so highest-level entries are written to the store first. + // if a sequence fails, no orphan entries will have been written. + metasToPut.sort(PathOrderComparators.TOPMOST_PM_FIRST); processBatchWriteRequest(null, pathMetadataToItem(metasToPut)); + // and add the ancestors + synchronized (ancestorState) { + metasToPut.forEach(ancestorState::put); + } } @Override @@ -1026,6 +1378,10 @@ public void destroy() throws IOException { invoker.retry("delete", null, true, () -> table.delete()); table.waitForDelete(); + } catch (IllegalArgumentException ex) { + throw new TableDeleteTimeoutException(tableName, + "Timeout waiting for the table " + tableArn + " to be deleted", + ex); } catch (FileNotFoundException rnfe) { LOG.info("FileNotFoundException while deleting DynamoDB table {} in " + "region {}. 
This may indicate that the table does not exist, " @@ -1099,69 +1455,98 @@ public void prune(PruneMode pruneMode, long cutoff) throws IOException { @Retries.RetryTranslated public void prune(PruneMode pruneMode, long cutoff, String keyPrefix) throws IOException { + LOG.debug("Prune files under {} with age {}", keyPrefix, cutoff); final ItemCollection items = expiredFiles(pruneMode, cutoff, keyPrefix); - innerPrune(items); + innerPrune(keyPrefix, items); } - private void innerPrune(ItemCollection items) + private void innerPrune(String keyPrefix, ItemCollection items) throws IOException { int itemCount = 0; - try { - Collection deletionBatch = + try (AncestorState state = initiateBulkWrite( + BulkOperationState.OperationType.Prune, null)) { + ArrayList deletionBatch = new ArrayList<>(S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT); long delay = conf.getTimeDuration( S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY, S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_DEFAULT, TimeUnit.MILLISECONDS); Set parentPathSet = new HashSet<>(); + Set clearedParentPathSet = new HashSet<>(); for (Item item : items) { DDBPathMetadata md = PathMetadataDynamoDBTranslation .itemToPathMetadata(item, username); Path path = md.getFileStatus().getPath(); deletionBatch.add(path); - // add parent path of what we remove + // add parent path of what we remove if it has not + // already been processed Path parentPath = path.getParent(); - if (parentPath != null) { + if (parentPath != null && !clearedParentPathSet.contains(parentPath)) { parentPathSet.add(parentPath); } itemCount++; if (deletionBatch.size() == S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT) { - Thread.sleep(delay); + // lowest path entries get deleted first. + deletionBatch.sort(PathOrderComparators.TOPMOST_PATH_LAST); processBatchWriteRequest(pathToKey(deletionBatch), null); // set authoritative false for each pruned dir listing - removeAuthoritativeDirFlag(parentPathSet); + removeAuthoritativeDirFlag(parentPathSet, state); + // already cleared parent paths. + clearedParentPathSet.addAll(parentPathSet); parentPathSet.clear(); deletionBatch.clear(); + if (delay > 0) { + Thread.sleep(delay); + } } } // final batch of deletes if (!deletionBatch.isEmpty()) { - Thread.sleep(delay); processBatchWriteRequest(pathToKey(deletionBatch), null); // set authoritative false for each pruned dir listing - removeAuthoritativeDirFlag(parentPathSet); + removeAuthoritativeDirFlag(parentPathSet, state); parentPathSet.clear(); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new InterruptedIOException("Pruning was interrupted"); + } catch (AmazonDynamoDBException e) { + throw translateDynamoDBException(keyPrefix, + "Prune of " + keyPrefix + " failed", e); } LOG.info("Finished pruning {} items in batches of {}", itemCount, S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT); } - private void removeAuthoritativeDirFlag(Set pathSet) - throws IOException { + /** + * Remove the Authoritative Directory Marker from a set of paths, if + * those paths are in the store. + * If an exception is raised in the get/update process, then the exception + * is caught and only rethrown after all the other paths are processed. + * This is to ensure a best-effort attempt to update the store. + * @param pathSet set of paths. + * @param state ongoing operation state. + * @throws IOException only after a best effort is made to update the store. 
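That best-effort contract is a defer-and-rethrow pattern; the real method streams over the paths, hence the AtomicReference in the body that follows, but the idea reduces to this sketch, where updateEntry is a hypothetical stand-in for the per-path get/innerPut work:

    AtomicReference<IOException> firstFailure = new AtomicReference<>();
    for (Path path : pathSet) {
      try {
        updateEntry(path);                    // hypothetical per-path update
      } catch (IOException e) {
        firstFailure.compareAndSet(null, e);  // keep the first failure, carry on
      }
    }
    if (firstFailure.get() != null) {
      throw firstFailure.get();
    }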
+ */ + private void removeAuthoritativeDirFlag( + final Set pathSet, + final AncestorState state) throws IOException { + AtomicReference rIOException = new AtomicReference<>(); Set metas = pathSet.stream().map(path -> { try { + if (state != null && state.get(path) != null) { + // there's already an entry for this path + LOG.debug("Ignoring update of entry already in the state map"); + return null; + } DDBPathMetadata ddbPathMetadata = get(path); if(ddbPathMetadata == null) { return null; @@ -1180,7 +1565,9 @@ private void removeAuthoritativeDirFlag(Set pathSet) try { LOG.debug("innerPut on metas: {}", metas); - innerPut(metas); + if (!metas.isEmpty()) { + innerPut(metas, state, timeProvider); + } } catch (IOException e) { String msg = String.format("IOException while setting false " + "authoritative directory flag on: %s.", metas); @@ -1800,6 +2187,72 @@ public Invoker getInvoker() { return invoker; } + /** + * Record the number of records written. + * @param count count of records. + */ + private void recordsWritten(final int count) { + if (instrumentation != null) { + instrumentation.recordsWritten(count); + } + } + + /** + * Record the number of records read. + * @param count count of records. + */ + private void recordsRead(final int count) { + if (instrumentation != null) { + instrumentation.recordsRead(count); + } + } + /** + * Record the number of records deleted. + * @param count count of records. + */ + private void recordsDeleted(final int count) { + if (instrumentation != null) { + instrumentation.recordsDeleted(count); + } + } + + /** + * Initiate the rename operation by creating the tracker for the filesystem + * to keep up to date with state changes in the S3A bucket. + * @param storeContext store context. + * @param source source path + * @param sourceStatus status of the source file/dir + * @param dest destination path. + * @return the rename tracker + */ + @Override + public RenameTracker initiateRenameOperation( + final StoreContext storeContext, + final Path source, + final S3AFileStatus sourceStatus, + final Path dest) { + return new ProgressiveRenameTracker(storeContext, this, source, dest, + new AncestorState(BulkOperationState.OperationType.Rename, dest)); + } + + @Override + public AncestorState initiateBulkWrite( + final BulkOperationState.OperationType operation, + final Path dest) { + return new AncestorState(operation, dest); + } + + /** + * Extract a time provider from the argument or fall back to the + * one in the constructor. + * @param ttlTimeProvider nullable time source passed in as an argument. + * @return a non-null time source. + */ + private ITtlTimeProvider extractTimeProvider( + @Nullable ITtlTimeProvider ttlTimeProvider) { + return ttlTimeProvider != null ? ttlTimeProvider : timeProvider; + } + /** * Take an {@code IllegalArgumentException} raised by a DDB operation * and if it contains an inner SDK exception, unwrap it. @@ -1841,4 +2294,117 @@ static IOException translateTableWaitFailure( return new IOException(e); } } + + /** + * Get the move state passed in; create a new one if needed. + * @param state state. + * @param operation the type of the operation to use if the state is created. + * @return the cast or created state. 
+ */ + @VisibleForTesting + static AncestorState extractOrCreate(@Nullable BulkOperationState state, + BulkOperationState.OperationType operation) { + if (state != null) { + return (AncestorState) state; + } else { + return new AncestorState(operation, null); + } + } + + /** + * This tracks all the ancestors created, + * across multiple move/write operations. + * This is to avoid duplicate creation of ancestors during bulk commits + * and rename operations managed by a rename tracker. + */ + @VisibleForTesting + static final class AncestorState extends BulkOperationState { + + private final Map ancestry = new HashMap<>(); + + private final Path dest; + + /** + * Create the state. + * @param operation the type of the operation. + * @param dest destination path. + */ + AncestorState(final OperationType operation, @Nullable final Path dest) { + super(operation); + this.dest = dest; + } + + int size() { + return ancestry.size(); + } + + public Path getDest() { + return dest; + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "AncestorState{"); + sb.append("operation=").append(getOperation()); + sb.append("; dest=").append(dest); + sb.append("; size=").append(size()); + sb.append("; paths={") + .append(StringUtils.join(ancestry.keySet(), " ")) + .append('}'); + sb.append('}'); + return sb.toString(); + } + + /** + * Does the ancestor state contain a path? + * @param p path to check + * @return true if the state has an entry + */ + boolean contains(Path p) { + return ancestry.containsKey(p); + } + + DDBPathMetadata put(Path p, DDBPathMetadata md) { + return ancestry.put(p, md); + } + + DDBPathMetadata put(DDBPathMetadata md) { + return ancestry.put(md.getFileStatus().getPath(), md); + } + + DDBPathMetadata get(Path p) { + return ancestry.get(p); + } + + /** + * Find an entry in the ancestor state, warning and optionally + * raising an exception if there is a file at the path. + * @param path path to look up + * @param failOnFile fail if a file was found. + * @return true iff a directory was found in the ancestor state. + * @throws PathIOException if there was a file at the path. + */ + boolean findEntry( + final Path path, + final boolean failOnFile) throws PathIOException { + final DDBPathMetadata ancestor = get(path); + if (ancestor != null) { + // there's an entry in the ancestor state + if (!ancestor.getFileStatus().isDirectory()) { + // but: its a file, which means this update is now inconsistent. 
+ final String message = E_INCONSISTENT_UPDATE + " entry is " + ancestor + .getFileStatus(); + LOG.error(message); + if (failOnFile) { + // errors trigger failure + throw new PathIOException(path.toString(), message); + } + } + return true; + } else { + return false; + } + } + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java index 6c13cd151d5da..4327002ddbd03 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java @@ -18,6 +18,8 @@ package org.apache.hadoop.fs.s3a.s3guard; +import javax.annotation.Nullable; + import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -30,12 +32,17 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.fs.s3a.Tristate; +import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.security.UserGroupInformation; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.net.URI; +import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.Map; @@ -70,6 +77,8 @@ public class LocalMetadataStore implements MetadataStore { /* Null iff this FS does not have an associated URI host. */ private String uriHost; + private String username; + @Override public void initialize(FileSystem fileSystem) throws IOException { Preconditions.checkNotNull(fileSystem); @@ -100,6 +109,7 @@ public void initialize(Configuration conf) throws IOException { } localCache = builder.build(); + username = UserGroupInformation.getCurrentUser().getShortUserName(); } @Override @@ -192,15 +202,19 @@ public synchronized DirListingMetadata listChildren(Path p) throws } @Override - public void move(Collection pathsToDelete, - Collection pathsToCreate, - ITtlTimeProvider ttlTimeProvider) throws IOException { + public void move( + @Nullable Collection pathsToDelete, + @Nullable Collection pathsToCreate, + ITtlTimeProvider ttlTimeProvider, + @Nullable final BulkOperationState operationState) throws IOException { LOG.info("Move {} to {}", pathsToDelete, pathsToCreate); - Preconditions.checkNotNull(pathsToDelete, "pathsToDelete is null"); - Preconditions.checkNotNull(pathsToCreate, "pathsToCreate is null"); - Preconditions.checkArgument(pathsToDelete.size() == pathsToCreate.size(), - "Must supply same number of paths to delete/create."); + if (pathsToCreate == null) { + pathsToCreate = Collections.emptyList(); + } + if (pathsToDelete == null) { + pathsToDelete = Collections.emptyList(); + } // I feel dirty for using reentrant lock. :-| synchronized (this) { @@ -214,7 +228,7 @@ public void move(Collection pathsToDelete, // 2. Create new destination path metadata for (PathMetadata meta : pathsToCreate) { LOG.debug("move: adding metadata {}", meta); - put(meta); + put(meta, null); } // 3. 
We now know full contents of all dirs in destination subtree @@ -232,7 +246,13 @@ public void move(Collection pathsToDelete, } @Override - public void put(PathMetadata meta) throws IOException { + public void put(final PathMetadata meta) throws IOException { + put(meta, null); + } + + @Override + public void put(PathMetadata meta, + final BulkOperationState operationState) throws IOException { Preconditions.checkNotNull(meta); S3AFileStatus status = meta.getFileStatus(); @@ -301,7 +321,8 @@ public void put(PathMetadata meta) throws IOException { } @Override - public synchronized void put(DirListingMetadata meta) throws IOException { + public synchronized void put(DirListingMetadata meta, + final BulkOperationState operationState) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("put dirMeta {}", meta.prettyPrint()); } @@ -312,13 +333,14 @@ public synchronized void put(DirListingMetadata meta) throws IOException { } else { entry.setDirListingMetadata(meta); } - put(meta.getListing()); + put(meta.getListing(), null); } - public synchronized void put(Collection metas) throws + public synchronized void put(Collection metas, + final BulkOperationState operationState) throws IOException { for (PathMetadata meta : metas) { - put(meta); + put(meta, operationState); } } @@ -564,4 +586,35 @@ DirListingMetadata getDirListingMeta(Path p){ } } + @Override + public RenameTracker initiateRenameOperation(final StoreContext storeContext, + final Path source, + final S3AFileStatus sourceStatus, final Path dest) throws IOException { + return new ProgressiveRenameTracker(storeContext, this, source, dest, + null); + } + + @Override + public void addAncestors(final Path qualifiedPath, + ITtlTimeProvider ttlTimeProvider, + @Nullable final BulkOperationState operationState) throws IOException { + + Collection newDirs = new ArrayList<>(); + Path parent = qualifiedPath.getParent(); + while (!parent.isRoot()) { + PathMetadata directory = get(parent); + if (directory == null || directory.isDeleted()) { + S3AFileStatus status = new S3AFileStatus(Tristate.FALSE, parent, + username); + PathMetadata meta = new PathMetadata(status, Tristate.FALSE, false); + newDirs.add(meta); + } else { + break; + } + parent = parent.getParent(); + } + if (!newDirs.isEmpty()) { + put(newDirs, operationState); + } + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java index 7875d43d1e6bb..397d23aa34323 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.s3a.s3guard; +import javax.annotation.Nullable; import java.io.Closeable; import java.io.IOException; import java.util.Collection; @@ -30,6 +31,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.Retries.RetryTranslated; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.impl.StoreContext; /** * {@code MetadataStore} defines the set of operations that any metadata store @@ -142,6 +145,23 @@ PathMetadata get(Path path, boolean wantEmptyDirectoryFlag) */ DirListingMetadata listChildren(Path path) throws IOException; + /** + * This adds all new ancestors of a path as directories. + *
    + * Important: to propagate TTL information, any new ancestors added + * must have their last updated timestamps set through + * {@link S3Guard#patchLastUpdated(Collection, ITtlTimeProvider)}. + * @param qualifiedPath path to update + * @param timeProvider time provider for timestamps + * @param operationState (nullable) operational state for a bulk update + * @throws IOException failure + */ + @RetryTranslated + void addAncestors( + Path qualifiedPath, + @Nullable ITtlTimeProvider timeProvider, + @Nullable BulkOperationState operationState) throws IOException; + /** * Record the effects of a {@link FileSystem#rename(Path, Path)} in the * MetadataStore. Clients provide explicit enumeration of the affected @@ -163,15 +183,18 @@ PathMetadata get(Path path, boolean wantEmptyDirectoryFlag) * @param pathsToDelete Collection of all paths that were removed from the * source directory tree of the move. * @param pathsToCreate Collection of all PathMetadata for the new paths - * that were created at the destination of the rename - * (). + * that were created at the destination of the rename(). * @param ttlTimeProvider the time provider to set last_updated. Must not * be null. + * @param operationState Any ongoing state supplied to the rename tracker + * which is to be passed in with each move operation. * @throws IOException if there is an error */ - void move(Collection pathsToDelete, - Collection pathsToCreate, - ITtlTimeProvider ttlTimeProvider) throws IOException; + void move( + @Nullable Collection pathsToDelete, + @Nullable Collection pathsToCreate, + ITtlTimeProvider ttlTimeProvider, + @Nullable BulkOperationState operationState) throws IOException; /** * Saves metadata for exactly one path. @@ -186,15 +209,33 @@ void move(Collection pathsToDelete, @RetryTranslated void put(PathMetadata meta) throws IOException; + /** + * Saves metadata for exactly one path, potentially + * using any bulk operation state to eliminate duplicate work. + * + * Implementations may pre-create all the path's ancestors automatically. + * Implementations must update any {@code DirListingMetadata} objects which + * track the immediate parent of this file. + * + * @param meta the metadata to save + * @param operationState operational state for a bulk update + * @throws IOException if there is an error + */ + @RetryTranslated + void put(PathMetadata meta, + @Nullable BulkOperationState operationState) throws IOException; + /** * Saves metadata for any number of paths. * * Semantics are otherwise the same as single-path puts. * * @param metas the metadata to save + * @param operationState (nullable) operational state for a bulk update * @throws IOException if there is an error */ - void put(Collection metas) throws IOException; + void put(Collection metas, + @Nullable BulkOperationState operationState) throws IOException; /** * Save directory listing metadata. Callers may save a partial directory @@ -211,9 +252,11 @@ void move(Collection pathsToDelete, * another process. * * @param meta Directory listing metadata. + * @param operationState operational state for a bulk update * @throws IOException if there is an error */ - void put(DirListingMetadata meta) throws IOException; + void put(DirListingMetadata meta, + @Nullable BulkOperationState operationState) throws IOException; /** * Destroy all resources associated with the metadata store. @@ -303,4 +346,36 @@ enum PruneMode { ALL_BY_MODTIME, TOMBSTONES_BY_LASTUPDATED } + + /** + * Start a rename operation. + * + * @param storeContext store context. 
+ * @param source source path + * @param sourceStatus status of the source file/dir + * @param dest destination path. + * @return the rename tracker + * @throws IOException Failure. + */ + RenameTracker initiateRenameOperation( + StoreContext storeContext, + Path source, + S3AFileStatus sourceStatus, + Path dest) + throws IOException; + + /** + * Initiate a bulk update and create an operation state for it. + * This may then be passed into put operations. + * @param operation the type of the operation. + * @param dest path under which updates will be explicitly put. + * @return null or a store-specific state to pass into the put operations. + * @throws IOException failure + */ + default BulkOperationState initiateBulkWrite( + BulkOperationState.OperationType operation, + Path dest) throws IOException { + return null; + } + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java index 1472ef1a2219f..f0792ab3e1949 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java @@ -18,9 +18,14 @@ package org.apache.hadoop.fs.s3a.s3guard; +import javax.annotation.Nullable; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.S3ObjectAttributes; +import org.apache.hadoop.fs.s3a.impl.StoreContext; import java.io.IOException; import java.util.Collection; @@ -79,19 +84,27 @@ public DirListingMetadata listChildren(Path path) throws IOException { @Override public void move(Collection pathsToDelete, Collection pathsToCreate, - ITtlTimeProvider ttlTimeProvider) throws IOException { + ITtlTimeProvider ttlTimeProvider, + final BulkOperationState operationState) throws IOException { } @Override - public void put(PathMetadata meta) throws IOException { + public void put(final PathMetadata meta) throws IOException { } @Override - public void put(Collection meta) throws IOException { + public void put(PathMetadata meta, + final BulkOperationState operationState) throws IOException { } @Override - public void put(DirListingMetadata meta) throws IOException { + public void put(Collection meta, + final BulkOperationState operationState) throws IOException { + } + + @Override + public void put(DirListingMetadata meta, + final BulkOperationState operationState) throws IOException { } @Override @@ -123,4 +136,40 @@ public Map getDiagnostics() throws IOException { public void updateParameters(Map parameters) throws IOException { } + + @Override + public RenameTracker initiateRenameOperation(final StoreContext storeContext, + final Path source, + final S3AFileStatus sourceStatus, + final Path dest) + throws IOException { + return new NullRenameTracker(storeContext, source, dest, this); + } + + @Override + public void addAncestors(final Path qualifiedPath, + final ITtlTimeProvider timeProvider, + @Nullable final BulkOperationState operationState) throws IOException { + } + + private static final class NullRenameTracker extends RenameTracker { + + private NullRenameTracker( + final StoreContext storeContext, + final Path source, + final Path dest, MetadataStore metadataStore) { + super("null tracker", storeContext, metadataStore, source, dest, null); + } + + @Override + public void 
fileCopied(final Path childSource, + final S3ObjectAttributes sourceAttributes, + final S3ObjectAttributes destAttributes, + final Path destPath, + final long blockSize, + final boolean addAncestors) throws IOException { + + } + + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadataDynamoDBTranslation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadataDynamoDBTranslation.java index c9559ec1517f7..7d4980a06fc29 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadataDynamoDBTranslation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadataDynamoDBTranslation.java @@ -51,7 +51,8 @@ */ @InterfaceAudience.Private @InterfaceStability.Evolving -final class PathMetadataDynamoDBTranslation { +@VisibleForTesting +public final class PathMetadataDynamoDBTranslation { /** The HASH key name of each item. */ @VisibleForTesting @@ -289,7 +290,8 @@ static KeyAttribute pathToParentKeyAttribute(Path path) { * @param path path to convert * @return string for parent key */ - static String pathToParentKey(Path path) { + @VisibleForTesting + public static String pathToParentKey(Path path) { Preconditions.checkNotNull(path); Preconditions.checkArgument(path.isUriPathAbsolute(), "Path not absolute"); URI uri = path.toUri(); @@ -343,9 +345,21 @@ static PrimaryKey[] pathToKey(Collection paths) { private PathMetadataDynamoDBTranslation() { } + /** + * Convert a collection of metadata entries to a list + * of DDBPathMetadata entries. + * If the sources are already DDBPathMetadata instances, they + * are copied directly into the new list, otherwise new + * instances are created. + * @param pathMetadatas source data + * @return the converted list. + */ static List pathMetaToDDBPathMeta( - Collection pathMetadatas) { - return pathMetadatas.stream().map(p -> new DDBPathMetadata(p)) + Collection pathMetadatas) { + return pathMetadatas.stream().map(p -> + (p instanceof DDBPathMetadata) + ? (DDBPathMetadata) p + : new DDBPathMetadata(p)) .collect(Collectors.toList()); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathOrderComparators.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathOrderComparators.java new file mode 100644 index 0000000000000..a3a7967caf79b --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathOrderComparators.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.s3guard; + +import java.io.Serializable; +import java.util.Comparator; + +import org.apache.hadoop.fs.Path; + +/** + * Comparator of path ordering for sorting collections. + * + * The definition of "topmost" is: + *
+ * <ol>
+ *   <li>The depth of a path is the primary comparator.</li>
+ *   <li>Root is topmost, "0"</li>
+ *   <li>If two paths are of equal depth, {@link Path#compareTo(Path)}
+ *   is used. This delegates to URI compareTo.</li>
+ *   <li>repeated sorts do not change the order</li>
+ * </ol>
    + */ +final class PathOrderComparators { + + private PathOrderComparators() { + } + + /** + * The shallowest paths come first. + * This is to be used when adding entries. + */ + static final Comparator TOPMOST_PATH_FIRST + = new TopmostFirst(); + + /** + * The leaves come first. + * This is to be used when deleting entries. + */ + static final Comparator TOPMOST_PATH_LAST + = new TopmostLast(); + + /** + * The shallowest paths come first. + * This is to be used when adding entries. + */ + static final Comparator TOPMOST_PM_FIRST + = new PathMetadataComparator(TOPMOST_PATH_FIRST); + + /** + * The leaves come first. + * This is to be used when deleting entries. + */ + static final Comparator TOPMOST_PM_LAST + = new PathMetadataComparator(TOPMOST_PATH_LAST); + + private static class TopmostFirst implements Comparator, Serializable { + + @Override + public int compare(Path pathL, Path pathR) { + // exit fast on equal values. + if (pathL.equals(pathR)) { + return 0; + } + int depthL = pathL.depth(); + int depthR = pathR.depth(); + if (depthL < depthR) { + // left is higher up than the right. + return -1; + } + if (depthR < depthL) { + // right is higher up than the left + return 1; + } + // and if they are of equal depth, use the "classic" comparator + // of paths. + return pathL.compareTo(pathR); + } + } + + /** + * Compare the topmost last. + * For some reason the .reverse() option wasn't giving the + * correct outcome. + */ + private static final class TopmostLast extends TopmostFirst { + + @Override + public int compare(final Path pathL, final Path pathR) { + int compare = super.compare(pathL, pathR); + if (compare < 0) { + return 1; + } + if (compare > 0) { + return -1; + } + return 0; + } + } + + /** + * Compare on path status. + */ + private static final class PathMetadataComparator implements + Comparator, Serializable { + + private final Comparator inner; + + private PathMetadataComparator(final Comparator inner) { + this.inner = inner; + } + + @Override + public int compare(final PathMetadata o1, final PathMetadata o2) { + return inner.compare(o1.getFileStatus().getPath(), + o2.getFileStatus().getPath()); + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ProgressiveRenameTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ProgressiveRenameTracker.java new file mode 100644 index 0000000000000..87cba20dd48ae --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ProgressiveRenameTracker.java @@ -0,0 +1,252 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.s3guard; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3ObjectAttributes; +import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.util.DurationInfo; + +import static com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.addMoveAncestors; +import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.addMoveDir; + +/** + * This rename tracker progressively updates the metadata store + * as it proceeds, during the parallelized copy operation. + *
+ * Algorithm
+ * <ol>
+ *   <li>
+ *     As {@code RenameTracker.fileCopied()} callbacks
+ *     are raised, the metastore is updated with the new file entry.
+ *   </li>
+ *   <li>
+ *     Including parent entries, as appropriate.
+ *   </li>
+ *   <li>
+ *     All directories which have been created are tracked locally,
+ *     to avoid needing to read the store; this is a thread-safe structure.
+ *   </li>
+ *   <li>
+ *     The actual update is performed out of any synchronized block.
+ *   </li>
+ *   <li>
+ *     When deletes are executed, the store is also updated.
+ *   </li>
+ *   <li>
+ *     And at the completion of a successful rename, the source directory
+ *     is also removed.
+ *   </li>
+ * </ol>
    + */ +public class ProgressiveRenameTracker extends RenameTracker { + + /** + * The collection of paths to delete; this is added as individual files + * are renamed. + *
    + * The metastore is only updated with these entries after the DELETE + * call containing these paths succeeds. + *
    + * If the DELETE fails; the filesystem will use + * {@code MultiObjectDeleteSupport} to remove all successfully deleted + * entries from the metastore. + */ + private final Collection pathsToDelete = new HashSet<>(); + + public ProgressiveRenameTracker( + final StoreContext storeContext, + final MetadataStore metadataStore, + final Path sourceRoot, + final Path dest, + final BulkOperationState operationState) { + super("ProgressiveRenameTracker", + storeContext, metadataStore, sourceRoot, dest, operationState); + } + + /** + * When a file is copied, any ancestors + * are calculated and then the store is updated with + * the destination entries. + *
    + * The source entries are added to the {@link #pathsToDelete} list. + * @param sourcePath path of source + * @param sourceAttributes status of source. + * @param destAttributes destination attributes + * @param destPath destination path. + * @param blockSize block size. + * @param addAncestors should ancestors be added? + * @throws IOException failure + */ + @Override + public void fileCopied( + final Path sourcePath, + final S3ObjectAttributes sourceAttributes, + final S3ObjectAttributes destAttributes, + final Path destPath, + final long blockSize, + final boolean addAncestors) throws IOException { + + // build the list of entries to add in a synchronized block. + final List entriesToAdd = new ArrayList<>(1); + LOG.debug("Updating store with copied file {}", sourcePath); + MetadataStore store = getMetadataStore(); + synchronized (this) { + checkArgument(!pathsToDelete.contains(sourcePath), + "File being renamed is already processed %s", destPath); + // create the file metadata and update the lists + // the pathsToDelete field is incremented with the new source path, + // for deletion after the DELETE operation succeeds; + // the entriesToAdd variable is filled in with all entries + // to add within this method + S3Guard.addMoveFile( + store, + pathsToDelete, + entriesToAdd, + sourcePath, + destPath, + sourceAttributes.getLen(), + blockSize, + getOwner(), + destAttributes.getETag(), + destAttributes.getVersionId()); + LOG.debug("New metastore entry : {}", entriesToAdd.get(0)); + if (addAncestors) { + // add all new ancestors to the lists + addMoveAncestors( + store, + pathsToDelete, + entriesToAdd, + getSourceRoot(), + sourcePath, + destPath, + getOwner()); + } + } + + // outside the lock, the entriesToAdd variable has all the new entries to + // create. ...so update the store. + // no entries are deleted at this point. + try (DurationInfo ignored = new DurationInfo(LOG, false, + "Adding new metastore entries")) { + store.move(null, entriesToAdd, + getStoreContext().getTimeProvider(), + getOperationState()); + } + } + + /** + * A directory marker has been added. + * Add the new entry and record the source path as another entry to delete. + * @param sourcePath status of source. + * @param destPath destination path. + * @param addAncestors should ancestors be added? + * @throws IOException failure. + */ + @Override + public void directoryMarkerCopied( + final Path sourcePath, + final Path destPath, + final boolean addAncestors) throws IOException { + // this list is created on demand. + final List entriesToAdd = new ArrayList<>(1); + MetadataStore store = getMetadataStore(); + synchronized (this) { + addMoveDir(store, + pathsToDelete, + entriesToAdd, + sourcePath, + destPath, + getOwner()); + // Ancestor directories may not be listed, so we explicitly add them + if (addAncestors) { + addMoveAncestors(store, + pathsToDelete, + entriesToAdd, + getSourceRoot(), + sourcePath, + destPath, + getOwner()); + } + } + // outside the lock, the entriesToAdd list has all new files to create. + // ...so update the store. + try (DurationInfo ignored = new DurationInfo(LOG, false, + "adding %s metastore entries", entriesToAdd.size())) { + store.move(null, entriesToAdd, + getStoreContext().getTimeProvider(), + getOperationState()); + } + } + + @Override + public synchronized void moveSourceDirectory() throws IOException { + // this moves the source directory in the metastore if it has not + // already been processed. 
+ // TODO S3Guard: performance: mark destination dirs as authoritative + if (!pathsToDelete.contains(getSourceRoot())) { + final List toDelete = new ArrayList<>(1); + final List toAdd = new ArrayList<>(1); + + addMoveDir(getMetadataStore(), pathsToDelete, toAdd, + getSourceRoot(), + getDest(), + getOwner()); + getMetadataStore().move(toDelete, toAdd, + getStoreContext().getTimeProvider(), + getOperationState()); + } + } + + /** + * As source objects are deleted, so is the list of entries. + * @param paths path of objects deleted. + * @throws IOException failure. + */ + @Override + public void sourceObjectsDeleted( + final Collection paths) throws IOException { + + // delete the paths from the metastore + try (DurationInfo ignored = new DurationInfo(LOG, false, + "delete %s metastore entries", paths.size())) { + getMetadataStore().move(paths, null, + getStoreContext().getTimeProvider(), + getOperationState()); + } + } + + @Override + public synchronized void completeRename() throws IOException { + // and finish off; by deleting source directories. + sourceObjectsDeleted(pathsToDelete); + super.completeRename(); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RenameTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RenameTracker.java new file mode 100644 index 0000000000000..76e269e1e3f6e --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RenameTracker.java @@ -0,0 +1,275 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.s3guard; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; + +import com.amazonaws.SdkBaseException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3ObjectAttributes; +import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.fs.s3a.impl.AbstractStoreOperation; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.DurationInfo; + +import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.fs.s3a.S3AUtils.translateException; + +/** + * A class which manages updating the metastore with the rename process + * as initiated in the S3AFilesystem rename. + *
    + * Subclasses must provide an implementation and return it in + * {@code MetadataStore.initiateRenameOperation()}. + *
    + * The {@link #operationState} field/constructor argument is an opaque state to + * be passed down to the metastore in its move operations; this allows the + * stores to manage ongoing state -while still being able to share + * rename tracker implementations. + *
    + * This is to avoid performance problems wherein the progressive rename + * tracker causes the store to repeatedly create and write duplicate + * ancestor entries for every file added. + */ +public abstract class RenameTracker extends AbstractStoreOperation { + + public static final Logger LOG = LoggerFactory.getLogger( + RenameTracker.class); + + /** source path. */ + private final Path sourceRoot; + + /** destination path. */ + private final Path dest; + + /** + * Track the duration of this operation. + */ + private final DurationInfo durationInfo; + + /** + * Generated name for strings. + */ + private final String name; + + /** + * Any ongoing state supplied to the rename tracker + * which is to be passed in with each move operation. + * This must be closed at the end of the tracker's life. + */ + private final BulkOperationState operationState; + + /** + * The metadata store for this tracker. + * Always non-null. + *
    + * This is passed in separate from the store context to guarantee + * that whichever store creates a tracker is explicitly bound to that + * instance. + */ + private final MetadataStore metadataStore; + + /** + * Constructor. + * @param name tracker name for logs. + * @param storeContext store context. + * @param metadataStore the stopre + * @param sourceRoot source path. + * @param dest destination path. + * @param operationState ongoing move state. + */ + protected RenameTracker( + final String name, + final StoreContext storeContext, + final MetadataStore metadataStore, + final Path sourceRoot, + final Path dest, + final BulkOperationState operationState) { + super(checkNotNull(storeContext)); + checkNotNull(storeContext.getUsername(), "No username"); + this.metadataStore = checkNotNull(metadataStore); + this.sourceRoot = checkNotNull(sourceRoot); + this.dest = checkNotNull(dest); + this.operationState = operationState; + this.name = String.format("%s (%s, %s)", name, sourceRoot, dest); + durationInfo = new DurationInfo(LOG, false, + name +" (%s, %s)", sourceRoot, dest); + } + + @Override + public String toString() { + return name; + } + + public Path getSourceRoot() { + return sourceRoot; + } + + public Path getDest() { + return dest; + } + + public String getOwner() { + return getStoreContext().getUsername(); + } + + public BulkOperationState getOperationState() { + return operationState; + } + + /** + * Get the metadata store. + * @return a non-null store. + */ + protected MetadataStore getMetadataStore() { + return metadataStore; + } + + /** + * A file has been copied. + * + * @param childSource source of the file. This may actually be different + * from the path of the sourceAttributes. (HOW?) + * @param sourceAttributes status of source. + * @param destAttributes destination attributes + * @param destPath destination path. + * @param blockSize block size. + * @param addAncestors should ancestors be added? + * @throws IOException failure. + */ + public abstract void fileCopied( + Path childSource, + S3ObjectAttributes sourceAttributes, + S3ObjectAttributes destAttributes, + Path destPath, + long blockSize, + boolean addAncestors) throws IOException; + + /** + * A directory marker has been copied. + * @param sourcePath source path. + * @param destPath destination path. + * @param addAncestors should ancestors be added? + * @throws IOException failure. + */ + public void directoryMarkerCopied( + Path sourcePath, + Path destPath, + boolean addAncestors) throws IOException { + } + + /** + * The delete failed. + *
    + * By the time this is called, the metastore will already have + * been updated with the results of any partial delete failure, + * such that all files known to have been deleted will have been + * removed. + * @param e exception + * @param pathsToDelete paths which were to be deleted. + * @param undeletedObjects list of objects which were not deleted. + */ + public IOException deleteFailed( + final Exception e, + final List pathsToDelete, + final List undeletedObjects) { + + return convertToIOException(e); + } + + /** + * Top level directory move. + * This is invoked after all child entries have been copied + * @throws IOException on failure + */ + public void moveSourceDirectory() throws IOException { + } + + /** + * Note that source objects have been deleted. + * The metastore will already have been updated. + * @param paths path of objects deleted. + */ + public void sourceObjectsDeleted( + final Collection paths) throws IOException { + } + + /** + * Complete the operation. + * @throws IOException failure. + */ + public void completeRename() throws IOException { + IOUtils.cleanupWithLogger(LOG, operationState); + noteRenameFinished(); + } + + /** + * Note that the rename has finished by closing the duration info; + * this will log the duration of the operation at debug. + */ + protected void noteRenameFinished() { + durationInfo.close(); + } + + /** + * Rename has failed. + *
    + * The metastore now needs to be updated with its current state + * even though the operation is incomplete. + * Implementations MUST NOT throw exceptions here, as this is going to + * be invoked in an exception handler. + * catch and log or catch and return/wrap. + *
    + * The base implementation returns the IOE passed in and translates + * any AWS exception into an IOE. + * @param ex the exception which caused the failure. + * This is either an IOException or and AWS exception + * @return an IOException to throw in an exception. + */ + public IOException renameFailed(Exception ex) { + LOG.debug("Rename has failed", ex); + IOUtils.cleanupWithLogger(LOG, operationState); + noteRenameFinished(); + return convertToIOException(ex); + } + + /** + * Convert a passed in exception (expected to be an IOE or AWS exception) + * into an IOException. + * @param ex exception caught + * @return the exception to throw in the failure handler. + */ + protected IOException convertToIOException(final Exception ex) { + if (ex instanceof IOException) { + return (IOException) ex; + } else if (ex instanceof SdkBaseException) { + return translateException("rename " + sourceRoot + " to " + dest, + sourceRoot.toString(), + (SdkBaseException) ex); + } else { + // should never happen, but for completeness + return new IOException(ex); + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java index 933a01ced5f4c..d4626364ff483 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java @@ -47,7 +47,6 @@ import org.apache.hadoop.fs.s3a.Retries.RetryTranslated; import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.fs.s3a.S3AInstrumentation; -import org.apache.hadoop.fs.s3a.Tristate; import org.apache.hadoop.util.ReflectionUtils; import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_METADATASTORE_METADATA_TTL; @@ -132,7 +131,7 @@ static Class getMetadataStoreClass( } if (conf.get(S3_METADATA_STORE_IMPL) != null && LOG.isDebugEnabled()) { LOG.debug("Metastore option source {}", - conf.getPropertySources(S3_METADATA_STORE_IMPL)); + (Object)conf.getPropertySources(S3_METADATA_STORE_IMPL)); } Class aClass = conf.getClass( @@ -157,14 +156,64 @@ public static S3AFileStatus putAndReturn(MetadataStore ms, S3AFileStatus status, S3AInstrumentation instrumentation, ITtlTimeProvider timeProvider) throws IOException { + return putAndReturn(ms, status, instrumentation, timeProvider, null); + } + + /** + * Helper function which puts a given S3AFileStatus into the MetadataStore and + * returns the same S3AFileStatus. Instrumentation monitors the put operation. + * @param ms MetadataStore to {@code put()} into. + * @param status status to store + * @param instrumentation instrumentation of the s3a file system + * @param timeProvider Time provider to use when writing entries + * @param operationState possibly-null metastore state tracker. 
+ * @return The same status as passed in + * @throws IOException if metadata store update failed + */ + @RetryTranslated + public static S3AFileStatus putAndReturn( + final MetadataStore ms, + final S3AFileStatus status, + final S3AInstrumentation instrumentation, + final ITtlTimeProvider timeProvider, + @Nullable final BulkOperationState operationState) throws IOException { long startTimeNano = System.nanoTime(); - S3Guard.putWithTtl(ms, new PathMetadata(status), timeProvider); - instrumentation.addValueToQuantiles(S3GUARD_METADATASTORE_PUT_PATH_LATENCY, - (System.nanoTime() - startTimeNano)); - instrumentation.incrementCounter(S3GUARD_METADATASTORE_PUT_PATH_REQUEST, 1); + try { + putWithTtl(ms, new PathMetadata(status), timeProvider, operationState); + } finally { + instrumentation.addValueToQuantiles( + S3GUARD_METADATASTORE_PUT_PATH_LATENCY, + (System.nanoTime() - startTimeNano)); + instrumentation.incrementCounter( + S3GUARD_METADATASTORE_PUT_PATH_REQUEST, + 1); + } return status; } + /** + * Initiate a bulk write and create an operation state for it. + * This may then be passed into put operations. + * @param metastore store + * @param operation the type of the operation. + * @param path path under which updates will be explicitly put. + * @return a store-specific state to pass into the put operations, or null + * @throws IOException failure + */ + public static BulkOperationState initiateBulkWrite( + @Nullable final MetadataStore metastore, + final BulkOperationState.OperationType operation, + final Path path) throws IOException { + Preconditions.checkArgument( + operation != BulkOperationState.OperationType.Rename, + "Rename operations cannot be started through initiateBulkWrite"); + if (metastore == null || isNullMetadataStore(metastore)) { + return null; + } else { + return metastore.initiateBulkWrite(operation, path); + } + } + /** * Convert the data of a directory listing to an array of {@link FileStatus} * entries. Tombstones are filtered out at this point. If the listing is null @@ -250,7 +299,7 @@ public static FileStatus[] dirListingUnion(MetadataStore ms, Path path, if (status != null && s.getModificationTime() > status.getModificationTime()) { LOG.debug("Update ms with newer metadata of: {}", status); - S3Guard.putWithTtl(ms, new PathMetadata(s), timeProvider); + S3Guard.putWithTtl(ms, new PathMetadata(s), timeProvider, null); } } @@ -271,7 +320,7 @@ public static FileStatus[] dirListingUnion(MetadataStore ms, Path path, if (changed && isAuthoritative) { dirMeta.setAuthoritative(true); // This is the full directory contents - S3Guard.putWithTtl(ms, dirMeta, timeProvider); + S3Guard.putWithTtl(ms, dirMeta, timeProvider, null); } return dirMetaToStatuses(dirMeta); @@ -308,7 +357,7 @@ public static boolean isNullMetadataStore(MetadataStore ms) { * dir. * @param owner Hadoop user name. * @param authoritative Whether to mark new directories as authoritative. - * @param timeProvider Time provider for testing. + * @param timeProvider Time provider. 
*/ @Deprecated @Retries.OnceExceptionsSwallowed @@ -357,7 +406,7 @@ public static void makeDirsOrdered(MetadataStore ms, List dirs, children.add(new PathMetadata(prevStatus)); } dirMeta = new DirListingMetadata(f, children, authoritative); - S3Guard.putWithTtl(ms, dirMeta, timeProvider); + S3Guard.putWithTtl(ms, dirMeta, timeProvider, null); } pathMetas.add(new PathMetadata(status)); @@ -365,7 +414,7 @@ public static void makeDirsOrdered(MetadataStore ms, List dirs, } // Batched put - S3Guard.putWithTtl(ms, pathMetas, timeProvider); + S3Guard.putWithTtl(ms, pathMetas, timeProvider, null); } catch (IOException ioe) { LOG.error("MetadataStore#put() failure:", ioe); } @@ -432,7 +481,7 @@ public static void addMoveFile(MetadataStore ms, Collection srcPaths, * take care of those inferred directories of this path explicitly. * * As {@link #addMoveFile} and {@link #addMoveDir}, this method adds resulting - * metadata to the supplied lists. It does not store in MetadataStore. + * metadata to the supplied lists. It does not update the MetadataStore. * * @param ms MetadataStore, no-op if it is NullMetadataStore * @param srcPaths stores the source path here @@ -469,25 +518,36 @@ public static void addMoveAncestors(MetadataStore ms, } } - public static void addAncestors(MetadataStore metadataStore, - Path qualifiedPath, String username, ITtlTimeProvider timeProvider) - throws IOException { - Collection newDirs = new ArrayList<>(); - Path parent = qualifiedPath.getParent(); - while (!parent.isRoot()) { - PathMetadata directory = metadataStore.get(parent); - if (directory == null || directory.isDeleted()) { - S3AFileStatus s3aStatus = new S3AFileStatus(Tristate.FALSE, parent, username); - PathMetadata meta = new PathMetadata(s3aStatus, Tristate.FALSE, false); - newDirs.add(meta); - } else { - break; - } - parent = parent.getParent(); - } - S3Guard.putWithTtl(metadataStore, newDirs, timeProvider); + /** + * This adds all new ancestors of a path as directories. + * This forwards to + * {@link MetadataStore#addAncestors(Path, ITtlTimeProvider, BulkOperationState)}. + *
    + * Originally it implemented the logic to probe for an add ancestors, + * but with the addition of a store-specific bulk operation state + * it became unworkable. + * + * @param metadataStore store + * @param qualifiedPath path to update + * @param operationState (nullable) operational state for a bulk update + * @throws IOException failure + */ + @Retries.RetryTranslated + public static void addAncestors( + final MetadataStore metadataStore, + final Path qualifiedPath, + final ITtlTimeProvider timeProvider, + @Nullable final BulkOperationState operationState) throws IOException { + metadataStore.addAncestors(qualifiedPath, timeProvider, operationState); } + /** + * Add the fact that a file was moved from a source path to a destination. + * @param srcPaths collection of source paths to update + * @param dstMetas collection of destination meta data entries to update. + * @param srcPath path of the source file. + * @param dstStatus status of the source file after it was copied. + */ private static void addMoveStatus(Collection srcPaths, Collection dstMetas, Path srcPath, @@ -570,30 +630,72 @@ public String toString() { } } + /** + * Put a directory entry, setting the updated timestamp of the + * directory and its children. + * @param ms metastore + * @param dirMeta directory + * @param timeProvider nullable time provider + * @throws IOException failure. + */ public static void putWithTtl(MetadataStore ms, DirListingMetadata dirMeta, - ITtlTimeProvider timeProvider) + final ITtlTimeProvider timeProvider, + @Nullable final BulkOperationState operationState) throws IOException { - dirMeta.setLastUpdated(timeProvider.getNow()); + long now = timeProvider.getNow(); + dirMeta.setLastUpdated(now); dirMeta.getListing() - .forEach(pm -> pm.setLastUpdated(timeProvider.getNow())); - ms.put(dirMeta); + .forEach(pm -> pm.setLastUpdated(now)); + ms.put(dirMeta, operationState); } + /** + * Put an entry, using the time provider to set its timestamp. + * @param ms metastore + * @param fileMeta entry to write + * @param timeProvider nullable time provider + * @param operationState nullable state for a bulk update + * @throws IOException failure. + */ public static void putWithTtl(MetadataStore ms, PathMetadata fileMeta, - @Nullable ITtlTimeProvider timeProvider) throws IOException { + @Nullable ITtlTimeProvider timeProvider, + @Nullable final BulkOperationState operationState) throws IOException { if (timeProvider != null) { fileMeta.setLastUpdated(timeProvider.getNow()); } else { LOG.debug("timeProvider is null, put {} without setting last_updated", fileMeta); } - ms.put(fileMeta); + ms.put(fileMeta, operationState); } + /** + * Put entries, using the time provider to set their timestamp. + * @param ms metastore + * @param fileMetas file metadata entries. + * @param timeProvider nullable time provider + * @param operationState nullable state for a bulk update + * @throws IOException failure. + */ public static void putWithTtl(MetadataStore ms, - Collection fileMetas, - @Nullable ITtlTimeProvider timeProvider) + Collection fileMetas, + @Nullable ITtlTimeProvider timeProvider, + @Nullable final BulkOperationState operationState) throws IOException { + patchLastUpdated(fileMetas, timeProvider); + ms.put(fileMetas, operationState); + } + + /** + * Patch any collection of metadata entries with the timestamp + * of a time provider. + * This MUST be used when creating new entries for directories. + * @param fileMetas file metadata entries. 
+ * @param timeProvider nullable time provider + */ + static void patchLastUpdated( + final Collection fileMetas, + @Nullable final ITtlTimeProvider timeProvider) { if (timeProvider != null) { final long now = timeProvider.getNow(); fileMetas.forEach(fileMeta -> fileMeta.setLastUpdated(now)); @@ -601,9 +703,16 @@ public static void putWithTtl(MetadataStore ms, LOG.debug("timeProvider is null, put {} without setting last_updated", fileMetas); } - ms.put(fileMetas); } + /** + * Get a path entry provided it is not considered expired. + * @param ms metastore + * @param path path to look up. + * @param timeProvider nullable time provider + * @return the metadata or null if there as no entry. + * @throws IOException failure. + */ public static PathMetadata getWithTtl(MetadataStore ms, Path path, @Nullable ITtlTimeProvider timeProvider) throws IOException { final PathMetadata pathMetadata = ms.get(path); @@ -616,11 +725,11 @@ public static PathMetadata getWithTtl(MetadataStore ms, Path path, long ttl = timeProvider.getMetadataTtl(); if (pathMetadata != null) { - // Special case: the pathmetadata's last updated is 0. This can happen + // Special case: the path metadata's last updated is 0. This can happen // eg. with an old db using this implementation if (pathMetadata.getLastUpdated() == 0) { LOG.debug("PathMetadata TTL for {} is 0, so it will be returned as " - + "not expired."); + + "not expired.", path); return pathMetadata; } @@ -636,6 +745,14 @@ public static PathMetadata getWithTtl(MetadataStore ms, Path path, return null; } + /** + * List children; mark the result as non-auth if the TTL has expired. + * @param ms metastore + * @param path path to look up. + * @param timeProvider nullable time provider + * @return the listing of entries under a path, or null if there as no entry. + * @throws IOException failure. + */ public static DirListingMetadata listChildrenWithTtl(MetadataStore ms, Path path, @Nullable ITtlTimeProvider timeProvider) throws IOException { diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java index dedb84931a902..002f793a2e63e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.s3a.s3guard; +import javax.annotation.Nullable; import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintStream; @@ -63,6 +64,7 @@ import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.Invoker.LOG_EVENT; +import static org.apache.hadoop.fs.s3a.S3AUtils.clearBucketOption; import static org.apache.hadoop.service.launcher.LauncherExitCodes.*; /** @@ -650,7 +652,13 @@ public int run(String[] args, PrintStream out) throws Exception { Preconditions.checkState(getStore() != null, "Metadata Store is not initialized"); - getStore().destroy(); + try { + getStore().destroy(); + } catch (TableDeleteTimeoutException e) { + LOG.warn("The table is been deleted but it is still (briefly)" + + " listed as present in AWS"); + LOG.debug("Timeout waiting for table disappearing", e); + } println(out, "Metadata store is deleted."); return SUCCESS; } @@ -696,9 +704,11 @@ public String getUsage() { * Put parents into MS and cache if the parents are not presented. * * @param f the file or an empty directory. 
+ * @param operationState store's bulk update state. * @throws IOException on I/O errors. */ - private void putParentsIfNotPresent(FileStatus f) throws IOException { + private void putParentsIfNotPresent(FileStatus f, + @Nullable BulkOperationState operationState) throws IOException { Preconditions.checkNotNull(f); Path parent = f.getPath().getParent(); while (parent != null) { @@ -708,7 +718,8 @@ private void putParentsIfNotPresent(FileStatus f) throws IOException { S3AFileStatus dir = DynamoDBMetadataStore.makeDirStatus(parent, f.getOwner()); S3Guard.putWithTtl(getStore(), new PathMetadata(dir), - getFilesystem().getTtlTimeProvider()); + getFilesystem().getTtlTimeProvider(), + operationState); dirCache.add(parent); parent = parent.getParent(); } @@ -721,6 +732,9 @@ private void putParentsIfNotPresent(FileStatus f) throws IOException { */ private long importDir(FileStatus status) throws IOException { Preconditions.checkArgument(status.isDirectory()); + BulkOperationState operationState = getStore().initiateBulkWrite( + BulkOperationState.OperationType.Put, + status.getPath()); RemoteIterator it = getFilesystem() .listFilesAndEmptyDirectories(status.getPath(), true); long items = 0; @@ -741,9 +755,11 @@ private long importDir(FileStatus status) throws IOException { located.getETag(), located.getVersionId()); } - putParentsIfNotPresent(child); - S3Guard.putWithTtl(getStore(), new PathMetadata(child), - getFilesystem().getTtlTimeProvider()); + putParentsIfNotPresent(child, operationState); + S3Guard.putWithTtl(getStore(), + new PathMetadata(child), + getFilesystem().getTtlTimeProvider(), + operationState); items++; } return items; @@ -779,7 +795,7 @@ public int run(String[] args, PrintStream out) throws Exception { long items = 1; if (status.isFile()) { PathMetadata meta = new PathMetadata(status); - getStore().put(meta); + getStore().put(meta, null); } else { items = importDir(status); } @@ -1137,16 +1153,19 @@ public int run(String[] args, PrintStream out) } String s3Path = paths.get(0); CommandFormat commands = getCommandFormat(); + URI fsURI = toUri(s3Path); // check if UNGUARDED_FLAG is passed and use NullMetadataStore in // config to avoid side effects like creating the table if not exists + Configuration unguardedConf = getConf(); if (commands.getOpt(UNGUARDED_FLAG)) { LOG.debug("Unguarded flag is passed to command :" + this.getName()); - getConf().set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL); + clearBucketOption(unguardedConf, fsURI.getHost(), S3_METADATA_STORE_IMPL); + unguardedConf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL); } S3AFileSystem fs = (S3AFileSystem) FileSystem.newInstance( - toUri(s3Path), getConf()); + fsURI, unguardedConf); setFilesystem(fs); Configuration conf = fs.getConf(); URI fsUri = fs.getUri(); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/TableDeleteTimeoutException.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/TableDeleteTimeoutException.java new file mode 100644 index 0000000000000..7969332139220 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/TableDeleteTimeoutException.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.s3guard; + +import org.apache.hadoop.fs.PathIOException; + +/** + * An exception raised when a table being deleted is still present after + * the wait time is exceeded. + */ +public class TableDeleteTimeoutException extends PathIOException { + + TableDeleteTimeoutException(final String path, + final String error, + final Throwable cause) { + super(path, error, cause); + } +} diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md index 337fc95b6c703..2729a9ebef091 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md @@ -1474,6 +1474,18 @@ Caused by: java.lang.NullPointerException ... 1 more ``` +### Error `Attempt to change a resource which is still in use: Table is being deleted` + +``` +com.amazonaws.services.dynamodbv2.model.ResourceInUseException: + Attempt to change a resource which is still in use: Table is being deleted: + s3guard.test.testDynamoDBInitDestroy351245027 + (Service: AmazonDynamoDBv2; Status Code: 400; Error Code: ResourceInUseException;) +``` + +You have attempted to call `hadoop s3guard destroy` on a table which is already +being destroyed. 
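+
+If this surfaces when driving the store through the `MetadataStore` API rather
+than the CLI, the same condition can be tolerated in the caller. The sketch
+below is illustrative only (the wrapper class and method name are not part of
+Hadoop); it mirrors the handling added to the `destroy` command in this patch:
+
+```java
+import java.io.IOException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
+import org.apache.hadoop.fs.s3a.s3guard.TableDeleteTimeoutException;
+
+/** Illustrative helper: destroy a metadata store, tolerating the deletion window. */
+public final class DestroyExample {
+
+  private static final Logger LOG = LoggerFactory.getLogger(DestroyExample.class);
+
+  private DestroyExample() {
+  }
+
+  /**
+   * Destroy the store; a TableDeleteTimeoutException only means the table
+   * is still (briefly) listed by AWS while its deletion completes.
+   */
+  public static void destroyTolerantly(MetadataStore store) throws IOException {
+    try {
+      store.destroy();
+    } catch (TableDeleteTimeoutException e) {
+      LOG.warn("Table is being deleted but is still (briefly) listed as present in AWS");
+      LOG.debug("Timeout waiting for the table to disappear", e);
+    }
+  }
+}
+```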
+ ## Other Topics For details on how to test S3Guard, see [Testing S3Guard](./testing.html#s3guard) diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java index 433964998cd9a..b19b241bb1d9a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java @@ -18,14 +18,23 @@ package org.apache.hadoop.fs.contract.s3a; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractRenameTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.fs.s3a.Statistic; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; +import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyFileContents; import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; +import static org.apache.hadoop.fs.s3a.S3ATestConstants.S3A_TEST_TIMEOUT; import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeEnableS3Guard; /** @@ -33,6 +42,15 @@ */ public class ITestS3AContractRename extends AbstractContractRenameTest { + public static final Logger LOG = LoggerFactory.getLogger( + ITestS3AContractRename.class); + + + @Override + protected int getTestTimeoutMillis() { + return S3A_TEST_TIMEOUT; + } + /** * Create a configuration, possibly patching in S3Guard options. * @return a configuration @@ -50,6 +68,12 @@ protected AbstractFSContract createContract(Configuration conf) { return new S3AContract(conf); } + @Override + public void teardown() throws Exception { + describe("\nTeardown\n"); + super.teardown(); + } + @Override public void testRenameDirIntoExistingDir() throws Throwable { describe("Verify renaming a dir into an existing dir puts the files" @@ -64,12 +88,55 @@ public void testRenameDirIntoExistingDir() throws Throwable { Path destDir = path("dest"); Path destFilePath = new Path(destDir, "dest-512.txt"); - byte[] destDateset = dataset(512, 'A', 'Z'); - writeDataset(fs, destFilePath, destDateset, destDateset.length, 1024, + byte[] destDataset = dataset(512, 'A', 'Z'); + writeDataset(fs, destFilePath, destDataset, destDataset.length, 1024, false); assertIsFile(destFilePath); boolean rename = fs.rename(srcDir, destDir); assertFalse("s3a doesn't support rename to non-empty directory", rename); } + + /** + * Test that after renaming, the nested file is moved along with all its + * ancestors. It is similar to {@link #testRenamePopulatesDirectoryAncestors}. + * + * This is an extension testRenamePopulatesFileAncestors + * of the superclass version which does better + * logging of the state of the store before the assertions. 
+ */ + @Test + public void testRenamePopulatesFileAncestors2() throws Exception { + final S3AFileSystem fs = (S3AFileSystem) getFileSystem(); + Path base = path("testRenamePopulatesFileAncestors2"); + final Path src = new Path(base, "src"); + Path dest = new Path(base, "dest"); + fs.mkdirs(src); + final String nestedFile = "/dir1/dir2/dir3/fileA"; + // size of file to create + int filesize = 16 * 1024; + byte[] srcDataset = dataset(filesize, 'a', 'z'); + Path srcFile = path(src + nestedFile); + Path destFile = path(dest + nestedFile); + writeDataset(fs, srcFile, srcDataset, srcDataset.length, + 1024, false); + + S3ATestUtils.MetricDiff fileCopyDiff = new S3ATestUtils.MetricDiff(fs, + Statistic.FILES_COPIED); + S3ATestUtils.MetricDiff fileCopyBytes = new S3ATestUtils.MetricDiff(fs, + Statistic.FILES_COPIED_BYTES); + + fs.rename(src, dest); + + describe("Rename has completed, examining data under " + base); + fileCopyDiff.assertDiffEquals("Number of files copied", 1); + fileCopyBytes.assertDiffEquals("Number of bytes copied", filesize); + // log everything in the base directory. + S3ATestUtils.lsR(fs, base, true); + // look at the data. + verifyFileContents(fs, destFile, srcDataset); + describe("validating results"); + validateAncestorsMoved(src, dest, nestedFile); + + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java index 8f8d8605653b1..55d396e60f783 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java @@ -20,9 +20,14 @@ import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.MultiObjectDeleteException; +import com.google.common.collect.Lists; +import org.junit.Assume; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport; import org.junit.Test; import org.slf4j.Logger; @@ -31,13 +36,16 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.nio.file.AccessDeniedException; import static org.apache.hadoop.fs.contract.ContractTestUtils.*; -import static org.apache.hadoop.fs.s3a.S3ATestUtils.getLandsatCSVPath; +import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.failIf; +import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.*; +import static org.apache.hadoop.fs.s3a.impl.TestPartialDeleteFailures.keysToDelete; import static org.apache.hadoop.test.LambdaTestUtils.*; /** - * Test S3A Failure translation. + * ITest for failure handling, primarily multipart deletion. */ public class ITestS3AFailureHandling extends AbstractS3ATestBase { private static final Logger LOG = @@ -69,12 +77,17 @@ public void testMultiObjectDeleteNoFile() throws Throwable { private void removeKeys(S3AFileSystem fileSystem, String... 
keys) throws IOException { + fileSystem.removeKeys(buildDeleteRequest(keys), false); + } + + private List buildDeleteRequest( + final String[] keys) { List request = new ArrayList<>( keys.length); for (String key : keys) { request.add(new DeleteObjectsRequest.KeyVersion(key)); } - fileSystem.removeKeys(request, false, false); + return request; } @Test @@ -87,12 +100,87 @@ public void testMultiObjectDeleteSomeFiles() throws Throwable { timer.end("removeKeys"); } + + private Path maybeGetCsvPath() { + Configuration conf = getConfiguration(); + String csvFile = conf.getTrimmed(KEY_CSVTEST_FILE, DEFAULT_CSVTEST_FILE); + Assume.assumeTrue("CSV test file is not the default", + DEFAULT_CSVTEST_FILE.equals(csvFile)); + return new Path(csvFile); + } + + /** + * Test low-level failure handling with low level delete request. + */ @Test public void testMultiObjectDeleteNoPermissions() throws Throwable { - Path testFile = getLandsatCSVPath(getConfiguration()); - S3AFileSystem fs = (S3AFileSystem)testFile.getFileSystem( + describe("Delete the landsat CSV file and expect it to fail"); + Path csvPath = maybeGetCsvPath(); + S3AFileSystem fs = (S3AFileSystem) csvPath.getFileSystem( + getConfiguration()); + List keys + = buildDeleteRequest( + new String[]{ + fs.pathToKey(csvPath), + "missing-key.csv" + }); + MultiObjectDeleteException ex = intercept( + MultiObjectDeleteException.class, + () -> fs.removeKeys(keys, false)); + + final List undeleted + = extractUndeletedPaths(ex, fs::keyToQualifiedPath); + String undeletedFiles = join(undeleted); + failIf(undeleted.size() != 2, + "undeleted list size wrong: " + undeletedFiles, + ex); + assertTrue("no CSV in " +undeletedFiles, undeleted.contains(csvPath)); + + // and a full split, after adding a new key + String marker = "/marker"; + Path markerPath = fs.keyToQualifiedPath(marker); + keys.add(new DeleteObjectsRequest.KeyVersion(marker)); + + Pair, List> pair = + new MultiObjectDeleteSupport(fs.createStoreContext()) + .splitUndeletedKeys(ex, keys); + assertEquals(undeleted, pair.getLeft()); + List right = pair.getRight(); + assertEquals("Wrong size for " + join(right), 1, right.size()); + assertEquals(markerPath, right.get(0)); + } + + /** + * See what happens when you delete two entries which do not exist. + * It must not raise an exception. + */ + @Test + public void testMultiObjectDeleteMissingEntriesSucceeds() throws Throwable { + describe("Delete keys which don't exist"); + Path base = path("missing"); + S3AFileSystem fs = getFileSystem(); + List keys = keysToDelete( + Lists.newArrayList(new Path(base, "1"), new Path(base, "2"))); + fs.removeKeys(keys, false); + } + + private String join(final Iterable iterable) { + return "[" + StringUtils.join(iterable, ",") + "]"; + } + + /** + * Test low-level failure handling with a single-entry file. + * This is deleted as a single call, so isn't that useful. 
+ */ + @Test + public void testSingleObjectDeleteNoPermissionsTranslated() throws Throwable { + describe("Delete the landsat CSV file and expect it to fail"); + Path csvPath = maybeGetCsvPath(); + S3AFileSystem fs = (S3AFileSystem) csvPath.getFileSystem( getConfiguration()); - intercept(MultiObjectDeleteException.class, - () -> removeKeys(fs, fs.pathToKey(testFile))); + AccessDeniedException aex = intercept(AccessDeniedException.class, + () -> fs.delete(csvPath, false)); + Throwable cause = aex.getCause(); + failIf(cause == null, "no nested exception", aex); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetadataPersistenceException.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetadataPersistenceException.java index 26661a36090ed..3662194d34b50 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetadataPersistenceException.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetadataPersistenceException.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.S3ATestUtils.MetricDiff; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; import org.apache.hadoop.fs.s3a.s3guard.LocalMetadataStore; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore; @@ -129,8 +130,15 @@ private IOExceptionMetadataStore(IOException ioException) { } @Override - public void put(PathMetadata meta) throws IOException { + public void put(PathMetadata meta, + final BulkOperationState operationState) throws IOException { throw ioException; } + + @Override + public void put(final PathMetadata meta) throws IOException { + put(meta, null); + } + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java index c90dd7c63383e..10ebacdbed815 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java @@ -47,6 +47,7 @@ import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.FailureInjectionPolicy.*; import static org.apache.hadoop.fs.s3a.InconsistentAmazonS3Client.*; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** * Test S3Guard list consistency feature by injecting delayed listObjects() @@ -253,13 +254,11 @@ public void testConsistentRenameAfterDelete() throws Exception { assertFalse(list.contains(path("a3/b/dir3-" + DEFAULT_DELAY_KEY_SUBSTRING))); - try { - RemoteIterator old = fs.listFilesAndEmptyDirectories( - path("a"), true); - fail("Recently renamed dir should not be visible"); - } catch(FileNotFoundException e) { - // expected - } + intercept(FileNotFoundException.class, "", + "Recently renamed dir should not be visible", + () -> S3AUtils.mapLocatedFiles( + fs.listFilesAndEmptyDirectories(path("a"), true), + FileStatus::getPath)); } @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java index 0e091a9e9cf0a..99f1a07968de1 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java +++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.s3a.auth.delegation.EncryptionSecrets; import org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; import org.apache.hadoop.util.Progressable; /** @@ -177,7 +178,8 @@ public boolean exists(Path f) throws IOException { } @Override - void finishedWrite(String key, long length, String eTag, String versionId) { + void finishedWrite(String key, long length, String eTag, String versionId, + BulkOperationState operationState) { } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java index 8ca2eccfe3a91..3d7cdfc08dec4 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java @@ -32,6 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy; import org.apache.hadoop.fs.s3a.impl.ChangeTracker; @@ -58,6 +59,8 @@ public class TestStreamChangeTracker extends HadoopTestBase { public static final String URI = "s3a://" + BUCKET + "/" + OBJECT; + public static final Path PATH = new Path(URI); + @Test public void testVersionCheckingHandlingNoVersions() throws Throwable { LOG.info("If an endpoint doesn't return versions, that's OK"); @@ -434,10 +437,12 @@ private S3Object emptyResponse() { private S3ObjectAttributes objectAttributes( String etag, String versionId) { return new S3ObjectAttributes(BUCKET, + PATH, OBJECT, null, null, etag, - versionId); + versionId, + 0); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java index 1ac52c4e3a239..72613538a42f6 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java @@ -23,9 +23,7 @@ import java.io.IOException; import java.net.URI; import java.nio.file.AccessDeniedException; -import java.util.ArrayList; import java.util.List; -import java.util.stream.Collectors; import java.util.stream.IntStream; import com.amazonaws.auth.AWSCredentials; @@ -53,7 +51,6 @@ import org.apache.hadoop.fs.s3a.commit.files.PendingSet; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; -import static org.apache.hadoop.fs.contract.ContractTestUtils.assertRenameOutcome; import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; @@ -524,78 +521,6 @@ public Path methodPath() throws IOException { return path(getMethodName()); } - @Test - public void testRestrictedRename() throws Throwable { - describe("rename with parent paths not writeable"); - executeRestrictedRename(createAssumedRoleConfig()); - } - - @Test - public void testRestrictedSingleDeleteRename() throws Throwable { - describe("rename with parent paths not writeable" - + " and multi-object delete disabled"); - Configuration conf = 
createAssumedRoleConfig(); - conf.setBoolean(ENABLE_MULTI_DELETE, false); - executeRestrictedRename(conf); - } - - /** - * Execute a sequence of rename operations with access locked down. - * @param conf FS configuration - */ - public void executeRestrictedRename(final Configuration conf) - throws IOException { - Path basePath = methodPath(); - Path restrictedDir = new Path(basePath, "renameSrc"); - Path destPath = new Path(basePath, "renameDest"); - Path child = new Path(restrictedDir, "child"); - // the full FS - S3AFileSystem fs = getFileSystem(); - fs.delete(basePath, true); - - bindRolePolicyStatements(conf, - STATEMENT_S3GUARD_CLIENT, - STATEMENT_ALLOW_SSE_KMS_RW, - STATEMENT_ALL_BUCKET_READ_ACCESS, - new Statement(Effects.Allow) - .addActions(S3_PATH_RW_OPERATIONS) - .addResources(directory(restrictedDir)) - .addResources(directory(destPath)) - ); - roleFS = (S3AFileSystem) restrictedDir.getFileSystem(conf); - - roleFS.getFileStatus(ROOT); - roleFS.mkdirs(restrictedDir); - // you can create an adjacent child - touch(roleFS, child); - - roleFS.delete(destPath, true); - // as dest doesn't exist, this will map child -> dest - assertRenameOutcome(roleFS, child, destPath, true); - - assertIsFile(destPath); - assertIsDirectory(restrictedDir); - Path renamedDestPath = new Path(restrictedDir, destPath.getName()); - assertRenameOutcome(roleFS, destPath, restrictedDir, true); - assertIsFile(renamedDestPath); - roleFS.delete(restrictedDir, true); - roleFS.delete(destPath, true); - } - - @Test - public void testRestrictedRenameReadOnlyData() throws Throwable { - describe("rename with source read only, multidelete"); - executeRenameReadOnlyData(createAssumedRoleConfig()); - } - - @Test - public void testRestrictedRenameReadOnlySingleDelete() throws Throwable { - describe("rename with source read only single delete"); - Configuration conf = createAssumedRoleConfig(); - conf.setBoolean(ENABLE_MULTI_DELETE, false); - executeRenameReadOnlyData(conf); - } - /** * Without simulation of STS failures, and with STS overload likely to * be very rare, there'll be no implicit test coverage of @@ -615,102 +540,6 @@ public void testAssumedRoleRetryHandler() throws Throwable { } } - /** - * Execute a sequence of rename operations where the source - * data is read only to the client calling rename(). - * This will cause the inner delete() operations to fail, whose outcomes - * are explored. - * Multiple files are created (in parallel) for some renames, so exploring - * the outcome on bulk delete calls, including verifying that a - * MultiObjectDeleteException is translated to an AccessDeniedException. - *

      - * <ol>
    - *   <li>The exception raised is AccessDeniedException,
    - *   from single and multi DELETE calls.</li>
    - *   <li>It happens after the COPY. Not ideal, but, well, we can't pretend
    - *   it's a filesystem forever.</li>
    - * </ol>
    - * @param conf FS configuration - */ - public void executeRenameReadOnlyData(final Configuration conf) - throws Exception { - assume("Does not work with S3Guard", !getFileSystem().hasMetadataStore()); - Path basePath = methodPath(); - Path destDir = new Path(basePath, "renameDest"); - Path readOnlyDir = new Path(basePath, "readonlyDir"); - Path readOnlyFile = new Path(readOnlyDir, "readonlyChild"); - - // the full FS - S3AFileSystem fs = getFileSystem(); - fs.delete(basePath, true); - - // this file is readable by the roleFS, but cannot be deleted - touch(fs, readOnlyFile); - - bindRolePolicyStatements(conf, - STATEMENT_S3GUARD_CLIENT, - STATEMENT_ALL_BUCKET_READ_ACCESS, - new Statement(Effects.Allow) - .addActions(S3_PATH_RW_OPERATIONS) - .addResources(directory(destDir)) - ); - roleFS = (S3AFileSystem) destDir.getFileSystem(conf); - - roleFS.delete(destDir, true); - roleFS.mkdirs(destDir); - // rename will fail in the delete phase - forbidden(readOnlyFile.toString(), - () -> roleFS.rename(readOnlyFile, destDir)); - - // and the source file is still there - assertIsFile(readOnlyFile); - - // but so is the copied version, because there's no attempt - // at rollback, or preflight checking on the delete permissions - Path renamedFile = new Path(destDir, readOnlyFile.getName()); - - assertIsFile(renamedFile); - - ContractTestUtils.assertDeleted(roleFS, renamedFile, true); - assertFileCount("Empty Dest Dir", roleFS, - destDir, 0); - // create a set of files - // this is done in parallel as it is 10x faster on a long-haul test run. - int range = 10; - touchFiles(fs, readOnlyDir, range); - // don't forget about that original file! - final long createdFiles = range + 1; - // are they all there? - assertFileCount("files ready to rename", roleFS, - readOnlyDir, createdFiles); - - // try to rename the directory - LOG.info("Renaming readonly files {} to {}", readOnlyDir, destDir); - AccessDeniedException ex = forbidden("", - () -> roleFS.rename(readOnlyDir, destDir)); - LOG.info("Result of renaming read-only files is AccessDeniedException", ex); - assertFileCount("files copied to the destination", roleFS, - destDir, createdFiles); - assertFileCount("files in the source directory", roleFS, - readOnlyDir, createdFiles); - - // and finally (so as to avoid the delay of POSTing some more objects, - // delete that r/o source - forbidden("", () -> roleFS.delete(readOnlyDir, true)); - } - - /** - * Parallel-touch a set of files in the destination directory. - * @param fs filesystem - * @param destDir destination - * @param range range 1..range inclusive of files to create. 
- */ - public void touchFiles(final S3AFileSystem fs, - final Path destDir, - final int range) { - IntStream.rangeClosed(1, range).parallel().forEach( - (i) -> eval(() -> touch(fs, new Path(destDir, "file-" + i)))); - } - @Test public void testRestrictedCommitActions() throws Throwable { describe("Attempt commit operations against a path with restricted rights"); @@ -779,12 +608,16 @@ public void testRestrictedCommitActions() throws Throwable { // all those commits must fail List commits = pendingCommits.getLeft().getCommits(); assertEquals(range, commits.size()); - commits.parallelStream().forEach( - (c) -> { - CommitOperations.MaybeIOE maybeIOE = operations.commit(c, "origin"); - Path path = c.destinationPath(); - assertCommitAccessDenied(path, maybeIOE); - }); + try(CommitOperations.CommitContext commitContext + = operations.initiateCommitOperation(uploadDest)) { + commits.parallelStream().forEach( + (c) -> { + CommitOperations.MaybeIOE maybeIOE = + commitContext.commit(c, "origin"); + Path path = c.destinationPath(); + assertCommitAccessDenied(path, maybeIOE); + }); + } // fail of all list and abort of .pending files. LOG.info("abortAllSinglePendingCommits({})", readOnlyDir); @@ -840,24 +673,25 @@ public void writeCSVData(final File localSrc) throws IOException { @Test public void testPartialDelete() throws Throwable { describe("delete with part of the child tree read only; multidelete"); - executePartialDelete(createAssumedRoleConfig()); + executePartialDelete(createAssumedRoleConfig(), false); } @Test public void testPartialDeleteSingleDelete() throws Throwable { describe("delete with part of the child tree read only"); - Configuration conf = createAssumedRoleConfig(); - conf.setBoolean(ENABLE_MULTI_DELETE, false); - executePartialDelete(conf); + executePartialDelete(createAssumedRoleConfig(), true); } /** * Have a directory with full R/W permissions, but then remove * write access underneath, and try to delete it. * @param conf FS configuration + * @param singleDelete flag to indicate this is a single delete operation */ - public void executePartialDelete(final Configuration conf) + public void executePartialDelete(final Configuration conf, + final boolean singleDelete) throws Exception { + conf.setBoolean(ENABLE_MULTI_DELETE, !singleDelete); Path destDir = methodPath(); Path readOnlyDir = new Path(destDir, "readonlyDir"); @@ -888,25 +722,4 @@ public void executePartialDelete(final Configuration conf) roleFS.delete(pathWhichDoesntExist, true)); } - /** - * Assert that the number of files in a destination matches that expected. 
- * @param text text to use in the message - * @param fs filesystem - * @param path path to list (recursively) - * @param expected expected count - * @throws IOException IO problem - */ - private static void assertFileCount(String text, FileSystem fs, - Path path, long expected) - throws IOException { - List files = new ArrayList<>(); - applyLocatedFiles(fs.listFiles(path, true), - (status) -> files.add(status.getPath().toString())); - long actual = files.size(); - if (actual != expected) { - String ls = files.stream().collect(Collectors.joining("\n")); - fail(text + ": expected " + expected + " files in " + path - + " but got " + actual + "\n" + ls); - } - } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java index dbbaee5f8a9d1..8942d70daa698 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java @@ -18,8 +18,12 @@ package org.apache.hadoop.fs.s3a.auth; +import java.io.IOException; import java.nio.file.AccessDeniedException; +import java.util.List; import java.util.concurrent.Callable; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import com.fasterxml.jackson.core.JsonProcessingException; import org.junit.Assume; @@ -160,18 +164,35 @@ public static Configuration newAssumedRoleConfig( /** * Assert that an operation is forbidden. + * @param type of closure * @param contained contained text, may be null * @param eval closure to evaluate + * @return the access denied exception + * @throws Exception any other exception + */ + public static AccessDeniedException forbidden( + final String contained, + final Callable eval) + throws Exception { + return forbidden("", contained, eval); + } + + /** + * Assert that an operation is forbidden. * @param type of closure + * @param message error message + * @param contained contained text, may be null + * @param eval closure to evaluate * @return the access denied exception * @throws Exception any other exception */ public static AccessDeniedException forbidden( - String contained, - Callable eval) + final String message, + final String contained, + final Callable eval) throws Exception { return intercept(AccessDeniedException.class, - contained, eval); + contained, message, eval); } /** @@ -209,4 +230,23 @@ public static void assertCredentialsEqual(final String message, actual.getSessionToken()); } + + /** + * Parallel-touch a set of files in the destination directory. + * @param fs filesystem + * @param destDir destination + * @param range range 1..range inclusive of files to create. + * @return the list of paths created. 
+ */ + public static List touchFiles(final FileSystem fs, + final Path destDir, + final int range) throws IOException { + List paths = IntStream.rangeClosed(1, range) + .mapToObj((i) -> new Path(destDir, "file-" + i)) + .collect(Collectors.toList()); + for (Path path : paths) { + touch(fs, path); + } + return paths; + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java index ef594e62a7f02..e8645b84bad72 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java @@ -205,6 +205,7 @@ public static String randomJobId() throws Exception { */ @Override public void teardown() throws Exception { + LOG.info("AbstractCommitITest::teardown"); waitForConsistency(); // make sure there are no failures any more resetFailures(); @@ -495,6 +496,7 @@ public static SuccessData validateSuccessFile(final S3AFileSystem fs, public static SuccessData loadSuccessFile(final S3AFileSystem fs, final Path outputPath) throws IOException { Path success = new Path(outputPath, _SUCCESS); + ContractTestUtils.assertIsFile(fs, success); FileStatus status = fs.getFileStatus(success); assertTrue("0 byte success file - not a s3guard committer " + success, status.getLen() > 0); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitMRJob.java index 682931ddd9f39..1fb3d89efd401 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitMRJob.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitMRJob.java @@ -71,6 +71,9 @@ public void testMRJob() throws Exception { S3AFileSystem fs = getFileSystem(); // final dest is in S3A Path outputPath = path(getMethodName()); + // create and delete to force in a tombstone marker -see HADOOP-16207 + fs.mkdirs(outputPath); + fs.delete(outputPath, true); String commitUUID = UUID.randomUUID().toString(); String suffix = isUniqueFilenames() ? 
("-" + commitUUID) : ""; @@ -116,6 +119,7 @@ public void testMRJob() throws Exception { String sysprops = String.format("-Xmx256m -Dlog4j.configuration=%s", log4j); jobConf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, sysprops); + jobConf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, sysprops); jobConf.set("yarn.app.mapreduce.am.command-opts", sysprops); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java index 45f07389c6ad0..2501662fd442b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java @@ -71,7 +71,7 @@ public abstract class AbstractYarnClusterITest extends AbstractCommitITest { LoggerFactory.getLogger(AbstractYarnClusterITest.class); private static final int TEST_FILE_COUNT = 2; - private static final int SCALE_TEST_FILE_COUNT = 20; + private static final int SCALE_TEST_FILE_COUNT = 50; public static final int SCALE_TEST_KEYS = 1000; public static final int BASE_TEST_KEYS = 10; @@ -138,6 +138,12 @@ protected static ClusterBinding createCluster(JobConf conf) return new ClusterBinding(miniDFSClusterService, yarnCluster); } + protected static void terminateCluster(ClusterBinding clusterBinding) { + if (clusterBinding != null) { + clusterBinding.terminate(); + } + } + /** * Get the cluster binding for this subclass * @return diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java index 2886a998b0373..d453715d8a81c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java @@ -21,9 +21,11 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import com.amazonaws.services.s3.model.PartETag; +import com.google.common.collect.Lists; import org.junit.Assume; import org.junit.Test; import org.slf4j.Logger; @@ -36,6 +38,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.Statistic; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; import org.apache.hadoop.fs.s3a.commit.magic.MagicCommitTracker; import org.apache.hadoop.fs.s3a.commit.magic.MagicS3GuardCommitter; @@ -268,12 +271,17 @@ public void testCommitterFactorySchema() throws Throwable { public void testBaseRelativePath() throws Throwable { describe("Test creating file with a __base marker and verify that it ends" + " up in where expected"); + S3AFileSystem fs = getFileSystem(); Path destDir = methodPath("testBaseRelativePath"); + fs.delete(destDir, true); Path pendingBaseDir = new Path(destDir, MAGIC + "/child/" + BASE); String child = "subdir/child.txt"; Path pendingChildPath = new Path(pendingBaseDir, child); Path expectedDestPath = new Path(destDir, child); - createFile(getFileSystem(), pendingChildPath, true, DATASET); + assertPathDoesNotExist("dest file was found before upload", + expectedDestPath); + + createFile(fs, pendingChildPath, true, DATASET); commit("child.txt", pendingChildPath, expectedDestPath, 0, 
0); } @@ -281,7 +289,9 @@ private void createCommitAndVerify(String filename, byte[] data) throws Exception { S3AFileSystem fs = getFileSystem(); Path destFile = methodPath(filename); + fs.delete(destFile.getParent(), true); Path magicDest = makeMagic(destFile); + assertPathDoesNotExist("Magic file should not exist", magicDest); try(FSDataOutputStream stream = fs.create(magicDest, true)) { assertTrue(stream.hasCapability(STREAM_CAPABILITY_MAGIC_OUTPUT)); if (data != null && data.length > 0) { @@ -332,13 +342,21 @@ private void commit(String filename, validateIntermediateAndFinalPaths(magicFile, destFile); SinglePendingCommit commit = SinglePendingCommit.load(getFileSystem(), validatePendingCommitData(filename, magicFile)); - CommitOperations actions = newCommitOperations(); setThrottling(throttle, failures); - actions.commitOrFail(commit); + commitOrFail(destFile, commit, newCommitOperations()); resetFailures(); verifyCommitExists(commit); } + private void commitOrFail(final Path destFile, + final SinglePendingCommit commit, final CommitOperations actions) + throws IOException { + try (CommitOperations.CommitContext commitContext + = actions.initiateCommitOperation(destFile)) { + commitContext.commitOrFail(commit); + } + } + /** * Perform any validation of paths. * @param magicFilePath path to magic file @@ -348,7 +366,7 @@ private void commit(String filename, private void validateIntermediateAndFinalPaths(Path magicFilePath, Path destFile) throws IOException { - assertPathDoesNotExist("dest file was created", destFile); + assertPathDoesNotExist("dest file was found", destFile); } /** @@ -439,7 +457,7 @@ public void testUploadEmptyFile() throws Throwable { resetFailures(); assertPathDoesNotExist("pending commit", dest); fullThrottle(); - actions.commitOrFail(pendingCommit); + commitOrFail(dest, pendingCommit, actions); resetFailures(); FileStatus status = verifyPathExists(fs, "uploaded file commit", dest); @@ -454,15 +472,19 @@ public void testUploadSmallFile() throws Throwable { CommitOperations actions = newCommitOperations(); Path dest = methodPath("testUploadSmallFile"); S3AFileSystem fs = getFileSystem(); + fs.delete(dest, true); fullThrottle(); + assertPathDoesNotExist("test setup", dest); SinglePendingCommit pendingCommit = actions.uploadFileToPendingCommit(tempFile, dest, null, DEFAULT_MULTIPART_SIZE); resetFailures(); + LOG.debug("Precommit validation"); assertPathDoesNotExist("pending commit", dest); fullThrottle(); - actions.commitOrFail(pendingCommit); + LOG.debug("Postcommit validation"); + commitOrFail(dest, pendingCommit, actions); resetFailures(); String s = readUTF8(fs, dest, -1); assertEquals(text, s); @@ -544,4 +566,97 @@ public void testWriteNormalStream() throws Throwable { assertTrue("Empty marker file: " + status, status.getLen() > 0); } + /** + * Creates a bulk commit and commits multiple files. + * If the DDB metastore is in use, use the instrumentation to + * verify that the write count is as expected. + * This is done without actually looking into the store -just monitoring + * changes in the filesystem's instrumentation counters. + * As changes to the store may be made during get/list calls, + * when the counters must be reset before each commit, this must be + * *after* all probes for the outcome of the previous operation. 
+ */ + @Test + public void testBulkCommitFiles() throws Throwable { + describe("verify bulk commit including metastore update count"); + File localFile = File.createTempFile("commit", ".txt"); + CommitOperations actions = newCommitOperations(); + Path destDir = methodPath("out"); + S3AFileSystem fs = getFileSystem(); + fs.delete(destDir, false); + fullThrottle(); + + Path destFile1 = new Path(destDir, "file1"); + // this subdir will only be created in the commit of file 2 + Path subdir = new Path(destDir, "subdir"); + // file 2 + Path destFile2 = new Path(subdir, "file2"); + Path destFile3 = new Path(subdir, "file3"); + List destinations = Lists.newArrayList(destFile1, destFile2, + destFile3); + List commits = new ArrayList<>(3); + + for (Path destination : destinations) { + SinglePendingCommit commit1 = + actions.uploadFileToPendingCommit(localFile, + destination, null, + DEFAULT_MULTIPART_SIZE); + commits.add(commit1); + } + resetFailures(); + assertPathDoesNotExist("destination dir", destDir); + assertPathDoesNotExist("subdirectory", subdir); + LOG.info("Initiating commit operations"); + try (CommitOperations.CommitContext commitContext + = actions.initiateCommitOperation(destDir)) { + // how many records have been written + MetricDiff writes = new MetricDiff(fs, + Statistic.S3GUARD_METADATASTORE_RECORD_WRITES); + LOG.info("Commit #1"); + commitContext.commitOrFail(commits.get(0)); + final String firstCommitContextString = commitContext.toString(); + LOG.info("First Commit state {}", firstCommitContextString); + long writesOnFirstCommit = writes.diff(); + assertPathExists("destFile1", destFile1); + assertPathExists("destination dir", destDir); + + LOG.info("Commit #2"); + writes.reset(); + commitContext.commitOrFail(commits.get(1)); + assertPathExists("subdirectory", subdir); + assertPathExists("destFile2", destFile2); + final String secondCommitContextString = commitContext.toString(); + LOG.info("Second Commit state {}", secondCommitContextString); + + if (writesOnFirstCommit != 0) { + LOG.info("DynamoDB Metastore is in use: checking write count"); + // S3Guard is in use against DDB, so the metrics can be checked + // to see how many records were updated. + // there should only be two new entries: one for the file and + // one for the parent. + // we include the string values of the contexts because that includes + // the internals of the bulk operation state. 
+ writes.assertDiffEquals("Number of records written after commit #2" + + "; first commit had " + writesOnFirstCommit + + "; first commit ancestors " + firstCommitContextString + + "; second commit ancestors: " + secondCommitContextString, + 2); + } + + LOG.info("Commit #3"); + writes.reset(); + commitContext.commitOrFail(commits.get(2)); + assertPathExists("destFile3", destFile3); + if (writesOnFirstCommit != 0) { + // this file is in the same dir as destFile2, so only its entry + // is added + writes.assertDiffEquals( + "Number of records written after third commit; " + + "first commit had " + writesOnFirstCommit, + 1); + } + } + resetFailures(); + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitMRJob.java index a9b9c2cbe1e1d..e403ab49b168e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitMRJob.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitMRJob.java @@ -18,17 +18,23 @@ package org.apache.hadoop.fs.s3a.commit.magic; +import java.io.FileNotFoundException; import java.io.IOException; import org.junit.AfterClass; import org.junit.BeforeClass; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.commit.AbstractITCommitMRJob; import org.apache.hadoop.fs.s3a.commit.files.SuccessData; import org.apache.hadoop.mapred.JobConf; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.lsR; +import static org.apache.hadoop.fs.s3a.S3AUtils.applyLocatedFiles; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** * Full integration test for the Magic Committer. 
@@ -55,7 +61,7 @@ public static void setupClusters() throws IOException { @AfterClass public static void teardownClusters() throws IOException { - clusterBinding.terminate(); + terminateCluster(clusterBinding); } @Override @@ -93,6 +99,22 @@ protected void applyCustomConfigOptions(JobConf conf) { @Override protected void customPostExecutionValidation(Path destPath, SuccessData successData) throws Exception { - assertPathDoesNotExist("No cleanup", new Path(destPath, MAGIC)); + Path magicDir = new Path(destPath, MAGIC); + + // if an FNFE isn't raised on getFileStatus, list out the directory + // tree + S3AFileSystem fs = getFileSystem(); + // log the contents + lsR(fs, destPath, true); + intercept(FileNotFoundException.class, () -> { + final FileStatus st = fs.getFileStatus(magicDir); + StringBuilder result = new StringBuilder("Found magic dir which should" + + " have been deleted at ").append(st).append('\n'); + result.append("["); + applyLocatedFiles(fs.listFiles(magicDir, true), + (status) -> result.append(status.getPath()).append('\n')); + result.append("["); + return result.toString(); + }); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java index 072295962ce36..9a2ad0ee9da10 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java @@ -143,9 +143,12 @@ public void test_030_postCreationAssertions() throws Throwable { assertNotNull("jobDir", jobDir); Pair>> results = operations.loadSinglePendingCommits(jobDir, false); - for (SinglePendingCommit singlePendingCommit : - results.getKey().getCommits()) { - operations.commitOrFail(singlePendingCommit); + try(CommitOperations.CommitContext commitContext + = operations.initiateCommitOperation(jobDir)) { + for (SinglePendingCommit singlePendingCommit : + results.getKey().getCommits()) { + commitContext.commitOrFail(singlePendingCommit); + } } timer.end("time to commit %s", pendingDataFile); // upload is no longer pending diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestDirectoryCommitMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestDirectoryCommitMRJob.java index 8d44ddba56484..1e44086b1e125 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestDirectoryCommitMRJob.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestDirectoryCommitMRJob.java @@ -46,7 +46,7 @@ public static void setupClusters() throws IOException { @AfterClass public static void teardownClusters() throws IOException { - clusterBinding.terminate(); + terminateCluster(clusterBinding); } @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestPartitionCommitMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestPartitionCommitMRJob.java index f71479c203bab..6106974ce74ed 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestPartitionCommitMRJob.java +++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestPartitionCommitMRJob.java @@ -47,7 +47,7 @@ public static void setupClusters() throws IOException { @AfterClass public static void teardownClusters() throws IOException { - clusterBinding.terminate(); + terminateCluster(clusterBinding); } @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitMRJob.java index d4a351f55c77e..218c72ac50ea0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitMRJob.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitMRJob.java @@ -58,7 +58,7 @@ public static void setupClusters() throws IOException { @AfterClass public static void teardownClusters() throws IOException { - clusterBinding.terminate(); + terminateCluster(clusterBinding); } @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitMRJobBadDest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitMRJobBadDest.java index 68926f972ac68..72488132faf76 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitMRJobBadDest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitMRJobBadDest.java @@ -53,7 +53,7 @@ public static void setupClusters() throws IOException { @AfterClass public static void teardownClusters() throws IOException { - clusterBinding.terminate(); + terminateCluster(clusterBinding); } @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/AbstractCommitTerasortIT.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/AbstractCommitTerasortIT.java index 491ecb999851f..7db3068f49a4c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/AbstractCommitTerasortIT.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/AbstractCommitTerasortIT.java @@ -238,4 +238,9 @@ public void test_140_teracomplete() throws Throwable { public void test_150_teracleanup() throws Throwable { terasortDuration = Optional.empty(); } + + @Test + public void test_200_directory_deletion() throws Throwable { + getFileSystem().delete(terasortPath, true); + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java new file mode 100644 index 0000000000000..942f0b6658077 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java @@ -0,0 +1,871 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.IOException; +import java.nio.file.AccessDeniedException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import com.amazonaws.services.s3.model.MultiObjectDeleteException; +import com.google.common.base.Charsets; +import com.google.common.util.concurrent.ListeningExecutorService; +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; +import org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation; +import org.apache.hadoop.util.BlockingThreadPoolExecutorService; +import org.apache.hadoop.util.DurationInfo; + +import static org.apache.hadoop.fs.contract.ContractTestUtils.*; +import static org.apache.hadoop.fs.s3a.Constants.*; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.MetricDiff; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; +import static org.apache.hadoop.fs.s3a.S3AUtils.applyLocatedFiles; +import static org.apache.hadoop.fs.s3a.Statistic.FILES_DELETE_REJECTED; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_DELETE_REQUESTS; +import static org.apache.hadoop.fs.s3a.auth.RoleModel.Effects; +import static org.apache.hadoop.fs.s3a.auth.RoleModel.Statement; +import static org.apache.hadoop.fs.s3a.auth.RoleModel.directory; +import static org.apache.hadoop.fs.s3a.auth.RoleModel.statement; +import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*; +import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.bindRolePolicyStatements; +import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.forbidden; +import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.newAssumedRoleConfig; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion; +import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.extractUndeletedPaths; +import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.removeUndeletedPaths; +import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.assertFileCount; +import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.extractCause; +import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; +import static org.apache.hadoop.test.LambdaTestUtils.eval; + +/** + * Test partial failures of delete and rename operations, especially + * that the S3Guard tables are consistent 
with the state of + * the filesystem. + * + * All these tests have a unique path for each run, with a roleFS having + * full RW access to part of it, and R/O access to a restricted subdirectory + * + *
    + * <ol>
    + *   <li>
    + *     The tests are parameterized to single/multi delete, which control which
    + *     of the two delete mechanisms are used.
    + *   </li>
    + *   <li>
    + *     In multi delete, in a scale test run, a significantly larger set of files
    + *     is created and then deleted.
    + *   </li>
    + *   <li>
    + *     This isn't done in the single delete as it is much slower and it is not
    + *     the situation we are trying to create.
    + *   </li>
    + * </ol>
    + * + * This test manages to create lots of load on the s3guard prune command + * when that is tested in a separate test suite; + * too many tombstone files for the test to complete. + * An attempt is made in {@link #deleteTestDirInTeardown()} to prune these test + * files. + */ +@SuppressWarnings("ThrowableNotThrown") +@RunWith(Parameterized.class) +public class ITestPartialRenamesDeletes extends AbstractS3ATestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestPartialRenamesDeletes.class); + + private static final Path ROOT = new Path("/"); + + private static final Statement STATEMENT_ALL_BUCKET_READ_ACCESS + = statement(true, S3_ALL_BUCKETS, S3_BUCKET_READ_OPERATIONS); + + /** Many threads for scale performance: {@value}. */ + public static final int EXECUTOR_THREAD_COUNT = 64; + + /** + * For submitting work. + */ + private static final ListeningExecutorService EXECUTOR = + BlockingThreadPoolExecutorService.newInstance( + EXECUTOR_THREAD_COUNT, + EXECUTOR_THREAD_COUNT * 2, + 30, TimeUnit.SECONDS, + "test-operations"); + + + /** + * The number of files in a non-scaled test. + *

    + * Value: {@value}. + */ + public static final int FILE_COUNT_NON_SCALED = 2; + + /** + * The number of files for a scaled test. This is still + * less than half the amount which can be fitted into a delete + * request, so that even with this many R/W and R/O files, + * both can fit in the same request. + * Then, when a partial delete occurs, we can make assertions + * knowing that all R/W files should have been deleted and all + * R/O files rejected. + *

    + * Value: {@value}. + */ + public static final int FILE_COUNT_SCALED = 10; + + public static final int DIR_COUNT = 2; + public static final int DIR_COUNT_SCALED = 4; + public static final int DEPTH = 2; + public static final int DEPTH_SCALED = 2; + + /** + * A role FS; if non-null it is closed in teardown. + */ + private S3AFileSystem roleFS; + + /** + * Base path for this test run. + * This is generated uniquely for each test. + */ + private Path basePath; + + /** + * A directory which restricted roles have full write access to. + */ + private Path writableDir; + + /** + * A directory to which restricted roles have only read access. + */ + private Path readOnlyDir; + + /** + * A file under {@link #readOnlyDir} which cannot be written or deleted. + */ + private Path readOnlyChild; + + /** + * A directory to which restricted roles have no read access. + */ + private Path noReadDir; + + /** delete policy: single or multi? */ + private final boolean multiDelete; + + /** + * Configuration for the assume role FS. + */ + private Configuration assumedRoleConfig; + + private int fileCount; + private int dirCount; + private int dirDepth; + + /** + * Was the -Dscale switch passed in to the test run? + */ + private boolean scaleTest; + + /** + * Test array for parameterized test runs. + *

    + * <ul>
    + *   <li>Run 0: single deletes</li>
    + *   <li>Run 1: multi deletes</li>
    + * </ul>
    + * + * @return a list of parameter tuples. + */ + @Parameterized.Parameters(name = "bulk-delete={0}") + public static Collection params() { + return Arrays.asList(new Object[][]{ + {false}, + {true}, + }); + } + + /** + * Constructor. + * @param multiDelete single vs multi delete in the role FS? + */ + public ITestPartialRenamesDeletes(final boolean multiDelete) { + this.multiDelete = multiDelete; + } + + /** + * This sets up a unique path for every test run, so as to guarantee isolation + * from previous runs. + * It creates a role policy which has read access to everything except + * the contents of {@link #noReadDir}, and with write access to + * {@link #writableDir}. + * @throws Exception failure + */ + @Override + public void setup() throws Exception { + super.setup(); + assumeRoleTests(); + basePath = uniquePath(); + readOnlyDir = new Path(basePath, "readonlyDir"); + writableDir = new Path(basePath, "writableDir"); + readOnlyChild = new Path(readOnlyDir, "child"); + noReadDir = new Path(basePath, "noReadDir"); + // the full FS + S3AFileSystem fs = getFileSystem(); + fs.delete(basePath, true); + fs.mkdirs(writableDir); + + // create the baseline assumed role + assumedRoleConfig = createAssumedRoleConfig(); + bindRolePolicyStatements(assumedRoleConfig, + STATEMENT_S3GUARD_CLIENT, + STATEMENT_ALL_BUCKET_READ_ACCESS, // root: r-x + new Statement(Effects.Allow) // dest: rwx + .addActions(S3_PATH_RW_OPERATIONS) + .addResources(directory(writableDir)), + new Statement(Effects.Deny) // noReadDir: --x + .addActions(S3_ALL_GET) + .addActions(S3_ALL_PUT) + .addActions(S3_ALL_DELETE) + .addResources(directory(noReadDir))); + // the role configured to that set of restrictions + roleFS = (S3AFileSystem) readOnlyDir.getFileSystem(assumedRoleConfig); + + // switch to the big set of files iff this is a multidelete run + // with -Dscale set. + // without that the DELETE calls become a key part of the bottleneck + scaleTest = multiDelete && getTestPropertyBool( + getConfiguration(), + KEY_SCALE_TESTS_ENABLED, + DEFAULT_SCALE_TESTS_ENABLED); + fileCount = scaleTest ? FILE_COUNT_SCALED : FILE_COUNT_NON_SCALED; + dirCount = scaleTest ? DIR_COUNT_SCALED : DIR_COUNT; + dirDepth = scaleTest ? DEPTH_SCALED : DEPTH; + } + + @Override + public void teardown() throws Exception { + cleanupWithLogger(LOG, roleFS); + super.teardown(); + } + + /** + * Directory cleanup includes pruning everything under the path. + * This ensures that any in the tree from failed tests don't fill up + * the store with many, many, deleted entries. + * @throws IOException failure. + */ + @Override + protected void deleteTestDirInTeardown() throws IOException { + super.deleteTestDirInTeardown(); + Path path = getContract().getTestPath(); + try { + prune(path); + } catch (IOException e) { + LOG.warn("When pruning the test directory {}", path, e); + } + } + + private void assumeRoleTests() { + assume("No ARN for role tests", !getAssumedRoleARN().isEmpty()); + } + + private String getAssumedRoleARN() { + return getContract().getConf().getTrimmed(ASSUMED_ROLE_ARN, ""); + } + + /** + * Create the assumed role configuration. + * @return a config bonded to the ARN of the assumed role + */ + public Configuration createAssumedRoleConfig() { + return createAssumedRoleConfig(getAssumedRoleARN()); + } + + /** + * Create a config for an assumed role; it also disables FS caching + * and sets the multi delete option to that of the current mode. 
+ * @param roleARN ARN of role + * @return the new configuration + */ + private Configuration createAssumedRoleConfig(String roleARN) { + Configuration conf = newAssumedRoleConfig(getContract().getConf(), + roleARN); + String bucketName = getTestBucketName(conf); + + removeBucketOverrides(bucketName, conf, ENABLE_MULTI_DELETE); + conf.setBoolean(ENABLE_MULTI_DELETE, multiDelete); + return conf; + } + + @Override + protected Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + String bucketName = getTestBucketName(conf); + + // ramp up the number of connections we can have for maximum PUT + // performance + removeBucketOverrides(bucketName, conf, + MAX_THREADS, + MAXIMUM_CONNECTIONS, + S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY); + conf.setInt(MAX_THREADS, EXECUTOR_THREAD_COUNT); + conf.setInt(MAXIMUM_CONNECTIONS, EXECUTOR_THREAD_COUNT * 2); + // turn off prune delays, so as to stop scale tests creating + // so much cruft that future CLI prune commands take forever + conf.setInt(S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY, 0); + return conf; + } + + /** + * Create a unique path, which includes method name, + * multidelete flag and a random UUID. + * @return a string to use for paths. + * @throws IOException path creation failure. + */ + private Path uniquePath() throws IOException { + return path( + String.format("%s-%s-%04d", + getMethodName(), + multiDelete ? "multi" : "single", + System.currentTimeMillis() % 10000)); + } + + /** + * This is here to verify role and path setup. + */ + @Test + public void testCannotTouchUnderRODir() throws Throwable { + forbidden("touching the empty child " + readOnlyChild, + "", + () -> { + touch(roleFS, readOnlyChild); + return readOnlyChild; + }); + } + @Test + public void testCannotReadUnderNoReadDir() throws Throwable { + Path path = new Path(noReadDir, "unreadable.txt"); + createFile(getFileSystem(), path, true, "readonly".getBytes()); + forbidden("trying to read " + path, + "", + () -> readUTF8(roleFS, path, -1)); + } + + @Test + public void testMultiDeleteOptionPropagated() throws Throwable { + describe("Verify the test parameter propagates to the store context"); + StoreContext ctx = roleFS.createStoreContext(); + Assertions.assertThat(ctx.isMultiObjectDeleteEnabled()) + .as(ctx.toString()) + .isEqualTo(multiDelete); + } + + /** + * Execute a sequence of rename operations with access locked down. 
+ */ + @Test + public void testRenameParentPathNotWriteable() throws Throwable { + describe("rename with parent paths not writeable; multi=%s", multiDelete); + final Configuration conf = createAssumedRoleConfig(); + bindRolePolicyStatements(conf, + STATEMENT_S3GUARD_CLIENT, + STATEMENT_ALLOW_SSE_KMS_RW, + STATEMENT_ALL_BUCKET_READ_ACCESS, + new Statement(Effects.Allow) + .addActions(S3_PATH_RW_OPERATIONS) + .addResources(directory(readOnlyDir)) + .addResources(directory(writableDir))); + roleFS = (S3AFileSystem) readOnlyDir.getFileSystem(conf); + + S3AFileSystem fs = getFileSystem(); + roleFS.getFileStatus(ROOT); + fs.mkdirs(readOnlyDir); + // you can create an adjacent child + touch(fs, readOnlyChild); + + fs.delete(writableDir, true); + // as dest doesn't exist, this will map child -> dest + assertRenameOutcome(roleFS, readOnlyChild, writableDir, true); + + assertIsFile(writableDir); + assertIsDirectory(readOnlyDir); + Path renamedDestPath = new Path(readOnlyDir, writableDir.getName()); + assertRenameOutcome(roleFS, writableDir, readOnlyDir, true); + assertIsFile(renamedDestPath); + roleFS.delete(readOnlyDir, true); + roleFS.delete(writableDir, true); + } + + @Test + public void testRenameSingleFileFailsInDelete() throws Throwable { + describe("rename with source read only; multi=%s", multiDelete); + Path readOnlyFile = readOnlyChild; + + // the full FS + S3AFileSystem fs = getFileSystem(); + fs.delete(basePath, true); + + // this file is readable by the roleFS, but cannot be deleted + touch(fs, readOnlyFile); + + roleFS.delete(writableDir, true); + roleFS.mkdirs(writableDir); + // rename will fail in the delete phase + expectRenameForbidden(readOnlyFile, writableDir); + + // and the source file is still there + assertIsFile(readOnlyFile); + + // and so is the copied version, because there's no attempt + // at rollback, or preflight checking on the delete permissions + Path renamedFile = new Path(writableDir, readOnlyFile.getName()); + + assertIsFile(renamedFile); + + ContractTestUtils.assertDeleted(roleFS, renamedFile, true); + assertFileCount("Empty Dest Dir", roleFS, + writableDir, 0); + } + + /** + * Execute a sequence of rename operations where the source + * data is read only to the client calling rename(). + * This will cause the inner delete() operations to fail, whose outcomes + * are explored. + * Multiple files are created (in parallel) for some renames, so the test + * explores the outcome on bulk delete calls, including verifying that a + * MultiObjectDeleteException is translated to an AccessDeniedException. + *
+ * <ol>
+ *   <li>The exception raised is AccessDeniedException,
+ *   from single and multi DELETE calls.</li>
+ *   <li>It happens after the COPY. Not ideal, but, well, we can't pretend
+ *   it's a filesystem forever.</li>
+ * </ol>
    + */ + @Test + public void testRenameDirFailsInDelete() throws Throwable { + describe("rename with source read only; multi=%s", multiDelete); + + // the full FS + S3AFileSystem fs = getFileSystem(); + + roleFS.mkdirs(writableDir); + + // create a set of files + // this is done in parallel as it is 10x faster on a long-haul test run. + List createdFiles = createFiles(fs, readOnlyDir, dirDepth, fileCount, + dirCount); + // are they all there? + int expectedFileCount = createdFiles.size(); + assertFileCount("files ready to rename", roleFS, + readOnlyDir, expectedFileCount); + + // try to rename the directory + LOG.info("Renaming readonly files {} to {}", readOnlyDir, writableDir); + + AccessDeniedException deniedException = expectRenameForbidden(readOnlyDir, + writableDir); + if (multiDelete) { + // look in that exception for a multidelete + MultiObjectDeleteException mde = extractCause( + MultiObjectDeleteException.class, deniedException); + final List undeleted + = extractUndeletedPaths(mde, fs::keyToQualifiedPath); + Assertions.assertThat(undeleted) + .as("files which could not be deleted") + .hasSize(expectedFileCount) + .containsAll(createdFiles) + .containsExactlyInAnyOrderElementsOf(createdFiles); + } + LOG.info("Result of renaming read-only files is as expected", + deniedException); + assertFileCount("files in the source directory", roleFS, + readOnlyDir, expectedFileCount); + // now lets look at the destination. + // even with S3Guard on, we expect the destination to match that of our + // the remote state. + // the test will exist + describe("Verify destination directory exists"); + FileStatus st = roleFS.getFileStatus(writableDir); + assertTrue("Not a directory: " + st, + st.isDirectory()); + assertFileCount("files in the dest directory", roleFS, + writableDir, expectedFileCount); + } + + @Test + public void testRenameFileFailsNoWrite() throws Throwable { + describe("Try to rename to a write-only destination fails with src" + + " & dest unchanged."); + roleFS.mkdirs(writableDir); + S3AFileSystem fs = getFileSystem(); + Path source = new Path(writableDir, "source"); + touch(fs, source); + fs.mkdirs(readOnlyDir); + Path dest = new Path(readOnlyDir, "dest"); + describe("Renaming files {} to {}", writableDir, dest); + // rename fails but doesn't raise an exception. Good or bad? 
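+ // Note: expectRenameForbidden() below only passes when rename() raises an
+ // AccessDeniedException; if rename() merely returned false, forbidden()
+ // would fail the test with the returned result after logging the source
+ // and destination listings.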
+ expectRenameForbidden(source, dest); + assertIsFile(source); + assertPathDoesNotExist("rename destination", dest); + } + + @Test + public void testCopyDirFailsToReadOnlyDir() throws Throwable { + describe("Try to copy to a read-only destination"); + roleFS.mkdirs(writableDir); + S3AFileSystem fs = getFileSystem(); + List files = createFiles(fs, writableDir, dirDepth, fileCount, + dirCount); + + fs.mkdirs(readOnlyDir); + Path dest = new Path(readOnlyDir, "dest"); + expectRenameForbidden(writableDir, dest); + assertPathDoesNotExist("rename destination", dest); + assertFileCount("files in the source directory", roleFS, + writableDir, files.size()); + } + + @Test + public void testCopyFileFailsOnSourceRead() throws Throwable { + describe("The source file isn't readable, so the COPY fails"); + Path source = new Path(noReadDir, "source"); + S3AFileSystem fs = getFileSystem(); + touch(fs, source); + fs.mkdirs(writableDir); + Path dest = new Path(writableDir, "dest"); + expectRenameForbidden(source, dest); + assertIsFile(source); + assertPathDoesNotExist("rename destination", dest); + } + + @Test + public void testCopyDirFailsOnSourceRead() throws Throwable { + describe("The source file isn't readable, so the COPY fails"); + S3AFileSystem fs = getFileSystem(); + List files = createFiles(fs, noReadDir, dirDepth, fileCount, + dirCount); + fs.mkdirs(writableDir); + Path dest = new Path(writableDir, "dest"); + expectRenameForbidden(noReadDir, dest); + assertFileCount("files in the source directory", fs, + noReadDir, files.size()); + } + + /** + * Have a directory with full R/W permissions, but then remove + * write access underneath, and try to delete it. + * This verifies that failures in the delete fake dir stage. + * are not visible. + */ + @Test + public void testPartialEmptyDirDelete() throws Throwable { + describe("delete an empty directory with parent dir r/o" + + " multidelete=%s", multiDelete); + + // the full FS + final Path deletableChild = new Path(writableDir, "deletableChild"); + // deletable child is created. + roleFS.mkdirs(deletableChild); + assertPathExists("parent dir after create", writableDir); + assertPathExists("grandparent dir after create", writableDir.getParent()); + // now delete it. + roleFS.delete(deletableChild, true); + assertPathExists("parent dir after deletion", writableDir); + assertPathExists("grandparent dir after deletion", writableDir.getParent()); + assertPathDoesNotExist("deletable dir after deletion", deletableChild); + } + + /** + * Have a directory with full R/W permissions, but then remove + * write access underneath, and try to delete it. 
+ */ + @Test + public void testPartialDirDelete() throws Throwable { + describe("delete with part of the child tree read only;" + + " multidelete=%s", multiDelete); + + // the full FS + S3AFileSystem fs = getFileSystem(); + + List readOnlyFiles = createFiles(fs, readOnlyDir, + dirDepth, fileCount, dirCount); + List deletableFiles = createFiles(fs, + writableDir, dirDepth, fileCount, dirCount); + + // as a safety check, verify that one of the deletable files can be deleted + Path head = deletableFiles.remove(0); + assertTrue("delete " + head + " failed", + roleFS.delete(head, false)); + List allFiles = Stream.concat( + readOnlyFiles.stream(), + deletableFiles.stream()) + .collect(Collectors.toList()); + + // this set can be deleted by the role FS + MetricDiff rejectionCount = new MetricDiff(roleFS, FILES_DELETE_REJECTED); + MetricDiff deleteVerbCount = new MetricDiff(roleFS, OBJECT_DELETE_REQUESTS); + + describe("Trying to delete read only directory"); + AccessDeniedException ex = expectDeleteForbidden(readOnlyDir); + if (multiDelete) { + // multi-delete status checks + extractCause(MultiObjectDeleteException.class, ex); + rejectionCount.assertDiffEquals("Wrong rejection count", + readOnlyFiles.size()); + deleteVerbCount.assertDiffEquals("Wrong delete count", 1); + reset(rejectionCount, deleteVerbCount); + } + // all the files are still there? (avoid in scale test due to cost) + if (!scaleTest) { + readOnlyFiles.forEach(this::pathMustExist); + } + + describe("Trying to delete upper-level directory"); + ex = expectDeleteForbidden(basePath); + if (multiDelete) { + // multi-delete status checks + extractCause(MultiObjectDeleteException.class, ex); + deleteVerbCount.assertDiffEquals("Wrong delete count", 1); + MultiObjectDeleteException mde = extractCause( + MultiObjectDeleteException.class, ex); + final List undeleted + = removeUndeletedPaths(mde, allFiles, fs::keyToQualifiedPath); + Assertions.assertThat(undeleted) + .as("files which could not be deleted") + .containsExactlyInAnyOrderElementsOf(readOnlyFiles); + Assertions.assertThat(allFiles) + .as("files which were deleted") + .containsExactlyInAnyOrderElementsOf(deletableFiles); + rejectionCount.assertDiffEquals("Wrong rejection count", + readOnlyFiles.size()); + } + reset(rejectionCount, deleteVerbCount); + + // build the set of all paths under the directory tree through + // a directory listing (i.e. not getFileStatus()). + // small risk of observed inconsistency here on unguarded stores. + final Set readOnlyListing = listFilesUnderPath(readOnlyDir, true); + + String directoryList = readOnlyListing.stream() + .map(Path::toString) + .collect(Collectors.joining(", ", "[", "]")); + + Assertions.assertThat(readOnlyListing) + .as("ReadOnly directory " + directoryList) + .containsAll(readOnlyFiles); + + // do this prune in the test as well as teardown, so that the test + // reporting includes it in the runtime of a successful run. + prune(basePath); + } + + /** + * Expect the delete() call to fail. + * @param path path to delete. + * @return the expected exception. + * @throws Exception any other failure. + */ + private AccessDeniedException expectDeleteForbidden(Path path) + throws Exception { + try (DurationInfo ignored = + new DurationInfo(LOG, true, "delete %s", path)) { + return forbidden("Expected an error deleting " + path, + "", + () -> { + boolean r = roleFS.delete(path, true); + return " delete=" + r + " " + ls(path.getParent()); + }); + } + } + + /** + * Expect that a rename will fail with an exception using the roleFS. 
+ * @param src source + * @param dest dest + * @return the exception raised. + * @throws Exception any other failure + */ + private AccessDeniedException expectRenameForbidden(Path src, Path dest) + throws Exception { + try (DurationInfo ignored = + new DurationInfo(LOG, true, + "rename(%s, %s)", src, dest)) { + return forbidden( + "Renaming " + src + " to " + dest, + "", + () -> { + boolean result = roleFS.rename(src, dest); + LOG.error("Rename should have been forbidden but returned {}", + result); + LOG.error("Source directory:\n{}", + ContractTestUtils.ls(getFileSystem(), src.getParent())); + LOG.error("Destination directory:\n{}", + ContractTestUtils.ls(getFileSystem(), src.getParent())); + return "Rename unexpectedly returned " + result; + }); + } + } + + /** + * Assert that a path must exist, map IOEs to RTEs for loops. + * @param p path. + */ + private void pathMustExist(Path p) { + eval(() -> assertPathExists("Missing path", p)); + } + + /** + * Prune the store for everything under the test path. + * @param path path. + * @throws IOException on failure. + */ + private void prune(Path path) throws IOException { + S3AFileSystem fs = getFileSystem(); + if (fs.hasMetadataStore()) { + MetadataStore store = fs.getMetadataStore(); + try (DurationInfo ignored = + new DurationInfo(LOG, true, "prune %s", path)) { + store.prune( + MetadataStore.PruneMode.ALL_BY_MODTIME, + System.currentTimeMillis(), + PathMetadataDynamoDBTranslation.pathToParentKey(fs.qualify(path))); + } + } + } + + /** + * List all files under a path. + * @param path path to list + * @param recursive recursive listing? + * @return an unordered set of the paths. + * @throws IOException failure + */ + private Set listFilesUnderPath(Path path, boolean recursive) + throws IOException { + Set files = new TreeSet<>(); + try (DurationInfo ignore = + new DurationInfo(LOG, "ls -R %s", path)) { + applyLocatedFiles(getFileSystem().listFiles(path, recursive), + (status) -> files.add(status.getPath())); + } + return files; + } + + /** + * Write the text to a file asynchronously. Logs the operation duration. + * @param fs filesystem + * @param path path + * @return future to the patch created. + */ + private static CompletableFuture put(FileSystem fs, + Path path, String text) { + return submit(EXECUTOR, () -> { + try (DurationInfo ignore = + new DurationInfo(LOG, false, "Creating %s", path)) { + createFile(fs, path, true, text.getBytes(Charsets.UTF_8)); + return path; + } + }); + } + + /** + * Parallel-touch a set of files in the destination directory. + * @param fs filesystem + * @param destDir destination + * @param depth file depth + * @param fileCount number of files to create. + * @param dirCount number of dirs to create at each level + * @return the list of files created. + */ + public static List createFiles(final FileSystem fs, + final Path destDir, + final int depth, + final int fileCount, + final int dirCount) throws IOException { + List> futures = new ArrayList<>(fileCount); + List paths = new ArrayList<>(fileCount); + List dirs = new ArrayList<>(fileCount); + buildPaths(paths, dirs, destDir, depth, fileCount, dirCount); + try (DurationInfo ignore = + new DurationInfo(LOG, "Creating %d files", fileCount)) { + for (Path path : paths) { + futures.add(put(fs, path, path.getName())); + } + waitForCompletion(futures); + return paths; + } + } + + /** + * Recursive method to build up lists of files and directories. + * @param filePaths list of file paths to add entries to. 
+ * @param dirPaths list of directory paths to add entries to. + * @param destDir destination directory. + * @param depth depth of directories + * @param fileCount number of files. + * @param dirCount number of directories. + */ + private static void buildPaths( + final List filePaths, + final List dirPaths, + final Path destDir, + final int depth, + final int fileCount, + final int dirCount) { + if (depth<=0) { + return; + } + // create the file paths + for (int i = 0; i < fileCount; i++) { + String name = "file-" + i; + Path p = new Path(destDir, name); + filePaths.add(p); + } + for (int i = 0; i < dirCount; i++) { + String name = "dir-" + i; + Path p = new Path(destDir, name); + dirPaths.add(p); + buildPaths(filePaths, dirPaths, p, depth - 1, fileCount, dirCount); + } + + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java new file mode 100644 index 0000000000000..c44fa00800444 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java @@ -0,0 +1,393 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.impl; + +import javax.annotation.Nullable; +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.MultiObjectDeleteException; +import com.google.common.collect.Lists; +import org.assertj.core.api.Assertions; +import org.junit.Before; +import org.junit.Test; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.Constants; +import org.apache.hadoop.fs.s3a.Invoker; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.S3AInputPolicy; +import org.apache.hadoop.fs.s3a.S3AInstrumentation; +import org.apache.hadoop.fs.s3a.S3AStorageStatistics; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; +import org.apache.hadoop.fs.s3a.s3guard.DirListingMetadata; +import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; +import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; +import org.apache.hadoop.fs.s3a.s3guard.PathMetadata; +import org.apache.hadoop.fs.s3a.s3guard.RenameTracker; +import org.apache.hadoop.fs.s3a.s3guard.S3Guard; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.BlockingThreadPoolExecutorService; + +import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.ACCESS_DENIED; +import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.removeUndeletedPaths; +import static org.junit.Assert.assertEquals; + +/** + * Unit test suite covering translation of AWS SDK exceptions to S3A exceptions, + * and retry/recovery policies. 
+ */ +public class TestPartialDeleteFailures { + + private static final ContextAccessors CONTEXT_ACCESSORS + = new MinimalContextAccessor(); + + private StoreContext context; + + private static Path qualifyKey(String k) { + return new Path("s3a://bucket/" + k); + } + + @Before + public void setUp() throws Exception { + context = createMockStoreContext(true, + new OperationTrackingStore()); + } + + @Test + public void testDeleteExtraction() { + List src = pathList("a", "a/b", "a/c"); + List rejected = pathList("a/b"); + MultiObjectDeleteException ex = createDeleteException(ACCESS_DENIED, + rejected); + List undeleted = removeUndeletedPaths(ex, src, + TestPartialDeleteFailures::qualifyKey); + assertEquals("mismatch of rejected and undeleted entries", + rejected, undeleted); + } + + @Test + public void testSplitKeysFromResults() throws Throwable { + List src = pathList("a", "a/b", "a/c"); + List rejected = pathList("a/b"); + List keys = keysToDelete(src); + MultiObjectDeleteException ex = createDeleteException(ACCESS_DENIED, + rejected); + Pair, List> pair = + new MultiObjectDeleteSupport(context) + .splitUndeletedKeys(ex, keys); + List undeleted = pair.getLeft(); + List deleted = pair.getRight(); + assertEquals(rejected, undeleted); + // now check the deleted list to verify that it is valid + src.remove(rejected.get(0)); + assertEquals(src, deleted); + } + + /** + * Build a list of qualified paths from vararg parameters. + * @param paths paths to qualify and then convert to a lst. + * @return same paths as a list. + */ + private List pathList(String... paths) { + return Arrays.stream(paths) + .map(TestPartialDeleteFailures::qualifyKey) + .collect(Collectors.toList()); + } + + /** + * Build a delete exception containing all the rejected paths. + * The list of successful entries is empty. + * @param rejected the rejected paths. + * @return a new exception + */ + private MultiObjectDeleteException createDeleteException( + final String code, + final List rejected) { + List errors = rejected.stream() + .map((p) -> { + MultiObjectDeleteException.DeleteError e + = new MultiObjectDeleteException.DeleteError(); + e.setKey(p.toUri().getPath()); + e.setCode(code); + e.setMessage("forbidden"); + return e; + }).collect(Collectors.toList()); + return new MultiObjectDeleteException(errors, Collections.emptyList()); + } + + /** + * From a list of paths, build up the list of keys for a delete request. + * @param paths path list + * @return a key list suitable for a delete request. + */ + public static List keysToDelete( + List paths) { + return paths.stream() + .map((p) -> p.toUri().getPath()) + .map(DeleteObjectsRequest.KeyVersion::new) + .collect(Collectors.toList()); + } + + /** + * Verify that on a partial delete, the S3Guard tables are updated + * with deleted items. And only them. 
+ */ + @Test + public void testProcessDeleteFailure() throws Throwable { + Path pathA = qualifyKey("/a"); + Path pathAB = qualifyKey("/a/b"); + Path pathAC = qualifyKey("/a/c"); + List src = Lists.newArrayList(pathA, pathAB, pathAC); + List keyList = keysToDelete(src); + List deleteForbidden = Lists.newArrayList(pathAB); + final List deleteAllowed = Lists.newArrayList(pathA, pathAC); + MultiObjectDeleteException ex = createDeleteException(ACCESS_DENIED, + deleteForbidden); + OperationTrackingStore store + = new OperationTrackingStore(); + StoreContext storeContext = createMockStoreContext(true, store); + MultiObjectDeleteSupport deleteSupport + = new MultiObjectDeleteSupport(storeContext); + Triple, List, List>> + triple = deleteSupport.processDeleteFailure(ex, keyList); + Assertions.assertThat(triple.getRight()) + .as("failure list") + .isEmpty(); + List undeleted = triple.getLeft(); + List deleted = triple.getMiddle(); + Assertions.assertThat(deleted). + as("deleted files") + .containsAll(deleteAllowed) + .doesNotContainAnyElementsOf(deleteForbidden); + Assertions.assertThat(undeleted). + as("undeleted store entries") + .containsAll(deleteForbidden) + .doesNotContainAnyElementsOf(deleteAllowed); + } + + + private StoreContext createMockStoreContext(boolean multiDelete, + OperationTrackingStore store) throws URISyntaxException, IOException { + URI name = new URI("s3a://bucket"); + Configuration conf = new Configuration(); + return new StoreContext( + name, + "bucket", + conf, + "alice", + UserGroupInformation.getCurrentUser(), + BlockingThreadPoolExecutorService.newInstance( + 4, + 4, + 10, TimeUnit.SECONDS, + "s3a-transfer-shared"), + Constants.DEFAULT_EXECUTOR_CAPACITY, + new Invoker(RetryPolicies.TRY_ONCE_THEN_FAIL, Invoker.LOG_EVENT), + new S3AInstrumentation(name), + new S3AStorageStatistics(), + S3AInputPolicy.Normal, + ChangeDetectionPolicy.createPolicy(ChangeDetectionPolicy.Mode.None, + ChangeDetectionPolicy.Source.ETag, false), + multiDelete, + store, + false, + CONTEXT_ACCESSORS, + new S3Guard.TtlTimeProvider(conf)); + } + + private static class MinimalContextAccessor implements ContextAccessors { + + @Override + public Path keyToPath(final String key) { + return qualifyKey(key); + } + + @Override + public String pathToKey(final Path path) { + return null; + } + + @Override + public File createTempFile(final String prefix, final long size) + throws IOException { + throw new UnsupportedOperationException("unsppported"); + } + + @Override + public String getBucketLocation() throws IOException { + return null; + } + } + /** + * MetadataStore which tracks what is deleted and added. 
+ */ + private static class OperationTrackingStore implements MetadataStore { + + private final List deleted = new ArrayList<>(); + + private final List created = new ArrayList<>(); + + @Override + public void initialize(final FileSystem fs) { + } + + @Override + public void initialize(final Configuration conf) { + } + + @Override + public void forgetMetadata(final Path path) { + } + + @Override + public PathMetadata get(final Path path) { + return null; + } + + @Override + public PathMetadata get(final Path path, + final boolean wantEmptyDirectoryFlag) { + return null; + } + + @Override + public DirListingMetadata listChildren(final Path path) { + return null; + } + + @Override + public void put(final PathMetadata meta) { + put(meta, null); + } + + @Override + public void put(final PathMetadata meta, + final BulkOperationState operationState) { + created.add(meta.getFileStatus().getPath()); + } + + @Override + public void put(final Collection metas, + final BulkOperationState operationState) { + metas.stream().forEach(meta -> put(meta, null)); + } + + @Override + public void put(final DirListingMetadata meta, + final BulkOperationState operationState) { + created.add(meta.getPath()); + } + + @Override + public void destroy() { + } + + @Override + public void delete(final Path path, + final ITtlTimeProvider ttlTimeProvider) { + deleted.add(path); + } + + @Override + public void deleteSubtree(final Path path, + final ITtlTimeProvider ttlTimeProvider) { + + } + + @Override + public void move(@Nullable final Collection pathsToDelete, + @Nullable final Collection pathsToCreate, + final ITtlTimeProvider ttlTimeProvider, + @Nullable final BulkOperationState operationState) { + } + + @Override + public void prune(final PruneMode pruneMode, final long cutoff) { + } + + @Override + public void prune(final PruneMode pruneMode, + final long cutoff, + final String keyPrefix) { + + } + + @Override + public BulkOperationState initiateBulkWrite( + final BulkOperationState.OperationType operation, + final Path dest) { + return null; + } + + @Override + public Map getDiagnostics() { + return null; + } + + @Override + public void updateParameters(final Map parameters) { + } + + @Override + public void close() { + } + + public List getDeleted() { + return deleted; + } + + public List getCreated() { + return created; + } + + @Override + public RenameTracker initiateRenameOperation( + final StoreContext storeContext, + final Path source, + final S3AFileStatus sourceStatus, + final Path dest) { + throw new UnsupportedOperationException("unsupported"); + } + + @Override + public void addAncestors(final Path qualifiedPath, + final ITtlTimeProvider timeProvider, + @Nullable final BulkOperationState operationState) { + + } + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java index f616190040a3b..eaaa50c1c0f3a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java @@ -80,6 +80,16 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase { private MetadataStore ms; private S3AFileSystem rawFs; + /** + * The test timeout is increased in case previous tests have created + * many tombstone markers which now need to be purged. 
+ * @return the test timeout. + */ + @Override + protected int getTestTimeoutMillis() { + return SCALE_TEST_TIMEOUT_SECONDS * 1000; + } + protected static void expectResult(int expected, String message, S3GuardTool tool, @@ -187,19 +197,24 @@ protected void mkdirs(Path path, boolean onS3, boolean onMetadataStore) fs.mkdirs(path); } else if (onMetadataStore) { S3AFileStatus status = new S3AFileStatus(true, path, OWNER); - ms.put(new PathMetadata(status)); + ms.put(new PathMetadata(status), null); } } protected static void putFile(MetadataStore ms, S3AFileStatus f) throws IOException { assertNotNull(f); - ms.put(new PathMetadata(f)); - Path parent = f.getPath().getParent(); - while (parent != null) { - S3AFileStatus dir = new S3AFileStatus(false, parent, f.getOwner()); - ms.put(new PathMetadata(dir)); - parent = parent.getParent(); + try (BulkOperationState bulkWrite = + ms.initiateBulkWrite( + BulkOperationState.OperationType.Put, + f.getPath())) { + ms.put(new PathMetadata(f), bulkWrite); + Path parent = f.getPath().getParent(); + while (parent != null) { + S3AFileStatus dir = new S3AFileStatus(false, parent, f.getOwner()); + ms.put(new PathMetadata(dir), bulkWrite); + parent = parent.getParent(); + } } } @@ -252,12 +267,13 @@ private void testPruneCommand(Configuration cmdConf, Path parent, String...args) throws Exception { Path keepParent = path("prune-cli-keep"); StopWatch timer = new StopWatch(); + final S3AFileSystem fs = getFileSystem(); try { S3GuardTool.Prune cmd = new S3GuardTool.Prune(cmdConf); cmd.setMetadataStore(ms); - getFileSystem().mkdirs(parent); - getFileSystem().mkdirs(keepParent); + fs.mkdirs(parent); + fs.mkdirs(keepParent); createFile(new Path(parent, "stale"), true, true); createFile(new Path(keepParent, "stale-to-keep"), true, true); @@ -279,8 +295,14 @@ private void testPruneCommand(Configuration cmdConf, Path parent, assertMetastoreListingCount(keepParent, "This child should have been kept (prefix restriction).", 1); } finally { - getFileSystem().delete(parent, true); - ms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, Long.MAX_VALUE); + fs.delete(parent, true); + fs.delete(keepParent, true); + ms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, + Long.MAX_VALUE, + fs.pathToKey(parent)); + ms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, + Long.MAX_VALUE, + fs.pathToKey(keepParent)); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java index 5241dd481d313..03ebe1ee76a5b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java @@ -35,19 +35,21 @@ import com.amazonaws.services.dynamodbv2.model.ListTagsOfResourceRequest; import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException; import com.amazonaws.services.dynamodbv2.model.TableDescription; - import com.amazonaws.services.dynamodbv2.model.Tag; import com.google.common.collect.Lists; +import org.assertj.core.api.Assertions; + import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.contract.s3a.S3AContract; import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.fs.s3a.S3ATestConstants; import org.apache.hadoop.fs.s3a.Tristate; - import 
org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.DurationInfo; + import org.junit.AfterClass; -import org.junit.Assert; import org.junit.Assume; import org.junit.BeforeClass; import org.junit.Test; @@ -62,6 +64,7 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.security.UserGroupInformation; +import static com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; import static org.apache.hadoop.fs.s3a.S3AUtils.clearBucketOption; @@ -159,7 +162,8 @@ public static void beforeClassSetup() throws IOException { testDynamoDBTableName = conf.get( S3ATestConstants.S3GUARD_DDB_TEST_TABLE_NAME_KEY); String dynamoDbTableName = conf.getTrimmed(S3GUARD_DDB_TABLE_NAME_KEY); - Assume.assumeTrue("No DynamoDB table name configured", + Assume.assumeTrue("No DynamoDB table name configured in " + + S3GUARD_DDB_TABLE_NAME_KEY, !StringUtils.isEmpty(dynamoDbTableName)); // We should assert that the table name is configured, so the test should @@ -181,6 +185,19 @@ public static void beforeClassSetup() throws IOException { // We can use that table in the test if these assertions are valid conf.set(S3GUARD_DDB_TABLE_NAME_KEY, testDynamoDBTableName); + // remove some prune delays + conf.setInt(S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY, 0); + + // clear all table tagging config before this test + conf.getPropsWithPrefix(S3GUARD_DDB_TABLE_TAG).keySet().forEach( + propKey -> conf.unset(S3GUARD_DDB_TABLE_TAG + propKey) + ); + + // set the tags on the table so that it can be tested later. + Map tagMap = createTagMap(); + for (Map.Entry tagEntry : tagMap.entrySet()) { + conf.set(S3GUARD_DDB_TABLE_TAG + tagEntry.getKey(), tagEntry.getValue()); + } LOG.debug("Creating static ddbms which will be shared between tests."); enableOnDemand(conf); @@ -191,14 +208,23 @@ public static void beforeClassSetup() throws IOException { @AfterClass public static void afterClassTeardown() { LOG.debug("Destroying static DynamoDBMetadataStore."); - if (ddbmsStatic != null) { + destroy(ddbmsStatic); + ddbmsStatic = null; + } + + /** + * Destroy and then close() a metastore instance. + * Exceptions are caught and logged at debug. + * @param ddbms store -may be null. + */ + private static void destroy(final DynamoDBMetadataStore ddbms) { + if (ddbms != null) { try { - ddbmsStatic.destroy(); - } catch (Exception e) { - LOG.warn("Failed to destroy tables in teardown", e); + ddbms.destroy(); + IOUtils.closeStream(ddbms); + } catch (IOException e) { + LOG.debug("On ddbms shutdown", e); } - IOUtils.closeStream(ddbmsStatic); - ddbmsStatic = null; } } @@ -208,26 +234,91 @@ private static void assumeThatDynamoMetadataStoreImpl(Configuration conf){ Constants.S3GUARD_METASTORE_DYNAMO)); } - + /** + * This teardown does not call super.teardown() so as to avoid the DDMBS table + * from being destroyed. + *
    + * This is potentially quite slow, depending on DDB IO Capacity and number + * of entries to forget. + */ @Override public void tearDown() throws Exception { + LOG.info("Removing data from ddbms table in teardown."); + Thread.currentThread().setName("Teardown"); + // The following is a way to be sure the table will be cleared and there + // will be no leftovers after the test. try { - if (ddbmsStatic != null) { - LOG.info("Removing data from ddbms table in teardown."); - // The following is a way to be sure the table will be cleared and there - // will be no leftovers after the test. - PathMetadata meta = ddbmsStatic.get(strToPath("/")); - if (meta != null){ - for (DescendantsIterator desc = - new DescendantsIterator(ddbmsStatic, meta); - desc.hasNext();) { - ddbmsStatic.forgetMetadata(desc.next().getPath()); - } + deleteAllMetadata(); + } finally { + IOUtils.cleanupWithLogger(LOG, fileSystem); + } + } + + /** + * Forget all metadata in the store. + * This originally did an iterate and forget, but using prune() hands off the + * bulk IO into the metastore itself; the forgetting is used + * to purge anything which wasn't pruned. + */ + private void deleteAllMetadata() throws IOException { + // The following is a way to be sure the table will be cleared and there + // will be no leftovers after the test. + // only executed if there is a filesystem, as failure during test setup + // means that strToPath will NPE. + if (getContract() != null && getContract().getFileSystem() != null) { + deleteMetadataUnderPath(ddbmsStatic, strToPath("/"), true); + } + } + + /** + * Delete all metadata under a path. + * Attempt to use prune first as it scales slightly better. + * @param ms store + * @param path path to prune under + * @param suppressErrors should errors be suppressed? + * @throws IOException if there is a failure and suppressErrors == false + */ + public static void deleteMetadataUnderPath(final DynamoDBMetadataStore ms, + final Path path, final boolean suppressErrors) throws IOException { + ThrottleTracker throttleTracker = new ThrottleTracker(ms); + try (DurationInfo ignored = new DurationInfo(LOG, true, "prune")) { + ms.prune(PruneMode.ALL_BY_MODTIME, + System.currentTimeMillis(), + PathMetadataDynamoDBTranslation.pathToParentKey(path)); + LOG.info("Throttle statistics: {}", throttleTracker); + } catch (FileNotFoundException fnfe) { + // there is no table. + return; + } catch (IOException ioe) { + // prune failed. warn and then fall back to forget. + LOG.warn("Failed to prune {}", path, ioe); + if (!suppressErrors) { + throw ioe; + } + } + // and after the pruning, make sure all other metadata is gone + int forgotten = 0; + try (DurationInfo ignored = new DurationInfo(LOG, true, "forget")) { + PathMetadata meta = ms.get(path); + if (meta != null) { + for (DescendantsIterator desc = new DescendantsIterator(ms, + meta); + desc.hasNext();) { + forgotten++; + ms.forgetMetadata(desc.next().getPath()); } + LOG.info("Forgot {} entries", forgotten); + } + } catch (FileNotFoundException fnfe) { + // there is no table. 
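+ // nothing left to forget, so let teardown finish without failing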
+ return; + } catch (IOException ioe) { + LOG.warn("Failed to forget entries under {}", path, ioe); + if (!suppressErrors) { + throw ioe; } - } catch (IOException ignored) { } - IOUtils.cleanupWithLogger(LOG, fileSystem); + LOG.info("Throttle statistics: {}", throttleTracker); } @Override protected String getPathStringForPrune(String path) @@ -270,7 +361,7 @@ public DynamoDBMSContract createContract(Configuration conf) { } @Override - S3AFileStatus basicFileStatus(Path path, int size, boolean isDir) + protected S3AFileStatus basicFileStatus(Path path, int size, boolean isDir) throws IOException { String owner = UserGroupInformation.getCurrentUser().getShortUserName(); return isDir @@ -279,6 +370,15 @@ S3AFileStatus basicFileStatus(Path path, int size, boolean isDir) null, null); } + /** + * Create a directory status entry. + * @param dir directory. + * @return the status + */ + private S3AFileStatus dirStatus(Path dir) throws IOException { + return basicFileStatus(dir, 0, true); + } + private DynamoDBMetadataStore getDynamoMetadataStore() throws IOException { return (DynamoDBMetadataStore) getContract().getMetadataStore(); } @@ -335,8 +435,7 @@ public void testInitialize() throws IOException { expectedRegion, ddbms.getRegion()); } finally { - ddbms.destroy(); - ddbms.close(); + destroy(ddbms); } } @@ -377,31 +476,69 @@ public void testInitializeWithConfiguration() throws IOException { keySchema(), ddbms.getTable().describe().getKeySchema()); } finally { - ddbms.destroy(); - ddbms.close(); + destroy(ddbms); } } /** - * Test that for a large batch write request, the limit is handled correctly. + * This should really drive a parameterized test run of 5^2 entries, but it + * would require a major refactoring to set things up. + * For now, each source test has its own entry, with the destination written + * to. + * This seems to be enough to stop DDB throttling from triggering test + * timeouts. */ + private static final int[] NUM_METAS_TO_DELETE_OR_PUT = { + -1, // null + 0, // empty collection + 1, // one path + S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT, // exact limit of a batch request + S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT + 1 // limit + 1 + }; + @Test - public void testBatchWrite() throws IOException { - final int[] numMetasToDeleteOrPut = { - -1, // null - 0, // empty collection - 1, // one path - S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT, // exact limit of a batch request - S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT + 1 // limit + 1 - }; - DynamoDBMetadataStore ms = getDynamoMetadataStore(); - for (int numOldMetas : numMetasToDeleteOrPut) { - for (int numNewMetas : numMetasToDeleteOrPut) { - doTestBatchWrite(numOldMetas, numNewMetas, ms); - } + public void testBatchWrite00() throws IOException { + doBatchWriteForOneSet(0); + } + + @Test + public void testBatchWrite01() throws IOException { + doBatchWriteForOneSet(1); + } + + @Test + public void testBatchWrite02() throws IOException { + doBatchWriteForOneSet(2); + } + + @Test + public void testBatchWrite03() throws IOException { + doBatchWriteForOneSet(3); + } + + @Test + public void testBatchWrite04() throws IOException { + doBatchWriteForOneSet(4); + } + + /** + * Test that for a large batch write request, the limit is handled correctly. + * With cleanup afterwards. 
+ */ + private void doBatchWriteForOneSet(int index) throws IOException { + for (int numNewMetas : NUM_METAS_TO_DELETE_OR_PUT) { + doTestBatchWrite(NUM_METAS_TO_DELETE_OR_PUT[index], + numNewMetas, + getDynamoMetadataStore()); } + // The following is a way to be sure the table will be cleared and there + // will be no leftovers after the test. + deleteMetadataUnderPath(ddbmsStatic, strToPath("/"), false); } + /** + * Test that for a large batch write request, the limit is handled correctly. + */ private void doTestBatchWrite(int numDelete, int numPut, DynamoDBMetadataStore ms) throws IOException { Path path = new Path( @@ -411,15 +548,20 @@ private void doTestBatchWrite(int numDelete, int numPut, final Path oldDir = new Path(root, "oldDir"); final Path newDir = new Path(root, "newDir"); LOG.info("doTestBatchWrite: oldDir={}, newDir={}", oldDir, newDir); + Thread.currentThread() + .setName(String.format("Bulk put=%d; delete=%d", numPut, numDelete)); - ms.put(new PathMetadata(basicFileStatus(oldDir, 0, true))); - ms.put(new PathMetadata(basicFileStatus(newDir, 0, true))); + AncestorState putState = checkNotNull(ms.initiateBulkWrite( + BulkOperationState.OperationType.Put, newDir), + "No state from initiateBulkWrite()"); + ms.put(new PathMetadata(dirStatus(oldDir)), putState); + ms.put(new PathMetadata(dirStatus(newDir)), putState); final List oldMetas = numDelete < 0 ? null : new ArrayList<>(numDelete); for (int i = 0; i < numDelete; i++) { oldMetas.add(new PathMetadata( - basicFileStatus(new Path(oldDir, "child" + i), i, true))); + basicFileStatus(new Path(oldDir, "child" + i), i, false))); } final List newMetas = numPut < 0 ? null : new ArrayList<>(numPut); @@ -431,8 +573,13 @@ private void doTestBatchWrite(int numDelete, int numPut, Collection pathsToDelete = null; if (oldMetas != null) { // put all metadata of old paths and verify - ms.put(new DirListingMetadata(oldDir, oldMetas, false)); - assertEquals(0, ms.listChildren(newDir).withoutTombstones().numEntries()); + ms.put(new DirListingMetadata(oldDir, oldMetas, false), putState); + assertEquals("Child count", + 0, ms.listChildren(newDir).withoutTombstones().numEntries()); + Assertions.assertThat(ms.listChildren(oldDir).getListing()) + .describedAs("Old Directory listing") + .containsExactlyInAnyOrderElementsOf(oldMetas); + assertTrue(CollectionUtils .isEqualCollection(oldMetas, ms.listChildren(oldDir).getListing())); @@ -443,11 +590,28 @@ private void doTestBatchWrite(int numDelete, int numPut, } // move the old paths to new paths and verify - ms.move(pathsToDelete, newMetas, getTtlTimeProvider()); - assertEquals(0, ms.listChildren(oldDir).withoutTombstones().numEntries()); + AncestorState state = checkNotNull(ms.initiateBulkWrite( + BulkOperationState.OperationType.Put, newDir), + "No state from initiateBulkWrite()"); + assertEquals("bulk write destination", newDir, state.getDest()); + + ThrottleTracker throttleTracker = new ThrottleTracker(ms); + try(DurationInfo ignored = new DurationInfo(LOG, true, + "Move")) { + ms.move(pathsToDelete, newMetas, getTtlTimeProvider(), state); + } + LOG.info("Throttle status {}", throttleTracker); + assertEquals("Number of children in source directory", + 0, ms.listChildren(oldDir).withoutTombstones().numEntries()); if (newMetas != null) { - assertTrue(CollectionUtils - .isEqualCollection(newMetas, ms.listChildren(newDir).getListing())); + Assertions.assertThat(ms.listChildren(newDir).getListing()) + .describedAs("Directory listing") + .containsAll(newMetas); + if (!newMetas.isEmpty()) { + 
Assertions.assertThat(state.size()) + .describedAs("Size of ancestor state") + .isGreaterThan(newMetas.size()); + } } } @@ -483,63 +647,66 @@ public void testItemLacksVersion() throws Throwable { } /** + * Test versioning handling. + *
+ * <ol>
+ *   <li>Create the table.</li>
+ *   <li>Verify tag propagation.</li>
+ *   <li>Delete the version marker -verify failure.</li>
+ *   <li>Reinstate a different version marker -verify failure</li>
+ * </ol>
    * Delete the version marker and verify that table init fails. + * This also includes the checks for tagging, which goes against all + * principles of unit tests. + * However, merging the routines saves */ @Test - public void testTableVersionRequired() throws Exception { + public void testTableVersioning() throws Exception { String tableName = getTestTableName("testTableVersionRequired"); Configuration conf = getTableCreationConfig(); int maxRetries = conf.getInt(S3GUARD_DDB_MAX_RETRIES, S3GUARD_DDB_MAX_RETRIES_DEFAULT); conf.setInt(S3GUARD_DDB_MAX_RETRIES, 3); conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - + tagConfiguration(conf); DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore(); try { ddbms.initialize(conf); Table table = verifyTableInitialized(tableName, ddbms.getDynamoDB()); + // check the tagging too + verifyStoreTags(createTagMap(), ddbms); + + Item originalVersionMarker = table.getItem(VERSION_MARKER_PRIMARY_KEY); table.deleteItem(VERSION_MARKER_PRIMARY_KEY); // create existing table intercept(IOException.class, E_NO_VERSION_MARKER, () -> ddbms.initTable()); - conf.setInt(S3GUARD_DDB_MAX_RETRIES, maxRetries); - } finally { - ddbms.destroy(); - ddbms.close(); - } - } - - /** - * Set the version value to a different number and verify that - * table init fails. - */ - @Test - public void testTableVersionMismatch() throws Exception { - String tableName = getTestTableName("testTableVersionMismatch"); - Configuration conf = getTableCreationConfig(); - conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - - DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore(); - try { - ddbms.initialize(conf); - Table table = verifyTableInitialized(tableName, ddbms.getDynamoDB()); - table.deleteItem(VERSION_MARKER_PRIMARY_KEY); - Item v200 = createVersionMarker(VERSION_MARKER, 200, 0); + // now add a different version marker + Item v200 = createVersionMarker(VERSION_MARKER, VERSION * 2, 0); table.putItem(v200); // create existing table intercept(IOException.class, E_INCOMPATIBLE_VERSION, () -> ddbms.initTable()); - } finally { - ddbms.destroy(); - ddbms.close(); - } - } + // create a marker with no version and expect failure + final Item invalidMarker = new Item().withPrimaryKey( + createVersionMarkerPrimaryKey(VERSION_MARKER)) + .withLong(TABLE_CREATED, 0); + table.putItem(invalidMarker); + intercept(IOException.class, E_NOT_VERSION_MARKER, + () -> ddbms.initTable()); + // reinstate the version marker + table.putItem(originalVersionMarker); + ddbms.initTable(); + conf.setInt(S3GUARD_DDB_MAX_RETRIES, maxRetries); + } finally { + destroy(ddbms); + } + } /** * Test that initTable fails with IOException when table does not exist and @@ -580,7 +747,8 @@ public void testRootDirectory() throws IOException { ddbms.put(new PathMetadata(new S3AFileStatus(true, new Path(rootPath, "foo"), - UserGroupInformation.getCurrentUser().getShortUserName()))); + UserGroupInformation.getCurrentUser().getShortUserName())), + null); verifyRootDirectory(ddbms.get(rootPath), false); } @@ -631,9 +799,13 @@ public void testMovePopulatesAncestors() throws IOException { final String destRoot = testRoot + "/c/d/e/dest"; final Path nestedPath1 = strToPath(srcRoot + "/file1.txt"); - ddbms.put(new PathMetadata(basicFileStatus(nestedPath1, 1024, false))); + AncestorState bulkWrite = ddbms.initiateBulkWrite( + BulkOperationState.OperationType.Put, nestedPath1); + ddbms.put(new PathMetadata(basicFileStatus(nestedPath1, 1024, false)), + bulkWrite); final Path nestedPath2 = strToPath(srcRoot + "/dir1/dir2"); - ddbms.put(new 
PathMetadata(basicFileStatus(nestedPath2, 0, true))); + ddbms.put(new PathMetadata(basicFileStatus(nestedPath2, 0, true)), + bulkWrite); // We don't put the destRoot path here, since put() would create ancestor // entries, and we want to ensure that move() does it, instead. @@ -643,8 +815,8 @@ public void testMovePopulatesAncestors() throws IOException { strToPath(srcRoot), strToPath(srcRoot + "/dir1"), strToPath(srcRoot + "/dir1/dir2"), - strToPath(srcRoot + "/file1.txt") - ); + strToPath(srcRoot + "/file1.txt")); + final String finalFile = destRoot + "/file1.txt"; final Collection pathsToCreate = Lists.newArrayList( new PathMetadata(basicFileStatus(strToPath(destRoot), 0, true)), @@ -652,22 +824,101 @@ public void testMovePopulatesAncestors() throws IOException { 0, true)), new PathMetadata(basicFileStatus(strToPath(destRoot + "/dir1/dir2"), 0, true)), - new PathMetadata(basicFileStatus(strToPath(destRoot + "/file1.txt"), + new PathMetadata(basicFileStatus(strToPath(finalFile), 1024, false)) ); - ddbms.move(fullSourcePaths, pathsToCreate, getTtlTimeProvider()); - + ddbms.move(fullSourcePaths, pathsToCreate, getTtlTimeProvider(), + bulkWrite); + bulkWrite.close(); // assert that all the ancestors should have been populated automatically - assertCached(testRoot + "/c"); - assertCached(testRoot + "/c/d"); - assertCached(testRoot + "/c/d/e"); - assertCached(destRoot /* /c/d/e/dest */); - + List paths = Lists.newArrayList( + testRoot + "/c", testRoot + "/c/d", testRoot + "/c/d/e", destRoot, + destRoot + "/dir1", destRoot + "/dir1/dir2"); + for (String p : paths) { + assertCached(p); + verifyInAncestor(bulkWrite, p, true); + } // Also check moved files while we're at it - assertCached(destRoot + "/dir1"); - assertCached(destRoot + "/dir1/dir2"); - assertCached(destRoot + "/file1.txt"); + assertCached(finalFile); + verifyInAncestor(bulkWrite, finalFile, false); + } + + @Test + public void testAncestorOverwriteConflict() throws Throwable { + final DynamoDBMetadataStore ddbms = getDynamoMetadataStore(); + String testRoot = "/" + getMethodName(); + String parent = testRoot + "/parent"; + Path parentPath = strToPath(parent); + String child = parent + "/child"; + Path childPath = strToPath(child); + String grandchild = child + "/grandchild"; + Path grandchildPath = strToPath(grandchild); + String child2 = parent + "/child2"; + String grandchild2 = child2 + "/grandchild2"; + Path grandchild2Path = strToPath(grandchild2); + AncestorState bulkWrite = ddbms.initiateBulkWrite( + BulkOperationState.OperationType.Put, parentPath); + + // writing a child creates ancestors + ddbms.put( + new PathMetadata(basicFileStatus(childPath, 1024, false)), + bulkWrite); + verifyInAncestor(bulkWrite, child, false); + verifyInAncestor(bulkWrite, parent, true); + + // overwrite an ancestor with a file entry in the same operation + // is an error. + intercept(PathIOException.class, E_INCONSISTENT_UPDATE, + () -> ddbms.put( + new PathMetadata(basicFileStatus(parentPath, 1024, false)), + bulkWrite)); + + // now put a file under the child and expect the put operation + // to fail fast, because the ancestor state includes a file at a parent. 
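+ // intercept() asserts that the closure raises a PathIOException whose
+ // text contains E_INCONSISTENT_UPDATE; no exception, or a different one,
+ // fails the test.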
+ + intercept(PathIOException.class, E_INCONSISTENT_UPDATE, + () -> ddbms.put( + new PathMetadata(basicFileStatus(grandchildPath, 1024, false)), + bulkWrite)); + + // and expect a failure for directory update under the child + DirListingMetadata grandchildListing = new DirListingMetadata( + grandchildPath, + new ArrayList<>(), false); + intercept(PathIOException.class, E_INCONSISTENT_UPDATE, + () -> ddbms.put(grandchildListing, bulkWrite)); + + // but a directory update under another path is fine + DirListingMetadata grandchild2Listing = new DirListingMetadata( + grandchild2Path, + new ArrayList<>(), false); + ddbms.put(grandchild2Listing, bulkWrite); + // and it creates a new entry for its parent + verifyInAncestor(bulkWrite, child2, true); + } + + /** + * Assert that a path has an entry in the ancestor state. + * @param state ancestor state + * @param path path to look for + * @param isDirectory is it a directory + * @return the value + * @throws IOException IO failure + * @throws AssertionError assertion failure. + */ + private DDBPathMetadata verifyInAncestor(AncestorState state, + String path, + final boolean isDirectory) + throws IOException { + final Path p = strToPath(path); + assertTrue("Path " + p + " not found in ancestor state", state.contains(p)); + final DDBPathMetadata md = state.get(p); + assertTrue("Ancestor value for "+ path, + isDirectory + ? md.getFileStatus().isDirectory() + : md.getFileStatus().isFile()); + return md; } @Test @@ -731,50 +982,48 @@ public void testDeleteTable() throws Exception { intercept(IOException.class, "", "Should have failed after the table is destroyed!", () -> ddbms.listChildren(testPath)); - } finally { ddbms.destroy(); - ddbms.close(); + intercept(FileNotFoundException.class, "", + "Destroyed table should raise FileNotFoundException when pruned", + () -> ddbms.prune(PruneMode.ALL_BY_MODTIME, 0)); + } finally { + destroy(ddbms); } } - @Test - public void testTableTagging() throws IOException { - final Configuration conf = getTableCreationConfig(); - // clear all table tagging config before this test - conf.getPropsWithPrefix(S3GUARD_DDB_TABLE_TAG).keySet().forEach( - propKey -> conf.unset(S3GUARD_DDB_TABLE_TAG + propKey) - ); + protected void verifyStoreTags(final Map tagMap, + final DynamoDBMetadataStore store) { + List tags = listTagsOfStore(store); + Map actual = new HashMap<>(); + tags.forEach(t -> actual.put(t.getKey(), t.getValue())); + Assertions.assertThat(actual) + .describedAs("Tags from DDB table") + .containsExactlyEntriesOf(tagMap); + assertEquals(tagMap.size(), tags.size()); + } - String tableName = - getTestTableName("testTableTagging-" + UUID.randomUUID()); - conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - conf.set(S3GUARD_DDB_TABLE_CREATE_KEY, "true"); + protected List listTagsOfStore(final DynamoDBMetadataStore store) { + ListTagsOfResourceRequest listTagsOfResourceRequest = + new ListTagsOfResourceRequest() + .withResourceArn(store.getTable().getDescription() + .getTableArn()); + return store.getAmazonDynamoDB() + .listTagsOfResource(listTagsOfResourceRequest).getTags(); + } + private static Map createTagMap() { Map tagMap = new HashMap<>(); tagMap.put("hello", "dynamo"); tagMap.put("tag", "youre it"); + return tagMap; + } + + private static void tagConfiguration(Configuration conf) { + // set the tags on the table so that it can be tested later. 
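+ // each tag map entry becomes a configuration property under the
+ // S3GUARD_DDB_TABLE_TAG prefix; the metastore applies these tags when it
+ // creates the DynamoDB table, and verifyStoreTags() checks them later.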
+ Map tagMap = createTagMap(); for (Map.Entry tagEntry : tagMap.entrySet()) { conf.set(S3GUARD_DDB_TABLE_TAG + tagEntry.getKey(), tagEntry.getValue()); } - - DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore(); - try { - ddbms.initialize(conf); - assertNotNull(ddbms.getTable()); - assertEquals(tableName, ddbms.getTable().getTableName()); - ListTagsOfResourceRequest listTagsOfResourceRequest = - new ListTagsOfResourceRequest() - .withResourceArn(ddbms.getTable().getDescription().getTableArn()); - List tags = ddbms.getAmazonDynamoDB() - .listTagsOfResource(listTagsOfResourceRequest).getTags(); - assertEquals(tagMap.size(), tags.size()); - for (Tag tag : tags) { - Assert.assertEquals(tagMap.get(tag.getKey()), tag.getValue()); - } - } finally { - ddbms.destroy(); - ddbms.close(); - } } @Test @@ -793,7 +1042,7 @@ private void testGetEmptyDirFlagCanSetTrueOrUnknown(boolean auth) throws IOException { // setup final DynamoDBMetadataStore ms = getDynamoMetadataStore(); - String rootPath = "/testAuthoritativeEmptyDirFlag"+ UUID.randomUUID(); + String rootPath = "/testAuthoritativeEmptyDirFlag-" + UUID.randomUUID(); String filePath = rootPath + "/file1"; final Path dirToPut = fileSystem.makeQualified(new Path(rootPath)); final Path fileToPut = fileSystem.makeQualified(new Path(filePath)); @@ -809,7 +1058,7 @@ private void testGetEmptyDirFlagCanSetTrueOrUnknown(boolean auth) assertEquals(auth, dlm.isAuthoritative()); // Test with non-authoritative listing, empty dir - ms.put(dlm); + ms.put(dlm, null); final PathMetadata pmdResultEmpty = ms.get(dirToPut, true); if(auth){ assertEquals(Tristate.TRUE, pmdResultEmpty.isEmptyDirectory()); @@ -819,7 +1068,7 @@ private void testGetEmptyDirFlagCanSetTrueOrUnknown(boolean auth) // Test with non-authoritative listing, non-empty dir dlm.put(basicFileStatus(fileToPut, 1, false)); - ms.put(dlm); + ms.put(dlm, null); final PathMetadata pmdResultNotEmpty = ms.get(dirToPut, true); assertEquals(Tristate.FALSE, pmdResultNotEmpty.isEmptyDirectory()); } @@ -852,4 +1101,120 @@ private void verifyTableNotExist(String tableName, DynamoDB dynamoDB) throws private String getTestTableName(String suffix) { return getTestDynamoTablePrefix(s3AContract.getConf()) + suffix; } + + @Test + public void testPruneAgainstInvalidTable() throws Throwable { + describe("Create an Invalid listing and prune it"); + DynamoDBMetadataStore ms + = ITestDynamoDBMetadataStore.ddbmsStatic; + String base = "/testPruneAgainstInvalidTable"; + String subdir = base + "/subdir"; + Path subDirPath = strToPath(subdir); + createNewDirs(base, subdir); + + String subFile = subdir + "/file1"; + Path subFilePath = strToPath(subFile); + putListStatusFiles(subdir, true, + subFile); + final DDBPathMetadata subDirMetadataOrig = ms.get(subDirPath); + Assertions.assertThat(subDirMetadataOrig.isAuthoritativeDir()) + .describedAs("Subdirectory %s", subDirMetadataOrig) + .isTrue(); + + // now let's corrupt the graph by putting a file + // over the subdirectory + + long now = getTime(); + long oldTime = now - 60_000; + putFile(subdir, oldTime, null); + final DDBPathMetadata subDirAsFile = ms.get(subDirPath); + + Assertions.assertThat(subDirAsFile.getFileStatus().isFile()) + .describedAs("Subdirectory entry %s is now file", subDirMetadataOrig) + .isTrue(); + + Path basePath = strToPath(base); + DirListingMetadata listing = ms.listChildren(basePath); + String childText = listing.prettyPrint(); + LOG.info("Listing {}", childText); + Collection childList = listing.getListing(); + Assertions.assertThat(childList) + 
.as("listing of %s with %s", basePath, childText) + .hasSize(1); + PathMetadata[] pm = new PathMetadata[0]; + S3AFileStatus status = childList.toArray(pm)[0] + .getFileStatus(); + Assertions.assertThat(status.isFile()) + .as("Entry %s", (Object)pm) + .isTrue(); + DDBPathMetadata subFilePm = checkNotNull(ms.get(subFilePath)); + LOG.info("Pruning"); + + // now prune + ms.prune(PruneMode.ALL_BY_MODTIME, + now + 60_000, subdir); + DDBPathMetadata prunedFile = ms.get(subFilePath); + + final PathMetadata subDirMetadataFinal = getNonNull(subdir); + + Assertions.assertThat(subDirMetadataFinal.getFileStatus().isFile()) + .describedAs("Subdirectory entry %s is still a file", + subDirMetadataFinal) + .isTrue(); + } + + @Test + public void testPutFileDirectlyUnderTombstone() throws Throwable { + describe("Put a file under a tombstone"); + String base = "/testPutFileDirectlyUnderTombstone"; + long now = getTime(); + putTombstone(base, now, null); + PathMetadata baseMeta1 = get(base); + Assertions.assertThat(baseMeta1.isDeleted()) + .as("Metadata %s", baseMeta1) + .isTrue(); + String child = base + "/file"; + putFile(child, now, null); + PathMetadata baseMeta2 = get(base); + Assertions.assertThat(baseMeta2.isDeleted()) + .as("Metadata %s", baseMeta2) + .isFalse(); + } + + @Test + public void testPutFileDeepUnderTombstone() throws Throwable { + describe("Put a file two levels under a tombstone"); + String base = "/testPutFileDeepUnderTombstone"; + String subdir = base + "/subdir"; + long now = getTime(); + // creating a file MUST create its parents + String child = subdir + "/file"; + Path childPath = strToPath(child); + putFile(child, now, null); + getFile(child); + getDirectory(subdir); + getDirectory(base); + + // now put the tombstone + putTombstone(base, now, null); + PathMetadata baseMeta1 = getNonNull(base); + Assertions.assertThat(baseMeta1.isDeleted()) + .as("Metadata %s", baseMeta1) + .isTrue(); + + // this is the same ordering as S3FileSystem.finishedWrite() + + AncestorState ancestorState = getDynamoMetadataStore() + .initiateBulkWrite(BulkOperationState.OperationType.Put, + childPath); + S3Guard.addAncestors(getDynamoMetadataStore(), + childPath, + getTtlTimeProvider(), + ancestorState); + // now write the file again. + putFile(child, now, ancestorState); + // the ancestor will now exist. 
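    // For reference, a minimal sketch of that ordering as a writer would issue
    // it; store, path, status and timeProvider are placeholders, while the
    // calls themselves are the ones used in this test:
    //
    //   try (BulkOperationState state = store.initiateBulkWrite(
    //       BulkOperationState.OperationType.Put, path)) {
    //     S3Guard.addAncestors(store, path, timeProvider, state);
    //     store.put(new PathMetadata(status), state);
    //   }
    //
    // The addAncestors() pass recreates the directory entries above the file,
    // which is why the tombstoned base directory becomes visible again below.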
+ getDirectory(base); + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java index 95c607aa66183..72a4bb468c6bd 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java @@ -32,6 +32,7 @@ import com.amazonaws.services.dynamodbv2.document.DynamoDB; import com.amazonaws.services.dynamodbv2.document.Table; import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription; +import org.junit.Assume; import org.junit.FixMethodOrder; import org.junit.Test; import org.junit.internal.AssumptionViolatedException; @@ -53,7 +54,9 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.DurationInfo; +import static com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.s3guard.MetadataStoreTestBase.basicFileStatus; import static org.junit.Assume.*; @@ -64,7 +67,10 @@ * The throttle tests aren't quite trying to verify that throttling can * be recovered from, because that makes for very slow tests: you have * to overload the system and them have them back of until they finally complete. - * Instead + *
<p>
    + * With DDB on demand, throttling is very unlikely. + * Here the tests simply run to completion, so act as regression tests of + * parallel invocations on the metastore APIs */ @FixMethodOrder(MethodSorters.NAME_ASCENDING) public class ITestDynamoDBMetadataStoreScale @@ -99,6 +105,8 @@ public class ITestDynamoDBMetadataStoreScale private static final int OPERATIONS_PER_THREAD = 50; + private boolean isOnDemandTable; + /** * Create the metadata store. The table and region are determined from * the attributes of the FS used in the tests. @@ -117,8 +125,7 @@ public MetadataStore createMetadataStore() throws IOException { store instanceof DynamoDBMetadataStore); DDBCapacities capacities = DDBCapacities.extractCapacities( store.getDiagnostics()); - assumeTrue("DBB table is on-demand", - !capacities.isOnDemandTable()); + isOnDemandTable = capacities.isOnDemandTable(); DynamoDBMetadataStore fsStore = (DynamoDBMetadataStore) store; Configuration conf = new Configuration(fs.getConf()); @@ -161,8 +168,6 @@ public void setup() throws Exception { isOverProvisionedForTest = ( originalCapacity.getReadCapacityUnits() > MAXIMUM_READ_CAPACITY || originalCapacity.getWriteCapacityUnits() > MAXIMUM_WRITE_CAPACITY); - assumeFalse("Table has too much capacity: " + originalCapacity.toString(), - isOverProvisionedForTest); } @Override @@ -171,18 +176,22 @@ public void teardown() throws Exception { super.teardown(); } + private boolean expectThrottling() { + return !isOverProvisionedForTest && !isOnDemandTable; + } + /** * The subclass expects the superclass to be throttled; sometimes it is. */ @Test @Override public void test_020_Moves() throws Throwable { - ThrottleTracker tracker = new ThrottleTracker(); + ThrottleTracker tracker = new ThrottleTracker(ddbms); try { // if this doesn't throttle, all is well. 
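    // (What the tracker measures, per the ThrottleTracker class later in this
    // patch: it snapshots the store's read-throttle, write-throttle and
    // batch-write-capacity-exceeded counters at construction, and probe()
    // reports whether any of them increased while the workload ran.)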
super.test_020_Moves(); } catch (AWSServiceThrottledException ex) { - // if the service was throttled, we ex;ect the exception text + // if the service was throttled, we expect the exception text GenericTestUtils.assertExceptionContains( DynamoDBMetadataStore.HINT_DDB_IOPS_TOO_LOW, ex, @@ -221,15 +230,18 @@ public void test_030_BatchedWrite() throws Exception { try { describe("Running %d iterations of batched put, size %d", iterations, BATCH_SIZE); + Path base = path(getMethodName()); + final String pathKey = base.toUri().getPath(); ThrottleTracker result = execute("prune", 1, - true, + expectThrottling(), () -> { - ThrottleTracker tracker = new ThrottleTracker(); + ThrottleTracker tracker = new ThrottleTracker(ddbms); long pruneItems = 0; for (long i = 0; i < iterations; i++) { - Path longPath = pathOfDepth(BATCH_SIZE, String.valueOf(i)); + Path longPath = pathOfDepth(BATCH_SIZE, + pathKey, String.valueOf(i)); S3AFileStatus status = basicFileStatus(longPath, 0, false, 12345); PathMetadata pm = new PathMetadata(status); @@ -237,14 +249,14 @@ public void test_030_BatchedWrite() throws Exception { toCleanup.add(pm); } - ddbms.put(pm); + ddbms.put(pm, null); pruneItems++; if (pruneItems == BATCH_SIZE) { describe("pruning files"); ddbms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, - Long.MAX_VALUE /* all files */); + Long.MAX_VALUE, pathKey); pruneItems = 0; } if (tracker.probe()) { @@ -253,8 +265,10 @@ public void test_030_BatchedWrite() throws Exception { } } }); - assertNotEquals("No batch retries in " + result, - 0, result.batchThrottles); + if (expectThrottling()) { + assertNotEquals("No batch retries in " + result, + 0, result.getBatchThrottles()); + } } finally { describe("Cleaning up table %s", tableName); for (PathMetadata pm : toCleanup) { @@ -274,11 +288,11 @@ public void test_040_get() throws Throwable { Path path = new Path("s3a://example.org/get"); S3AFileStatus status = new S3AFileStatus(true, path, "alice"); PathMetadata metadata = new PathMetadata(status); - ddbms.put(metadata); + ddbms.put(metadata, null); try { execute("get", OPERATIONS_PER_THREAD, - true, + expectThrottling(), () -> ddbms.get(path, true) ); } finally { @@ -293,7 +307,7 @@ public void test_040_get() throws Throwable { public void test_050_getVersionMarkerItem() throws Throwable { execute("get", OPERATIONS_PER_THREAD * 2, - true, + expectThrottling(), () -> ddbms.getVersionMarkerItem() ); } @@ -318,12 +332,12 @@ public void test_060_list() throws Throwable { Path path = new Path("s3a://example.org/list"); S3AFileStatus status = new S3AFileStatus(true, path, "alice"); PathMetadata metadata = new PathMetadata(status); - ddbms.put(metadata); + ddbms.put(metadata, null); try { Path parent = path.getParent(); execute("list", OPERATIONS_PER_THREAD, - true, + expectThrottling(), () -> ddbms.listChildren(parent) ); } finally { @@ -337,14 +351,16 @@ public void test_070_putDirMarker() throws Throwable { Path path = new Path("s3a://example.org/putDirMarker"); S3AFileStatus status = new S3AFileStatus(true, path, "alice"); PathMetadata metadata = new PathMetadata(status); - ddbms.put(metadata); + ddbms.put(metadata, null); DirListingMetadata children = ddbms.listChildren(path.getParent()); - try { + try (DynamoDBMetadataStore.AncestorState state = + ddbms.initiateBulkWrite( + BulkOperationState.OperationType.Put, + path)) { execute("list", OPERATIONS_PER_THREAD, - true, - () -> ddbms.put(children) - ); + expectThrottling(), + () -> ddbms.put(children, state)); } finally { retryingDelete(path); } @@ -356,27 +372,73 @@ 
public void test_080_fullPathsToPut() throws Throwable { Path base = new Path("s3a://example.org/test_080_fullPathsToPut"); Path child = new Path(base, "child"); List pms = new ArrayList<>(); - ddbms.put(new PathMetadata(makeDirStatus(base))); - ddbms.put(new PathMetadata(makeDirStatus(child))); - ddbms.getInvoker().retry("set up directory tree", - base.toString(), - true, - () -> ddbms.put(pms)); try { - DDBPathMetadata dirData = ddbms.get(child, true); - execute("list", + try (BulkOperationState bulkUpdate + = ddbms.initiateBulkWrite( + BulkOperationState.OperationType.Put, child)) { + ddbms.put(new PathMetadata(makeDirStatus(base)), bulkUpdate); + ddbms.put(new PathMetadata(makeDirStatus(child)), bulkUpdate); + ddbms.getInvoker().retry("set up directory tree", + base.toString(), + true, + () -> ddbms.put(pms, bulkUpdate)); + } + try (BulkOperationState bulkUpdate + = ddbms.initiateBulkWrite( + BulkOperationState.OperationType.Put, child)) { + DDBPathMetadata dirData = ddbms.get(child, true); + execute("put", + OPERATIONS_PER_THREAD, + expectThrottling(), + () -> ddbms.fullPathsToPut(dirData, bulkUpdate) + ); + } + } finally { + ddbms.forgetMetadata(child); + ddbms.forgetMetadata(base); + } + } + + /** + * Try many deletes in parallel; this will create tombstones. + */ + @Test + public void test_090_delete() throws Throwable { + Path path = new Path("s3a://example.org/delete"); + S3AFileStatus status = new S3AFileStatus(true, path, "alice"); + PathMetadata metadata = new PathMetadata(status); + ddbms.put(metadata, null); + ITtlTimeProvider time = checkNotNull(getTtlTimeProvider(), "time provider"); + + try (DurationInfo ignored = new DurationInfo(LOG, true, "delete")) { + execute("delete", + OPERATIONS_PER_THREAD, + expectThrottling(), + () -> { + ddbms.delete(path, time); + }); + } + } + + /** + * Forget Metadata: delete entries without tombstones. + */ + @Test + public void test_100_forgetMetadata() throws Throwable { + Path path = new Path("s3a://example.org/delete"); + try (DurationInfo ignored = new DurationInfo(LOG, true, "delete")) { + execute("delete", OPERATIONS_PER_THREAD, - true, - () -> ddbms.fullPathsToPut(dirData) + expectThrottling(), + () -> ddbms.forgetMetadata(path) ); - } finally { - retryingDelete(base); } } @Test public void test_900_instrumentation() throws Throwable { describe("verify the owner FS gets updated after throttling events"); + Assume.assumeTrue("No throttling expected", expectThrottling()); // we rely on the FS being shared S3AFileSystem fs = getFileSystem(); String fsSummary = fs.toString(); @@ -411,7 +473,7 @@ public ThrottleTracker execute(String operation, throws Exception { final ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); - final ThrottleTracker tracker = new ThrottleTracker(); + final ThrottleTracker tracker = new ThrottleTracker(ddbms); final ExecutorService executorService = Executors.newFixedThreadPool( THREADS); final List> tasks = new ArrayList<>(THREADS); @@ -488,22 +550,21 @@ public ThrottleTracker execute(String operation, * @param ms store * @param pm path to clean up */ - private void cleanupMetadata(MetadataStore ms, PathMetadata pm) { + private void cleanupMetadata(DynamoDBMetadataStore ms, PathMetadata pm) { Path path = pm.getFileStatus().getPath(); try { - ddbms.getInvoker().retry("clean up", path.toString(), true, - () -> ms.forgetMetadata(path)); + ITestDynamoDBMetadataStore.deleteMetadataUnderPath(ms, path, true); } catch (IOException ioe) { // Ignore. 
LOG.info("Ignoring error while cleaning up {} in database", path, ioe); } } - private Path pathOfDepth(long n, @Nullable String fileSuffix) { + private Path pathOfDepth(long n, + String name, @Nullable String fileSuffix) { StringBuilder sb = new StringBuilder(); for (long i = 0; i < n; i++) { - sb.append(i == 0 ? "/" + this.getClass().getSimpleName() : "lvl"); - sb.append(i); + sb.append(i == 0 ? "/" + name : String.format("level-%03d", i)); if (i == n - 1 && fileSuffix != null) { sb.append(fileSuffix); } @@ -512,86 +573,6 @@ private Path pathOfDepth(long n, @Nullable String fileSuffix) { return new Path(getFileSystem().getUri().toString(), sb.toString()); } - /** - * Something to track throttles. - * The constructor sets the counters to the current count in the - * DDB table; a call to {@link #reset()} will set it to the latest values. - * The {@link #probe()} will pick up the latest values to compare them with - * the original counts. - */ - private class ThrottleTracker { - - private long writeThrottleEventOrig = ddbms.getWriteThrottleEventCount(); - - private long readThrottleEventOrig = ddbms.getReadThrottleEventCount(); - - private long batchWriteThrottleCountOrig = - ddbms.getBatchWriteCapacityExceededCount(); - - private long readThrottles; - - private long writeThrottles; - - private long batchThrottles; - - ThrottleTracker() { - reset(); - } - - /** - * Reset the counters. - */ - private synchronized void reset() { - writeThrottleEventOrig - = ddbms.getWriteThrottleEventCount(); - - readThrottleEventOrig - = ddbms.getReadThrottleEventCount(); - - batchWriteThrottleCountOrig - = ddbms.getBatchWriteCapacityExceededCount(); - } - - /** - * Update the latest throttle count; synchronized. - * @return true if throttling has been detected. - */ - private synchronized boolean probe() { - readThrottles = ddbms.getReadThrottleEventCount() - readThrottleEventOrig; - writeThrottles = ddbms.getWriteThrottleEventCount() - - writeThrottleEventOrig; - batchThrottles = ddbms.getBatchWriteCapacityExceededCount() - - batchWriteThrottleCountOrig; - return isThrottlingDetected(); - } - - @Override - public String toString() { - return String.format( - "Tracker with read throttle events = %d;" - + " write events = %d;" - + " batch throttles = %d", - readThrottles, writeThrottles, batchThrottles); - } - - /** - * Assert that throttling has been detected. - */ - void assertThrottlingDetected() { - assertTrue("No throttling detected in " + this + - " against " + ddbms.toString(), - isThrottlingDetected()); - } - - /** - * Has there been any throttling on an operation? - * @return true iff read, write or batch operations were throttled. - */ - private boolean isThrottlingDetected() { - return readThrottles > 0 || writeThrottles > 0 || batchThrottles > 0; - } - } - /** * Outcome of a thread's execution operation. 
*/ diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java index 45c5e79fad263..d0304f7b4d6a0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java @@ -29,6 +29,7 @@ import com.amazonaws.services.dynamodbv2.document.DynamoDB; import com.amazonaws.services.dynamodbv2.document.Table; import com.amazonaws.services.dynamodbv2.model.ListTagsOfResourceRequest; +import com.amazonaws.services.dynamodbv2.model.ResourceInUseException; import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException; import com.amazonaws.services.dynamodbv2.model.Tag; import org.junit.Assert; @@ -251,7 +252,9 @@ public void testDynamoDBInitDestroyCycle() throws Throwable { try { table.delete(); table.waitForDelete(); - } catch (ResourceNotFoundException e) { /* Ignore */ } + } catch (ResourceNotFoundException | ResourceInUseException e) { + /* Ignore */ + } } } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java index f81f0e2bc13b7..3b7227de9555d 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java @@ -417,7 +417,8 @@ private void uploadCommandAssertCount(S3AFileSystem fs, String options[], String[] fields = line.split("\\s"); if (fields.length == 4 && fields[0].equals(Uploads.TOTAL)) { int parsedUploads = Integer.valueOf(fields[1]); - LOG.debug("Matched CLI output: {} {} {} {}", fields); + LOG.debug("Matched CLI output: {} {} {} {}", + fields[0], fields[1], fields[2], fields[3]); assertEquals("Unexpected number of uploads", numUploads, parsedUploads); return; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java index 754da0db7992a..75bd760e1fcb7 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java @@ -26,7 +26,7 @@ import java.util.List; import java.util.Set; -import com.google.common.collect.Sets; +import org.assertj.core.api.Assertions; import org.junit.After; import org.junit.Assume; import org.junit.Before; @@ -45,6 +45,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.HadoopTestBase; +import static com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.S3ATestUtils.metadataStorePersistsAuthoritativeBit; /** @@ -143,6 +144,17 @@ public void tearDown() throws Exception { } } + /** + * Describe a test in the logs. + * @param text text to print + * @param args arguments to format in the printing + */ + protected void describe(String text, Object... args) { + LOG.info("\n\n{}: {}\n", + getMethodName(), + String.format(text, args)); + } + /** * Helper function for verifying DescendantsIterator and * MetadataStoreListFilesIterator behavior. 
@@ -157,7 +169,7 @@ private void doTestDescendantsIterator( final S3AFileStatus status = pathStr.contains("file") ? basicFileStatus(strToPath(pathStr), 100, false) : basicFileStatus(strToPath(pathStr), 0, true); - ms.put(new PathMetadata(status)); + ms.put(new PathMetadata(status), null); } final PathMetadata rootMeta = new PathMetadata(makeDirStatus("/")); @@ -178,7 +190,9 @@ private void doTestDescendantsIterator( LOG.info("We got {} by iterating DescendantsIterator", actual); if (!allowMissing()) { - assertEquals(Sets.newHashSet(checkNodes), actual); + Assertions.assertThat(actual) + .as("files listed through DescendantsIterator") + .containsExactlyInAnyOrder(checkNodes); } } @@ -244,7 +258,7 @@ public void testPutNew() throws Exception { * containing directory. We only track direct children of the directory. * Thus this will not affect entry for /da1. */ - ms.put(new PathMetadata(makeFileStatus("/da1/db1/fc1", 100))); + ms.put(new PathMetadata(makeFileStatus("/da1/db1/fc1", 100)), null); assertEmptyDirs("/da2", "/da3"); assertDirectorySize("/da1/db1", 1); @@ -256,7 +270,7 @@ public void testPutNew() throws Exception { } /* This already exists, and should silently replace it. */ - ms.put(new PathMetadata(makeDirStatus("/da1/db1"))); + ms.put(new PathMetadata(makeDirStatus("/da1/db1")), null); /* If we had putNew(), and used it above, this would be empty again. */ assertDirectorySize("/da1", 1); @@ -264,8 +278,8 @@ public void testPutNew() throws Exception { assertEmptyDirs("/da2", "/da3"); /* Ensure new files update correct parent dirs. */ - ms.put(new PathMetadata(makeFileStatus("/da1/db1/fc1", 100))); - ms.put(new PathMetadata(makeFileStatus("/da1/db1/fc2", 200))); + ms.put(new PathMetadata(makeFileStatus("/da1/db1/fc1", 100)), null); + ms.put(new PathMetadata(makeFileStatus("/da1/db1/fc2", 200)), null); assertDirectorySize("/da1", 1); assertDirectorySize("/da1/db1", 2); assertEmptyDirs("/da2", "/da3"); @@ -280,14 +294,15 @@ public void testPutNew() throws Exception { public void testPutOverwrite() throws Exception { final String filePath = "/a1/b1/c1/some_file"; final String dirPath = "/a1/b1/c1/d1"; - ms.put(new PathMetadata(makeFileStatus(filePath, 100))); - ms.put(new PathMetadata(makeDirStatus(dirPath))); + ms.put(new PathMetadata(makeFileStatus(filePath, 100)), null); + ms.put(new PathMetadata(makeDirStatus(dirPath)), null); PathMetadata meta = ms.get(strToPath(filePath)); if (!allowMissing() || meta != null) { verifyFileStatus(meta.getFileStatus(), 100); } - ms.put(new PathMetadata(basicFileStatus(strToPath(filePath), 9999, false))); + ms.put(new PathMetadata(basicFileStatus(strToPath(filePath), 9999, false)), + null); meta = ms.get(strToPath(filePath)); if (!allowMissing() || meta != null) { verifyFileStatus(meta.getFileStatus(), 9999); @@ -298,15 +313,17 @@ public void testPutOverwrite() throws Exception { public void testRootDirPutNew() throws Exception { Path rootPath = strToPath("/"); - ms.put(new PathMetadata(makeFileStatus("/file1", 100))); + ms.put(new PathMetadata(makeFileStatus("/file1", 100)), null); DirListingMetadata dir = ms.listChildren(rootPath); if (!allowMissing() || dir != null) { assertNotNull("Root dir cached", dir); assertFalse("Root not fully cached", dir.isAuthoritative()); - assertNotNull("have root dir file listing", dir.getListing()); - assertEquals("One file in root dir", 1, dir.getListing().size()); - assertEquals("file1 in root dir", strToPath("/file1"), - dir.getListing().iterator().next().getFileStatus().getPath()); + final Collection listing = 
dir.getListing(); + Assertions.assertThat(listing) + .describedAs("Root dir listing") + .isNotNull() + .extracting(p -> p.getFileStatus().getPath()) + .containsExactly(strToPath("/file1")); } } @@ -338,7 +355,8 @@ private void deleteSubtreeHelper(String pathPrefix) throws Exception { setUpDeleteTest(p); createNewDirs(p + "/ADirectory1/db1/dc1", p + "/ADirectory1/db1/dc1/dd1"); ms.put(new PathMetadata( - makeFileStatus(p + "/ADirectory1/db1/dc1/dd1/deepFile", 100))); + makeFileStatus(p + "/ADirectory1/db1/dc1/dd1/deepFile", 100)), + null); if (!allowMissing()) { assertCached(p + "/ADirectory1/db1"); } @@ -388,9 +406,11 @@ private void setUpDeleteTest(String prefix) throws IOException { createNewDirs(prefix + "/ADirectory1", prefix + "/ADirectory2", prefix + "/ADirectory1/db1"); ms.put(new PathMetadata(makeFileStatus(prefix + "/ADirectory1/db1/file1", - 100))); + 100)), + null); ms.put(new PathMetadata(makeFileStatus(prefix + "/ADirectory1/db1/file2", - 100))); + 100)), + null); PathMetadata meta = ms.get(strToPath(prefix + "/ADirectory1/db1/file2")); if (!allowMissing() || meta != null) { @@ -403,8 +423,8 @@ private void setUpDeleteTest(String prefix) throws IOException { public void testGet() throws Exception { final String filePath = "/a1/b1/c1/some_file"; final String dirPath = "/a1/b1/c1/d1"; - ms.put(new PathMetadata(makeFileStatus(filePath, 100))); - ms.put(new PathMetadata(makeDirStatus(dirPath))); + ms.put(new PathMetadata(makeFileStatus(filePath, 100)), null); + ms.put(new PathMetadata(makeDirStatus(dirPath)), null); PathMetadata meta = ms.get(strToPath(filePath)); if (!allowMissing() || meta != null) { assertNotNull("Get found file", meta); @@ -532,7 +552,7 @@ public void testListChildrenAuthoritative() throws IOException { DirListingMetadata dirMeta = ms.listChildren(strToPath("/a1/b1")); dirMeta.setAuthoritative(true); dirMeta.put(makeFileStatus("/a1/b1/file_new", 100)); - ms.put(dirMeta); + ms.put(dirMeta, null); dirMeta = ms.listChildren(strToPath("/a1/b1")); assertListingsEqual(dirMeta.getListing(), "/a1/b1/file1", "/a1/b1/file2", @@ -590,7 +610,7 @@ public void testMove() throws Exception { destMetas.add(new PathMetadata(makeDirStatus("/b1"))); destMetas.add(new PathMetadata(makeFileStatus("/b1/file1", 100))); destMetas.add(new PathMetadata(makeFileStatus("/b1/file2", 100))); - ms.move(srcPaths, destMetas, ttlTimeProvider); + ms.move(srcPaths, destMetas, ttlTimeProvider, null); // Assert src is no longer there dirMeta = ms.listChildren(strToPath("/a1")); @@ -634,7 +654,7 @@ public void testMultiBucketPaths() throws Exception { assertNull("Path2 should not be present yet.", meta); // Put p1, assert p2 doesn't match - ms.put(new PathMetadata(makeFileStatus(p1, 100))); + ms.put(new PathMetadata(makeFileStatus(p1, 100)), null); meta = ms.get(new Path(p2)); assertNull("Path 2 should not match path 1.", meta); @@ -653,7 +673,8 @@ public void testPruneFiles() throws Exception { createNewDirs("/pruneFiles"); long oldTime = getTime(); - ms.put(new PathMetadata(makeFileStatus("/pruneFiles/old", 1, oldTime))); + ms.put(new PathMetadata(makeFileStatus("/pruneFiles/old", 1, oldTime)), + null); DirListingMetadata ls2 = ms.listChildren(strToPath("/pruneFiles")); if (!allowMissing()) { assertListingsEqual(ls2.getListing(), "/pruneFiles/old"); @@ -664,7 +685,8 @@ public void testPruneFiles() throws Exception { Thread.sleep(1); long cutoff = System.currentTimeMillis(); long newTime = getTime(); - ms.put(new PathMetadata(makeFileStatus("/pruneFiles/new", 1, newTime))); + ms.put(new 
PathMetadata(makeFileStatus("/pruneFiles/new", 1, newTime)), + null); DirListingMetadata ls; ls = ms.listChildren(strToPath("/pruneFiles")); @@ -695,7 +717,7 @@ public void testPruneDirs() throws Exception { long oldTime = getTime(); ms.put(new PathMetadata(makeFileStatus("/pruneDirs/dir/file", - 1, oldTime))); + 1, oldTime)), null); // It's possible for the Local implementation to get from the old // modification time to here in under 1ms, causing it to not get pruned @@ -720,16 +742,18 @@ public void testPruneUnsetsAuthoritative() throws Exception { long time = System.currentTimeMillis(); ms.put(new PathMetadata( basicFileStatus(0, false, 0, time - 1, strToPath(staleFile)), - Tristate.FALSE, false)); + Tristate.FALSE, false), + null); ms.put(new PathMetadata( basicFileStatus(0, false, 0, time + 1, strToPath(freshFile)), - Tristate.FALSE, false)); + Tristate.FALSE, false), + null); // set parent dir as authoritative if (!allowMissing()) { DirListingMetadata parentDirMd = ms.listChildren(strToPath(parentDir)); parentDirMd.setAuthoritative(true); - ms.put(parentDirMd); + ms.put(parentDirMd, null); } ms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, time); @@ -757,16 +781,18 @@ public void testPrunePreservesAuthoritative() throws Exception { long time = System.currentTimeMillis(); ms.put(new PathMetadata( basicFileStatus(0, false, 0, time + 1, strToPath(staleFile)), - Tristate.FALSE, false)); + Tristate.FALSE, false), + null); ms.put(new PathMetadata( basicFileStatus(0, false, 0, time + 1, strToPath(freshFile)), - Tristate.FALSE, false)); + Tristate.FALSE, false), + null); if (!allowMissing()) { // set parent dir as authoritative DirListingMetadata parentDirMd = ms.listChildren(strToPath(parentDir)); parentDirMd.setAuthoritative(true); - ms.put(parentDirMd); + ms.put(parentDirMd, null); // prune the ms ms.prune(MetadataStore.PruneMode.ALL_BY_MODTIME, time); @@ -798,7 +824,7 @@ public void testPutDirListingMetadataPutsFileMetadata() } DirListingMetadata dirMeta = new DirListingMetadata(strToPath(dirPath), metas, authoritative); - ms.put(dirMeta); + ms.put(dirMeta, null); if (!allowMissing()) { assertDirectorySize(dirPath, filenames.length); @@ -818,7 +844,7 @@ public void testPutRetainsIsDeletedInParentListing() throws Exception { final S3AFileStatus fileStatus = basicFileStatus(path, 0, false); PathMetadata pm = new PathMetadata(fileStatus); pm.setIsDeleted(true); - ms.put(pm); + ms.put(pm, null); if(!allowMissing()) { final PathMetadata pathMetadata = ms.listChildren(path.getParent()).get(path); @@ -951,8 +977,8 @@ private void commonTestPutListStatus(final String parent) throws IOException { private void setupListStatus() throws IOException { createNewDirs("/a1", "/a2", "/a1/b1", "/a1/b2", "/a1/b1/c1", "/a1/b1/c1/d1"); - ms.put(new PathMetadata(makeFileStatus("/a1/b1/file1", 100))); - ms.put(new PathMetadata(makeFileStatus("/a1/b1/file2", 100))); + ms.put(new PathMetadata(makeFileStatus("/a1/b1/file1", 100)), null); + ms.put(new PathMetadata(makeFileStatus("/a1/b1/file2", 100)), null); } private void assertListingsEqual(Collection listing, @@ -966,10 +992,12 @@ private void assertListingsEqual(Collection listing, for (String ps : pathStrs) { b.add(strToPath(ps)); } - assertEquals("Same set of files", b, a); + Assertions.assertThat(a) + .as("Directory Listing") + .containsExactlyInAnyOrderElementsOf(b); } - private void putListStatusFiles(String dirPath, boolean authoritative, + protected void putListStatusFiles(String dirPath, boolean authoritative, String... 
filenames) throws IOException { ArrayList metas = new ArrayList<>(filenames .length); for (String filename : filenames) { @@ -977,13 +1005,13 @@ private void putListStatusFiles(String dirPath, boolean authoritative, } DirListingMetadata dirMeta = new DirListingMetadata(strToPath(dirPath), metas, authoritative); - ms.put(dirMeta); + ms.put(dirMeta, null); } - private void createNewDirs(String... dirs) + protected void createNewDirs(String... dirs) throws IOException { for (String pathStr : dirs) { - ms.put(new PathMetadata(makeDirStatus(pathStr))); + ms.put(new PathMetadata(makeDirStatus(pathStr)), null); } } @@ -995,8 +1023,9 @@ private void assertDirectorySize(String pathStr, int size) } if (!allowMissing() || dirMeta != null) { dirMeta = dirMeta.withoutTombstones(); - assertEquals("Number of entries in dir " + pathStr, size, - nonDeleted(dirMeta.getListing()).size()); + Assertions.assertThat(nonDeleted(dirMeta.getListing())) + .as("files in directory %s", pathStr) + .hasSize(size); } } @@ -1012,45 +1041,89 @@ private Collection nonDeleted( return currentStatuses; } - private void assertDeleted(String pathStr) throws IOException { + protected PathMetadata get(final String pathStr) throws IOException { Path path = strToPath(pathStr); - PathMetadata meta = ms.get(path); + return ms.get(path); + } + + protected PathMetadata getNonNull(final String pathStr) throws IOException { + return checkNotNull(get(pathStr), "No metastore entry for %s", pathStr); + } + + protected void assertDeleted(String pathStr) throws IOException { + PathMetadata meta = get(pathStr); boolean cached = meta != null && !meta.isDeleted(); - assertFalse(pathStr + " should not be cached.", cached); + assertFalse(pathStr + " should not be cached: " + meta, cached); } protected void assertCached(String pathStr) throws IOException { - Path path = strToPath(pathStr); - PathMetadata meta = ms.get(path); - boolean cached = meta != null && !meta.isDeleted(); - assertTrue(pathStr + " should be cached.", cached); + verifyCached(pathStr); + } + + /** + * Get an entry which must exist and not be deleted. + * @param pathStr path + * @return the entry + * @throws IOException IO failure. + */ + protected PathMetadata verifyCached(final String pathStr) throws IOException { + PathMetadata meta = getNonNull(pathStr); + assertFalse(pathStr + " was found but marked deleted: "+ meta, + meta.isDeleted()); + return meta; + } + + /** + * Get an entry which must be a file. + * @param pathStr path + * @return the entry + * @throws IOException IO failure. + */ + protected PathMetadata getFile(final String pathStr) throws IOException { + PathMetadata meta = verifyCached(pathStr); + assertFalse(pathStr + " is not a file: " + meta, + meta.getFileStatus().isDirectory()); + return meta; + } + + /** + * Get an entry which must be a directory. + * @param pathStr path + * @return the entry + * @throws IOException IO failure. + */ + protected PathMetadata getDirectory(final String pathStr) throws IOException { + PathMetadata meta = verifyCached(pathStr); + assertTrue(pathStr + " is not a directory: " + meta, + meta.getFileStatus().isDirectory()); + return meta; } /** * Convenience to create a fully qualified Path from string. 
*/ - Path strToPath(String p) throws IOException { + protected Path strToPath(String p) throws IOException { final Path path = new Path(p); - assert path.isAbsolute(); + assertTrue("Non-absolute path: " + path, path.isAbsolute()); return path.makeQualified(contract.getFileSystem().getUri(), null); } - private void assertEmptyDirectory(String pathStr) throws IOException { + protected void assertEmptyDirectory(String pathStr) throws IOException { assertDirectorySize(pathStr, 0); } - private void assertEmptyDirs(String ...dirs) throws IOException { + protected void assertEmptyDirs(String...dirs) throws IOException { for (String pathStr : dirs) { assertEmptyDirectory(pathStr); } } - S3AFileStatus basicFileStatus(Path path, int size, boolean isDir) throws - IOException { + protected S3AFileStatus basicFileStatus(Path path, int size, boolean isDir) + throws IOException { return basicFileStatus(path, size, isDir, modTime); } - S3AFileStatus basicFileStatus(int size, boolean isDir, + protected S3AFileStatus basicFileStatus(int size, boolean isDir, long blockSize, long modificationTime, Path path) { if (isDir) { return new S3AFileStatus(Tristate.UNKNOWN, path, null); @@ -1066,33 +1139,33 @@ public static S3AFileStatus basicFileStatus(Path path, int size, return new S3AFileStatus(Tristate.UNKNOWN, path, OWNER); } else { return new S3AFileStatus(size, newModTime, path, BLOCK_SIZE, OWNER, - null, null); + "etag", "version"); } } - private S3AFileStatus makeFileStatus(String pathStr, int size) throws + protected S3AFileStatus makeFileStatus(String pathStr, int size) throws IOException { return makeFileStatus(pathStr, size, modTime); } - private S3AFileStatus makeFileStatus(String pathStr, int size, + protected S3AFileStatus makeFileStatus(String pathStr, int size, long newModTime) throws IOException { return basicFileStatus(strToPath(pathStr), size, false, newModTime); } - void verifyFileStatus(FileStatus status, long size) { + protected void verifyFileStatus(FileStatus status, long size) { S3ATestUtils.verifyFileStatus(status, size, BLOCK_SIZE, modTime); } - private S3AFileStatus makeDirStatus(String pathStr) throws IOException { + protected S3AFileStatus makeDirStatus(String pathStr) throws IOException { return basicFileStatus(strToPath(pathStr), 0, true, modTime); } /** * Verify the directory file status. Subclass may verify additional fields. */ - void verifyDirStatus(S3AFileStatus status) { + protected void verifyDirStatus(S3AFileStatus status) { assertTrue("Is a dir", status.isDirectory()); assertEquals("zero length", 0, status.getLen()); } @@ -1113,4 +1186,51 @@ protected static ITtlTimeProvider getTtlTimeProvider() { return ttlTimeProvider; } + /** + * Put a file to the shared DDB table. + * @param key key + * @param time timestamp. + * @param operationState ongoing state + * @return the entry + * @throws IOException IO failure + */ + protected PathMetadata putFile( + final String key, + final long time, + BulkOperationState operationState) throws IOException { + PathMetadata meta = new PathMetadata(makeFileStatus(key, 1, time)); + ms.put(meta, + operationState); + return meta; + } + + /** + * Put a tombstone to the shared DDB table. + * @param key key + * @param time timestamp. 
+ * @param operationState ongoing state + * @return the entry + * @throws IOException IO failure + */ + protected PathMetadata putTombstone( + final String key, + final long time, + BulkOperationState operationState) throws IOException { + PathMetadata meta = tombstone(strToPath(key), time); + ms.put(meta, operationState); + return meta; + } + + /** + * Create a tombstone from the timestamp. + * @param path path to tombstone + * @param time timestamp. + * @return the entry. + */ + public static PathMetadata tombstone(Path path, long time) { + S3AFileStatus s3aStatus = new S3AFileStatus(0, + time, path, 0, null, + null, null); + return new PathMetadata(s3aStatus, Tristate.UNKNOWN, true); + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMiscOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMiscOperations.java index fc0f8940de0f6..a6d20fd966af2 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMiscOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMiscOperations.java @@ -26,7 +26,9 @@ import com.amazonaws.waiters.WaiterTimedOutException; import org.junit.Test; +import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.s3a.AWSClientIOException; +import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.test.HadoopTestBase; import org.apache.hadoop.fs.Path; @@ -109,4 +111,71 @@ public void testInnerListChildrenDirectoryNpe() throws Exception { ddbms.getDirListingMetadataFromDirMetaAndList(p, metas, dirPathMeta)); } + @Test + public void testAncestorStateForDir() throws Throwable { + final DynamoDBMetadataStore.AncestorState ancestorState + = new DynamoDBMetadataStore.AncestorState( + BulkOperationState.OperationType.Rename, null); + + // path 1 is a directory + final Path path1 = new Path("s3a://bucket/1"); + final S3AFileStatus status1 = new S3AFileStatus(true, + path1, "hadoop"); + final DDBPathMetadata md1 = new DDBPathMetadata( + status1); + ancestorState.put(md1); + assertTrue("Status not found in ancestors", + ancestorState.contains(path1)); + final DDBPathMetadata result = ancestorState.get(path1); + assertEquals(status1, result.getFileStatus()); + assertTrue("Lookup failed", + ancestorState.findEntry(path1, true)); + final Path path2 = new Path("s3a://bucket/2"); + assertFalse("Lookup didn't fail", + ancestorState.findEntry(path2, true)); + assertFalse("Lookup didn't fail", + ancestorState.contains(path2)); + assertNull("Lookup didn't fail", + ancestorState.get(path2)); + } + + @Test + public void testAncestorStateForFile() throws Throwable { + final DynamoDBMetadataStore.AncestorState ancestorState + = new DynamoDBMetadataStore.AncestorState( + BulkOperationState.OperationType.Rename, null); + + // path 1 is a file + final Path path1 = new Path("s3a://bucket/1"); + final S3AFileStatus status1 = new S3AFileStatus( + 1024_1024_1024L, + 0, + path1, + 32_000_000, + "hadoop", + "e4", + "f5"); + final DDBPathMetadata md1 = new DDBPathMetadata( + status1); + ancestorState.put(md1); + assertTrue("Lookup failed", + ancestorState.findEntry(path1, false)); + intercept(PathIOException.class, + DynamoDBMetadataStore.E_INCONSISTENT_UPDATE, + () -> ancestorState.findEntry(path1, true)); + } + + @Test + public void testNoBulkRenameThroughInitiateBulkWrite() throws Throwable { + intercept(IllegalArgumentException.class, + () -> S3Guard.initiateBulkWrite(null, + 
BulkOperationState.OperationType.Rename, null)); + } + @Test + public void testInitiateBulkWrite() throws Throwable { + assertNull( + S3Guard.initiateBulkWrite(null, + BulkOperationState.OperationType.Put, null)); + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java new file mode 100644 index 0000000000000..9a3db1ab21a5d --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java @@ -0,0 +1,197 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.s3guard; + +import java.util.Comparator; +import java.util.List; + +import org.junit.Test; + +import org.apache.hadoop.fs.Path; + +import static com.google.common.collect.Lists.newArrayList; +import static org.apache.hadoop.fs.s3a.s3guard.PathOrderComparators.TOPMOST_PATH_FIRST; +import static org.apache.hadoop.fs.s3a.s3guard.PathOrderComparators.TOPMOST_PATH_LAST; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertEquals; + +/** + * Test ordering of paths with the comparator matches requirements. 
+ */ +public class TestPathOrderComparators { + + private static final Path ROOT = new Path("s3a://bucket/"); + + public static final Path DIR_A = new Path(ROOT, "dirA"); + + public static final Path DIR_B = new Path(ROOT, "dirB"); + + public static final Path DIR_A_FILE_1 = new Path(DIR_A, "file1"); + + public static final Path DIR_A_FILE_2 = new Path(DIR_A, "file2"); + + public static final Path DIR_B_FILE_3 = new Path(DIR_B, "file3"); + + public static final Path DIR_B_FILE_4 = new Path(DIR_B, "file4"); + + @Test + public void testRootEqual() throws Throwable { + assertComparesEqual(ROOT, ROOT); + } + + @Test + public void testRootFirst() throws Throwable { + assertComparesTopmost(ROOT, DIR_A_FILE_1); + } + + @Test + public void testDirOrdering() throws Throwable { + assertComparesTopmost(DIR_A, DIR_B); + } + + @Test + public void testFilesEqual() throws Throwable { + assertComparesEqual(DIR_A_FILE_1, DIR_A_FILE_1); + } + + @Test + public void testFilesInSameDir() throws Throwable { + assertComparesTopmost(ROOT, DIR_A_FILE_1); + assertComparesTopmost(DIR_A, DIR_A_FILE_1); + assertComparesTopmost(DIR_A, DIR_A_FILE_2); + assertComparesTopmost(DIR_A_FILE_1, DIR_A_FILE_2); + } + + @Test + public void testReversedFiles() throws Throwable { + assertReverseOrder(DIR_A_FILE_1, ROOT); + assertReverseOrder(DIR_A_FILE_1, DIR_A); + assertReverseOrder(DIR_A_FILE_2, DIR_A); + assertReverseOrder(DIR_A_FILE_2, DIR_A_FILE_1); + } + + @Test + public void testFilesAndDifferentShallowDir() throws Throwable { + assertComparesTopmost(DIR_B, DIR_A_FILE_1); + assertComparesTopmost(DIR_A, DIR_B_FILE_3); + } + + @Test + public void testOrderRoot() throws Throwable { + verifySorted(ROOT); + } + + @Test + public void testOrderRootDirs() throws Throwable { + verifySorted(ROOT, DIR_A, DIR_B); + } + + @Test + public void testOrderRootDirsAndFiles() throws Throwable { + verifySorted(ROOT, DIR_A, DIR_B, DIR_A_FILE_1, DIR_A_FILE_2); + } + + @Test + public void testOrderRootDirsAndAllFiles() throws Throwable { + verifySorted(ROOT, DIR_A, DIR_B, + DIR_A_FILE_1, DIR_A_FILE_2, + DIR_B_FILE_3, DIR_B_FILE_4); + } + + @Test + public void testSortOrderConstant() throws Throwable { + List sort1 = verifySorted(ROOT, DIR_A, DIR_B, + DIR_A_FILE_1, DIR_A_FILE_2, + DIR_B_FILE_3, DIR_B_FILE_4); + List sort2 = newArrayList(sort1); + assertSortsTo(sort2, sort1, true); + } + + @Test + public void testSortReverse() throws Throwable { + List sort1 = newArrayList( + ROOT, + DIR_A, + DIR_B, + DIR_A_FILE_1, + DIR_A_FILE_2, + DIR_B_FILE_3, + DIR_B_FILE_4); + List expected = newArrayList( + DIR_B_FILE_4, + DIR_B_FILE_3, + DIR_A_FILE_2, + DIR_A_FILE_1, + DIR_B, + DIR_A, + ROOT); + assertSortsTo(expected, sort1, false); + } + + + private List verifySorted(Path... paths) { + List original = newArrayList(paths); + List sorted = newArrayList(paths); + assertSortsTo(original, sorted, true); + return sorted; + } + + private void assertSortsTo( + final List original, + final List sorted, + boolean topmost) { + sorted.sort(topmost ? 
TOPMOST_PATH_FIRST : TOPMOST_PATH_LAST); + assertThat(sorted) + .as("Sorted paths") + .containsExactlyElementsOf(original); + } + + private void assertComparesEqual(Path l, Path r) { + assertOrder(0, l, r); + } + + private void assertComparesTopmost(Path l, Path r) { + assertOrder(-1, l, r); + assertOrder(1, r, l); + } + + private void assertReverseOrder(Path l, Path r) { + assertComparesTo(-1, TOPMOST_PATH_LAST, l, r); + assertComparesTo(1, TOPMOST_PATH_LAST, r, l); + } + + private void assertOrder(int res, + Path l, Path r) { + assertComparesTo(res, TOPMOST_PATH_FIRST, l, r); + } + + private void assertComparesTo(final int expected, + final Comparator comparator, + final Path l, final Path r) { + int actual = comparator.compare(l, r); + if (actual < -1) { + actual = -1; + } + if (actual > 1) { + actual = 1; + } + assertEquals("Comparing " + l + " to " + r, + expected, actual); + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java index bdb256cba3dea..bb5557e597308 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java @@ -91,12 +91,12 @@ public void testPutWithTtlDirListingMeta() throws Exception { when(timeProvider.getNow()).thenReturn(100L); // act - S3Guard.putWithTtl(ms, dlm, timeProvider); + S3Guard.putWithTtl(ms, dlm, timeProvider, null); // assert assertEquals("last update in " + dlm, 100L, dlm.getLastUpdated()); verify(timeProvider, times(1)).getNow(); - verify(ms, times(1)).put(dlm); + verify(ms, times(1)).put(dlm, null); } @Test @@ -111,12 +111,12 @@ public void testPutWithTtlFileMeta() throws Exception { when(timeProvider.getNow()).thenReturn(100L); // act - S3Guard.putWithTtl(ms, pm, timeProvider); + S3Guard.putWithTtl(ms, pm, timeProvider, null); // assert assertEquals("last update in " + pm, 100L, pm.getLastUpdated()); verify(timeProvider, times(1)).getNow(); - verify(ms, times(1)).put(pm); + verify(ms, times(1)).put(pm, null); } @Test @@ -134,14 +134,14 @@ public void testPutWithTtlCollection() throws Exception { when(timeProvider.getNow()).thenReturn(100L); // act - S3Guard.putWithTtl(ms, pmCollection, timeProvider); + S3Guard.putWithTtl(ms, pmCollection, timeProvider, null); // assert pmCollection.forEach( pm -> assertEquals(100L, pm.getLastUpdated()) ); verify(timeProvider, times(1)).getNow(); - verify(ms, times(1)).put(pmCollection); + verify(ms, times(1)).put(pmCollection, null); } @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ThrottleTracker.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ThrottleTracker.java new file mode 100644 index 0000000000000..5e33be8367de4 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ThrottleTracker.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.s3guard; + +import org.junit.Assert; + +/** + * Something to track throttles in DynamoDB metastores. + * The constructor sets the counters to the current count in the + * DDB table; a call to {@link #reset()} will set it to the latest values. + * The {@link #probe()} will pick up the latest values to compare them with + * the original counts. + *
<p>
    + * The toString value logs the state. + *
<p>
    + * This class was originally part of ITestDynamoDBMetadataStoreScale; + * it was converted to a toplevel class for broader use. + */ +class ThrottleTracker { + + private final DynamoDBMetadataStore ddbms; + + private long writeThrottleEventOrig = 0; + + private long readThrottleEventOrig = 0; + + private long batchWriteThrottleCountOrig = 0; + + private long readThrottles; + + private long writeThrottles; + + private long batchThrottles; + + ThrottleTracker(final DynamoDBMetadataStore ddbms) { + this.ddbms = ddbms; + reset(); + } + + /** + * Reset the counters. + */ + public synchronized void reset() { + writeThrottleEventOrig + = ddbms.getWriteThrottleEventCount(); + + readThrottleEventOrig + = ddbms.getReadThrottleEventCount(); + + batchWriteThrottleCountOrig + = ddbms.getBatchWriteCapacityExceededCount(); + } + + /** + * Update the latest throttle count; synchronized. + * @return true if throttling has been detected. + */ + public synchronized boolean probe() { + setReadThrottles( + ddbms.getReadThrottleEventCount() - readThrottleEventOrig); + setWriteThrottles(ddbms.getWriteThrottleEventCount() + - writeThrottleEventOrig); + setBatchThrottles(ddbms.getBatchWriteCapacityExceededCount() + - batchWriteThrottleCountOrig); + return isThrottlingDetected(); + } + + @Override + public String toString() { + return String.format( + "Tracker with read throttle events = %d;" + + " write events = %d;" + + " batch throttles = %d", + getReadThrottles(), getWriteThrottles(), getBatchThrottles()); + } + + /** + * Assert that throttling has been detected. + */ + public void assertThrottlingDetected() { + Assert.assertTrue("No throttling detected in " + this + + " against " + ddbms.toString(), + isThrottlingDetected()); + } + + /** + * Has there been any throttling on an operation? + * @return true iff read, write or batch operations were throttled. 
+ */ + public boolean isThrottlingDetected() { + return getReadThrottles() > 0 || getWriteThrottles() + > 0 || getBatchThrottles() > 0; + } + + public long getReadThrottles() { + return readThrottles; + } + + public void setReadThrottles(long readThrottles) { + this.readThrottles = readThrottles; + } + + public long getWriteThrottles() { + return writeThrottles; + } + + public void setWriteThrottles(long writeThrottles) { + this.writeThrottles = writeThrottles; + } + + public long getBatchThrottles() { + return batchThrottles; + } + + public void setBatchThrottles(long batchThrottles) { + this.batchThrottles = batchThrottles; + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java index 1bffc3b1b72fc..f1771a315047d 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractITestS3AMetadataStoreScale.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.fs.s3a.s3guard.PathMetadata; import org.apache.hadoop.fs.s3a.s3guard.S3Guard; @@ -73,6 +74,10 @@ public void initialize() { */ public abstract MetadataStore createMetadataStore() throws IOException; + protected ITtlTimeProvider getTtlTimeProvider() { + return ttlTimeProvider; + } + @Test public void test_010_Put() throws Throwable { describe("Test workload of put() operations"); @@ -139,13 +144,15 @@ public void test_020_Moves() throws Throwable { toDelete = movedPaths; toCreate = origMetas; } - ms.move(toDelete, toCreate, ttlTimeProvider); + ms.move(toDelete, toCreate, ttlTimeProvider, null); } moveTimer.end(); printTiming(LOG, "move", moveTimer, operations); } finally { // Cleanup clearMetadataStore(ms, count); + ms.move(origPaths, null, ttlTimeProvider, null); + ms.move(movedPaths, null, ttlTimeProvider, null); } } } @@ -191,9 +198,13 @@ private long populateMetadataStore(Collection paths, long count = 0; NanoTimer putTimer = new NanoTimer(); describe("Inserting into MetadataStore"); - for (PathMetadata p : paths) { - ms.put(p); - count++; + try (BulkOperationState operationState = + ms.initiateBulkWrite(BulkOperationState.OperationType.Put, + BUCKET_ROOT)) { + for (PathMetadata p : paths) { + ms.put(p, operationState); + count++; + } } putTimer.end(); printTiming(LOG, "put", putTimer, count); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/ExtraAssertions.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/ExtraAssertions.java new file mode 100644 index 0000000000000..2b93d72face4e --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/ExtraAssertions.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.util.DurationInfo;
+
+import static org.apache.hadoop.fs.s3a.S3AUtils.applyLocatedFiles;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Some extra assertions for tests.
+ */
+@InterfaceAudience.Private
+public final class ExtraAssertions {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ExtraAssertions.class);
+
+  private ExtraAssertions() {
+  }
+
+  /**
+   * Assert that the number of files in a destination matches that expected.
+   * @param message text to use in the message
+   * @param fs filesystem
+   * @param path path to list (recursively)
+   * @param expected expected count
+   * @throws IOException IO problem
+   */
+  public static void assertFileCount(final String message,
+      final FileSystem fs,
+      final Path path,
+      final long expected)
+      throws IOException {
+    List<String> files = new ArrayList<>();
+    try (DurationInfo ignored = new DurationInfo(LOG, false,
+        "Counting files in %s", path)) {
+      applyLocatedFiles(fs.listFiles(path, true),
+          (status) -> files.add(status.getPath().toString()));
+    }
+    long actual = files.size();
+    if (actual != expected) {
+      String ls = files.stream().collect(Collectors.joining("\n"));
+      Assert.fail(message + ": expected " + expected + " files in " + path
+          + " but got " + actual + "\n" + ls);
+    }
+  }
+
+  /**
+   * Assert that a string contains a piece of text.
+   * @param text text to scan.
+   * @param contained text to look for.
+   */
+  public static void assertTextContains(String text, String contained) {
+    assertTrue("string \"" + contained + "\" not found in \"" + text + "\"",
+        text != null && text.contains(contained));
+  }
+
+  /**
+   * If the condition is met, throw an AssertionError with the message
+   * and any nested exception.
+   * @param condition condition
+   * @param message text to use in the exception
+   * @param cause a (possibly null) throwable to init the cause with
+   * @throws AssertionError with the text and throwable if condition == true.
+   */
+  public static void failIf(boolean condition,
+      String message,
+      Throwable cause) {
+    if (condition) {
+      ContractTestUtils.fail(message, cause);
+    }
+  }
+
+  /**
+   * If the condition is met, throw an AssertionError with the message
+   * and any nested exception.
+   * @param condition condition
+   * @param message text to use in the exception
+   * @param cause a (possibly null) throwable to init the cause with
+   * @throws AssertionError with the text and throwable if condition == true.
+   */
+  public static void failUnless(boolean condition,
+      String message,
+      Throwable cause) {
+    failIf(!condition, message, cause);
+  }
+
+  /**
+   * Extract the inner cause of an exception.
+ * @param expected expected class of the cause + * @param thrown thrown exception. + * @param type of the cause + * @return the extracted exception. + * @throws AssertionError with the text and throwable if the cause is not as + * expected + */ + public static T extractCause(Class expected, + Throwable thrown) { + Throwable cause = thrown.getCause(); + failIf(cause == null, + "No inner cause", + thrown); + failUnless(cause.getClass().equals(expected), + "Inner cause is of wrong type : expected " + expected, + thrown); + return (T)cause; + } +} From 9c4b15d2f4ef6eb600a3f2b36902f232536d7120 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Thu, 20 Jun 2019 16:33:59 +0200 Subject: [PATCH 0243/1308] HDDS-1508. Provide example k8s deployment files for the new CSI server Closes #905 --- .../definitions/ozone-csi/csi-controller.yaml | 53 ++++++++++ .../k8s/definitions/ozone-csi/csi-crd.yaml | 21 ++++ .../k8s/definitions/ozone-csi/csi-node.yaml | 95 ++++++++++++++++++ .../k8s/definitions/ozone-csi/csi-rbac.yaml | 66 +++++++++++++ .../ozone-csi/csi-storageclass.yaml | 20 ++++ .../ozone-csi/definitions/csi.yaml | 28 ++++++ .../src/main/k8s/definitions/ozone/om-ss.yaml | 2 +- .../main/k8s/definitions/ozone/scm-ss.yaml | 2 +- .../k8s/definitions/pv-test/flekszible.yaml | 16 +++ .../pv-test/nginx-conf-configmap.yaml | 37 +++++++ .../definitions/pv-test/nginx-deployment.yaml | 49 ++++++++++ .../pv-test/nginx-service-service.yaml | 28 ++++++ .../nginx-storage-persistentvolumeclaim.yaml | 28 ++++++ .../main/k8s/examples/ozone-csi/Flekszible | 25 +++++ .../k8s/examples/ozone-csi/LICENSE.header | 15 +++ .../examples/ozone-csi/config-configmap.yaml | 37 +++++++ .../ozone-csi/csi-node-daemonset.yaml | 97 ++++++++++++++++++ .../ozone-csi/csi-ozone-clusterrole.yaml | 98 +++++++++++++++++++ .../csi-ozone-clusterrolebinding.yaml | 28 ++++++ .../ozone-csi/csi-ozone-serviceaccount.yaml | 21 ++++ .../ozone-csi/csi-provisioner-deployment.yaml | 54 ++++++++++ .../ozone-csi/datanode-daemonset.yaml | 56 +++++++++++ .../k8s/examples/ozone-csi/om-service.yaml | 28 ++++++ .../examples/ozone-csi/om-statefulset.yaml | 73 ++++++++++++++ .../org.apache.hadoop.ozone-csidriver.yaml | 22 +++++ .../ozone-csi/ozone-storageclass.yaml | 21 ++++ .../pv-test/nginx-conf-configmap.yaml | 38 +++++++ .../ozone-csi/pv-test/nginx-deployment.yaml | 50 ++++++++++ .../pv-test/nginx-service-service.yaml | 29 ++++++ .../nginx-storage-persistentvolumeclaim.yaml | 29 ++++++ .../k8s/examples/ozone-csi/s3g-service.yaml | 28 ++++++ .../examples/ozone-csi/s3g-statefulset.yaml | 51 ++++++++++ .../k8s/examples/ozone-csi/scm-service.yaml | 28 ++++++ .../examples/ozone-csi/scm-statefulset.yaml | 67 +++++++++++++ 34 files changed, 1338 insertions(+), 2 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-crd.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-node.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-rbac.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-storageclass.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/definitions/csi.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-conf-configmap.yaml create mode 100644 
hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-deployment.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-service-service.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-storage-persistentvolumeclaim.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/Flekszible create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/LICENSE.header create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/config-configmap.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-node-daemonset.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-clusterrole.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-clusterrolebinding.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-serviceaccount.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-provisioner-deployment.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/datanode-daemonset.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/om-service.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/om-statefulset.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/org.apache.hadoop.ozone-csidriver.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/ozone-storageclass.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-conf-configmap.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-deployment.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-service-service.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-storage-persistentvolumeclaim.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/s3g-service.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/s3g-statefulset.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/scm-service.yaml create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/scm-statefulset.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml new file mode 100644 index 0000000000000..e7c2222601010 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml @@ -0,0 +1,53 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
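+#
+# Controller-side CSI plugin: the upstream csi-provisioner sidecar and the
+# Ozone CSI server ("ozone csi") talk over a shared UNIX socket in the
+# socket-dir emptyDir volume mounted at /var/lib/csi/. The @docker.image@
+# placeholder is presumably substituted with the Ozone image by the dist build.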
+kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-provisioner +spec: + replicas: 1 + selector: + matchLabels: + app: csi-provisioner + template: + metadata: + labels: + app: csi-provisioner + spec: + serviceAccount: csi-ozone + containers: + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v1.0.1 + args: + - "--csi-address=/var/lib/csi/csi.sock" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/ + - name: ozone-csi + image: "@docker.image@" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/ + imagePullPolicy: Always + envFrom: + - configMapRef: + name: config + args: + - ozone + - csi + volumes: + - name: socket-dir + emptyDir: diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-crd.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-crd.yaml new file mode 100644 index 0000000000000..f0ca37c30aa79 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-crd.yaml @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: storage.k8s.io/v1beta1 +kind: CSIDriver +metadata: + name: org.apache.hadoop.ozone +spec: + attachRequired: false diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-node.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-node.yaml new file mode 100644 index 0000000000000..6c3a1ac36b7b8 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-node.yaml @@ -0,0 +1,95 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
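+#
+# Node-side CSI plugin, one pod per node: csi-node-driver-registrar registers
+# the org.apache.hadoop.ozone driver socket with the kubelet, while the
+# privileged csi-node container ("ozone csi") handles the volume mounts;
+# the /dev/fuse device, SYS_ADMIN capability and Bidirectional mount
+# propagation on /var/lib/kubelet/pods are needed for that.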
+kind: DaemonSet +apiVersion: apps/v1beta2 +metadata: + name: csi-node +spec: + selector: + matchLabels: + app: csi-node + template: + metadata: + labels: + app: csi-node + spec: + serviceAccount: csi-ozone + containers: + - name: driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2 + args: + - "--v=4" + - "--csi-address=/var/lib/csi/csi.sock" + - "--kubelet-registration-path=/var/lib/kubelet/plugins/org.apache.hadoop.ozone/csi.sock" + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /var/lib/csi + - name: registration-dir + mountPath: /registration/ + - name: csi-node + image: "@docker.image@" + securityContext: + runAsUser: 0 + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + args: + - ozone + - csi + envFrom: + - configMapRef: + name: config + imagePullPolicy: "Always" + volumeMounts: + - name: plugin-dir + mountPath: /var/lib/csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet/pods + mountPropagation: "Bidirectional" + - name: fuse-device + mountPath: /dev/fuse + - name: dbus + mountPath: /var/run/dbus + - name: systemd + mountPath: /run/systemd + volumes: + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/org.apache.hadoop.ozone + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory + - name: fuse-device + hostPath: + path: /dev/fuse + - name: dbus + hostPath: + path: /var/run/dbus + - name: systemd + hostPath: + path: /run/systemd diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-rbac.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-rbac.yaml new file mode 100644 index 0000000000000..d83ffb3e1f1ef --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-rbac.yaml @@ -0,0 +1,66 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
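+#
+# RBAC for the CSI plugin: the csi-ozone ServiceAccount used by both the
+# provisioner Deployment and the node DaemonSet, plus a ClusterRole and
+# binding covering PersistentVolume(Claim)s, StorageClasses,
+# VolumeAttachments, CSINodes, nodes and events.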
+apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: default + name: csi-ozone +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-ozone +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "update","watch"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "create"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-ozone +subjects: + - kind: ServiceAccount + name: csi-ozone + namespace: default +roleRef: + kind: ClusterRole + name: csi-ozone + apiGroup: rbac.authorization.k8s.io diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-storageclass.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-storageclass.yaml new file mode 100644 index 0000000000000..9780160550962 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-storageclass.yaml @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: ozone +provisioner: org.apache.hadoop.ozone diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/definitions/csi.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/definitions/csi.yaml new file mode 100644 index 0000000000000..14c2ea30affaf --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/definitions/csi.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name: ozone/csi +description: Configuration for CSI interface +--- +- type: Add + trigger: + metadata: + name: config + path: + - data + value: + OZONE-SITE.XML_ozone.csi.s3g.address: http://s3g-0.s3g:9878 + OZONE-SITE.XML_ozone.csi.socket: /var/lib/csi/csi.sock + OZONE-SITE.XML_ozone.csi.owner: hadoop diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml index 820d5622811a9..9f9d87df7ddf3 100644 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml @@ -38,7 +38,7 @@ spec: spec: initContainers: - name: init - image: elek/ozone + image: "@docker.image@" args: ["ozone","om","--init"] env: - name: WAITFOR diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml index a87111ca85d8e..9329a5ffe1aa0 100644 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml @@ -42,5 +42,5 @@ spec: args: ["ozone","scm", "--init"] containers: - name: scm - image: elek/ozone + image: "@docker.image@" args: ["ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml new file mode 100644 index 0000000000000..bfe82ff63365d --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +description: Nginx example deployment with persistent volume claim. diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-conf-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-conf-configmap.yaml new file mode 100644 index 0000000000000..1fd8941bc77ad --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-conf-configmap.yaml @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-conf + labels: {} + annotations: {} +data: + default.conf: |- + server { + listen 80; + server_name localhost; + + location / { + root /usr/share/nginx/html; + index index.html index.htm; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + } diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-deployment.yaml new file mode 100644 index 0000000000000..b6cafc5b5ee9b --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-deployment.yaml @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx + labels: + app: nginx + annotations: {} +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + annotations: {} + spec: + containers: + - name: nginx + image: nginx + volumeMounts: + - mountPath: /var/lib/www/html + name: webroot + env: [] + envFrom: [] + volumes: + - name: webroot + persistentVolumeClaim: + claimName: nginx-storage + readOnly: false + - name: config + configMap: + name: nginx-conf diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-service-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-service-service.yaml new file mode 100644 index 0000000000000..3a338bac7ff2a --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-service-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
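+#
+# NodePort Service in front of the pv-test nginx Deployment. Note: the
+# selector below is app: csi-s3-test-nginx while the Deployment in this
+# directory labels its pods app: nginx, so the selector appears to be a
+# leftover from a csi-s3 example and may need to be changed to match.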
+apiVersion: v1 +kind: Service +metadata: + name: nginx-service + labels: {} + annotations: {} +spec: + type: NodePort + ports: + - port: 80 + name: web + selector: + app: csi-s3-test-nginx diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-storage-persistentvolumeclaim.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-storage-persistentvolumeclaim.yaml new file mode 100644 index 0000000000000..ec0d76b2da023 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/nginx-storage-persistentvolumeclaim.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nginx-storage + labels: {} + annotations: {} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/Flekszible new file mode 100644 index 0000000000000..7e4f3f192753f --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/Flekszible @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source: + - path: ../../definitions +import: + - path: ozone-csi + - path: ozone + transformations: + - type: ozone/csi + - path: pv-test + destination: pv-test diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/LICENSE.header b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/LICENSE.header new file mode 100644 index 0000000000000..635f0d9e60e16 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/LICENSE.header @@ -0,0 +1,15 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/config-configmap.yaml new file mode 100644 index 0000000000000..e554145bac699 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/config-configmap.yaml @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config +data: + OZONE-SITE.XML_hdds.datanode.dir: /data/storage + OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data + OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata + OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm + OZONE-SITE.XML_ozone.om.address: om-0.om + OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm + OZONE-SITE.XML_ozone.scm.names: scm-0.scm + OZONE-SITE.XML_ozone.enabled: "true" + LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout + LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender + LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout + LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: '%d{yyyy-MM-dd + HH:mm:ss} %-5p %c{1}:%L - %m%n' + OZONE-SITE.XML_ozone.csi.s3g.address: http://s3g-0.s3g:9878 + OZONE-SITE.XML_ozone.csi.socket: /var/lib/csi/csi.sock + OZONE-SITE.XML_ozone.csi.owner: hadoop diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-node-daemonset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-node-daemonset.yaml new file mode 100644 index 0000000000000..fe4453232d20e --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-node-daemonset.yaml @@ -0,0 +1,97 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +kind: DaemonSet +apiVersion: apps/v1beta2 +metadata: + name: csi-node +spec: + selector: + matchLabels: + app: csi-node + template: + metadata: + labels: + app: csi-node + spec: + serviceAccount: csi-ozone + containers: + - name: driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2 + args: + - --v=4 + - --csi-address=/var/lib/csi/csi.sock + - --kubelet-registration-path=/var/lib/kubelet/plugins/org.apache.hadoop.ozone/csi.sock + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /var/lib/csi + - name: registration-dir + mountPath: /registration/ + - name: csi-node + image: '@docker.image@' + securityContext: + runAsUser: 0 + privileged: true + capabilities: + add: + - SYS_ADMIN + allowPrivilegeEscalation: true + args: + - ozone + - csi + envFrom: + - configMapRef: + name: config + imagePullPolicy: Always + volumeMounts: + - name: plugin-dir + mountPath: /var/lib/csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + - name: fuse-device + mountPath: /dev/fuse + - name: dbus + mountPath: /var/run/dbus + - name: systemd + mountPath: /run/systemd + volumes: + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/org.apache.hadoop.ozone + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory + - name: fuse-device + hostPath: + path: /dev/fuse + - name: dbus + hostPath: + path: /var/run/dbus + - name: systemd + hostPath: + path: /run/systemd diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-clusterrole.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-clusterrole.yaml new file mode 100644 index 0000000000000..efcd51f0724b8 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-clusterrole.yaml @@ -0,0 +1,98 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
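+# Expanded from ../../definitions/ozone-csi via the Flekszible descriptor in
+# this directory; grants the csi-ozone ServiceAccount the cluster-scoped
+# permissions the CSI provisioner and node registrar need.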
+ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-ozone +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - create +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update +- apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-clusterrolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-clusterrolebinding.yaml new file mode 100644 index 0000000000000..2f2537ca740e2 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-clusterrolebinding.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-ozone +subjects: +- kind: ServiceAccount + name: csi-ozone + namespace: default +roleRef: + kind: ClusterRole + name: csi-ozone + apiGroup: rbac.authorization.k8s.io diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-serviceaccount.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-serviceaccount.yaml new file mode 100644 index 0000000000000..628d2a1c5957e --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-ozone-serviceaccount.yaml @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
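+# The csi-ozone ServiceAccount referenced by the csi-provisioner Deployment
+# and the csi-node DaemonSet, and bound to the csi-ozone ClusterRole above.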
+ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: default + name: csi-ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-provisioner-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-provisioner-deployment.yaml new file mode 100644 index 0000000000000..03478ffeee27c --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/csi-provisioner-deployment.yaml @@ -0,0 +1,54 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-provisioner +spec: + replicas: 1 + selector: + matchLabels: + app: csi-provisioner + template: + metadata: + labels: + app: csi-provisioner + spec: + serviceAccount: csi-ozone + containers: + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v1.0.1 + args: + - --csi-address=/var/lib/csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/ + - name: ozone-csi + image: '@docker.image@' + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/ + imagePullPolicy: Always + envFrom: + - configMapRef: + name: config + args: + - ozone + - csi + volumes: + - name: socket-dir + emptyDir: null diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/datanode-daemonset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/datanode-daemonset.yaml new file mode 100644 index 0000000000000..6e3b3b96cf09a --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/datanode-daemonset.yaml @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
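+# One Ozone datanode per Kubernetes node. Storage is an emptyDir volume, so
+# it is lost when the pod goes away; the prometheus.io annotations expose the
+# datanode metrics endpoint (port 9882, path /prom) for scraping.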
+ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: datanode + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: datanode + template: + metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9882" + prometheus.io/path: /prom + labels: + app: ozone + component: datanode + spec: + containers: + - name: datanode + image: '@docker.image@' + args: + - ozone + - datanode + ports: + - containerPort: 9870 + name: rpc + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + initContainers: [] + volumes: + - name: data + emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/om-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/om-service.yaml new file mode 100644 index 0000000000000..617277d9b8503 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/om-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: om +spec: + ports: + - port: 9874 + name: ui + clusterIP: None + selector: + app: ozone + component: om diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/om-statefulset.yaml new file mode 100644 index 0000000000000..884dd46241768 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/om-statefulset.yaml @@ -0,0 +1,73 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
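+# Single-replica Ozone Manager StatefulSet. The init container runs
+# "ozone om --init"; WAITFOR=scm-0.scm:9876 is presumably honoured by the
+# image entrypoint to delay startup until SCM is reachable.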
+ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: om + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: om + serviceName: om + replicas: 1 + template: + metadata: + labels: + app: ozone + component: om + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9874" + prometheus.io/path: /prom + spec: + initContainers: + - name: init + image: '@docker.image@' + args: + - ozone + - om + - --init + env: + - name: WAITFOR + value: scm-0.scm:9876 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + containers: + - name: om + image: '@docker.image@' + args: + - ozone + - om + env: + - name: WAITFOR + value: scm-0.scm:9876 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/org.apache.hadoop.ozone-csidriver.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/org.apache.hadoop.ozone-csidriver.yaml new file mode 100644 index 0000000000000..e657c50f75837 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/org.apache.hadoop.ozone-csidriver.yaml @@ -0,0 +1,22 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: storage.k8s.io/v1beta1 +kind: CSIDriver +metadata: + name: org.apache.hadoop.ozone +spec: + attachRequired: false diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/ozone-storageclass.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/ozone-storageclass.yaml new file mode 100644 index 0000000000000..c6c1c6c9d1e14 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/ozone-storageclass.yaml @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
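+# StorageClass backing dynamic provisioning through the
+# org.apache.hadoop.ozone CSI driver; the pv-test PVC requests it by name
+# (storageClassName: ozone).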
+ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: ozone +provisioner: org.apache.hadoop.ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-conf-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-conf-configmap.yaml new file mode 100644 index 0000000000000..2f1813c5534aa --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-conf-configmap.yaml @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-conf + labels: {} + annotations: {} +data: + default.conf: |- + server { + listen 80; + server_name localhost; + + location / { + root /usr/share/nginx/html; + index index.html index.htm; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + } diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-deployment.yaml new file mode 100644 index 0000000000000..db6eb107edb53 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-deployment.yaml @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
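+# pv-test: a minimal nginx Deployment that mounts the dynamically provisioned
+# nginx-storage claim at /var/lib/www/html to exercise the CSI driver
+# end to end.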
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx + labels: + app: nginx + annotations: {} +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + annotations: {} + spec: + containers: + - name: nginx + image: nginx + volumeMounts: + - mountPath: /var/lib/www/html + name: webroot + env: [] + envFrom: [] + volumes: + - name: webroot + persistentVolumeClaim: + claimName: nginx-storage + readOnly: false + - name: config + configMap: + name: nginx-conf diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-service-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-service-service.yaml new file mode 100644 index 0000000000000..b31a51068904a --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-service-service.yaml @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: nginx-service + labels: {} + annotations: {} +spec: + type: NodePort + ports: + - port: 80 + name: web + selector: + app: csi-s3-test-nginx diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-storage-persistentvolumeclaim.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-storage-persistentvolumeclaim.yaml new file mode 100644 index 0000000000000..5968b926a9f22 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/pv-test/nginx-storage-persistentvolumeclaim.yaml @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
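+# The claim exercised by the pv-test nginx Deployment: 1Gi, ReadWriteOnce,
+# bound once the Ozone CSI provisioner creates a matching volume for the
+# ozone StorageClass.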
+ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nginx-storage + labels: {} + annotations: {} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/s3g-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/s3g-service.yaml new file mode 100644 index 0000000000000..dd1ca83479919 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/s3g-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: s3g +spec: + ports: + - port: 9878 + name: rest + clusterIP: None + selector: + app: ozone + component: s3g diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/s3g-statefulset.yaml new file mode 100644 index 0000000000000..b608cde832ef2 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/s3g-statefulset.yaml @@ -0,0 +1,51 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: s3g + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: s3g + serviceName: s3g + replicas: 1 + template: + metadata: + labels: + app: ozone + component: s3g + spec: + containers: + - name: s3g + image: '@docker.image@' + args: + - ozone + - s3g + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + initContainers: [] + volumes: + - name: data + emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/scm-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/scm-service.yaml new file mode 100644 index 0000000000000..0df15d6453159 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/scm-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: scm +spec: + ports: + - port: 9876 + name: ui + clusterIP: None + selector: + app: ozone + component: scm diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/scm-statefulset.yaml new file mode 100644 index 0000000000000..55b56722d105b --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-csi/scm-statefulset.yaml @@ -0,0 +1,67 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: scm + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: scm + serviceName: scm + replicas: 1 + template: + metadata: + labels: + app: ozone + component: scm + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9876" + prometheus.io/path: /prom + spec: + initContainers: + - name: init + image: '@docker.image@' + args: + - ozone + - scm + - --init + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + containers: + - name: scm + image: '@docker.image@' + args: + - ozone + - scm + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + emptyDir: {} From 5962a518bd141e0e187342af89f6cab6011e75fb Mon Sep 17 00:00:00 2001 From: Surendra Singh Lilhore Date: Thu, 20 Jun 2019 09:38:51 -0700 Subject: [PATCH 0244/1308] HDFS-14581. Appending to EC files crashes NameNode. Contributed by Surendra Singh Lilhore. 
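
In short, an append to an erasure-coded file now has to request a new block; the check happens up front in FSDirAppendOp instead of surfacing later and crashing the NameNode. A minimal client-side sketch of the still-supported call (not part of this patch; the class name, path, payload and 4096 buffer size are placeholders):

```java
// Illustrative only: ecFile and data are placeholders.
import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EcAppendSketch {
  static void appendWithNewBlock(DistributedFileSystem dfs, Path ecFile,
      byte[] data) throws Exception {
    // NEW_BLOCK is required for striped (EC) files; a plain APPEND is now
    // rejected up front with UnsupportedOperationException.
    try (FSDataOutputStream out = dfs.append(ecFile,
        EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)) {
      out.write(data);
    }
  }
}
```

A plain APPEND without NEW_BLOCK now fails fast with UnsupportedOperationException, which the new test below exercises.
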
Signed-off-by: Wei-Chiu Chuang --- .../hdfs/server/namenode/FSDirAppendOp.java | 11 +++--- .../hadoop/hdfs/TestStripedFileAppend.java | 35 +++++++++++++++++++ 2 files changed, 42 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java index 6b9fd8bfb0d12..919e853f4590f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.List; +import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.permission.FsAction; @@ -107,6 +108,12 @@ static LastBlockWithStatus appendFile(final FSNamesystem fsn, } final INodeFile file = INodeFile.valueOf(inode, path, true); + if (file.isStriped() && !newBlock) { + throw new UnsupportedOperationException( + "Append on EC file without new block is not supported. Use " + + CreateFlag.NEW_BLOCK + " create flag while appending file."); + } + BlockManager blockManager = fsd.getBlockManager(); final BlockStoragePolicy lpPolicy = blockManager .getStoragePolicy("LAZY_PERSIST"); @@ -186,10 +193,6 @@ static LocatedBlock prepareFileForAppend(final FSNamesystem fsn, LocatedBlock ret = null; if (!newBlock) { - if (file.isStriped()) { - throw new UnsupportedOperationException( - "Append on EC file without new block is not supported."); - } FSDirectory fsd = fsn.getFSDirectory(); ret = fsd.getBlockManager().convertLastBlockToUnderConstruction(file, 0); if (ret != null && delta != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java index b4cf102c7c059..29ac394363772 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java @@ -22,18 +22,26 @@ import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.OpenFileEntry; +import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType; import org.apache.log4j.Level; import org.junit.After; import org.junit.Before; import org.junit.Test; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; +import java.util.List; import java.util.Random; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * Tests append on erasure coded file. 
@@ -111,4 +119,31 @@ public void testAppendToNewBlock() throws IOException { StripedFileTestUtil.getDefaultECPolicy(), totalSplit); } + @Test + public void testAppendWithoutNewBlock() throws IOException { + Path file = new Path(dir, "testAppendWithoutNewBlock"); + + // Create file + FSDataOutputStream out = dfs.create(file); + out.write("testAppendWithoutNewBlock".getBytes()); + out.close(); + + // Append file + try { + out = dfs.append(file, EnumSet.of(CreateFlag.APPEND), 4096, null); + out.write("testAppendWithoutNewBlock".getBytes()); + fail("Should throw unsupported operation"); + } catch (Exception e) { + assertTrue(e.getMessage() + .contains("Append on EC file without new block is not supported")); + } + + List types = new ArrayList<>(); + types.add(OpenFilesType.ALL_OPEN_FILES); + + RemoteIterator listOpenFiles = dfs + .listOpenFiles(EnumSet.copyOf(types), file.toString()); + assertFalse("No file should be open after append failure", + listOpenFiles.hasNext()); + } } \ No newline at end of file From d9a9e9913ef4c7a36775c1b5bdb14c77339918ce Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Thu, 20 Jun 2019 09:42:45 -0700 Subject: [PATCH 0245/1308] HDDS-1579. Create OMDoubleBuffer metrics. (#871) --- .../om/ratis/OzoneManagerDoubleBuffer.java | 43 +++++++-- .../OzoneManagerDoubleBufferMetrics.java | 89 +++++++++++++++++++ .../ozone/om/ratis/metrics/package-info.java | 21 +++++ ...eManagerDoubleBufferWithDummyResponse.java | 31 +++++-- 4 files changed, 173 insertions(+), 11 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index 8c25347449c64..68be333a67294 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -23,15 +23,17 @@ import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicLong; +import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.ratis.helpers.DoubleBufferEntry; +import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.utils.db.BatchOperation; - import org.apache.ratis.util.ExitUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * This class implements DoubleBuffer implementation of OMClientResponse's. 
In @@ -63,6 +65,8 @@ public class OzoneManagerDoubleBuffer { private final AtomicLong flushedTransactionCount = new AtomicLong(0); private final AtomicLong flushIterations = new AtomicLong(0); private volatile boolean isRunning; + private OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics; + private long maxFlushedTransactionsInOneIteration; private final OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot; @@ -71,8 +75,9 @@ public OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager, this.currentBuffer = new ConcurrentLinkedQueue<>(); this.readyBuffer = new ConcurrentLinkedQueue<>(); this.omMetadataManager = omMetadataManager; - this.ozoneManagerRatisSnapShot = ozoneManagerRatisSnapShot; + this.ozoneManagerDoubleBufferMetrics = + OzoneManagerDoubleBufferMetrics.create(); isRunning = true; // Daemon thread which runs in back ground and flushes transactions to DB. @@ -80,7 +85,6 @@ public OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager, daemon.setName("OMDoubleBufferFlushThread"); daemon.start(); - } /** @@ -120,6 +124,7 @@ private void flushTransactions() { .max(Long::compareTo).get(); readyBuffer.clear(); + // cleanup cache. cleanupCache(lastRatisTransactionIndex); @@ -129,6 +134,9 @@ private void flushTransactions() { // update the last updated index in OzoneManagerStateMachine. ozoneManagerRatisSnapShot.updateLastAppliedIndex( lastRatisTransactionIndex); + + // set metrics. + updateMetrics(flushedTransactionsSize); } } catch (InterruptedException ex) { Thread.currentThread().interrupt(); @@ -162,6 +170,23 @@ private void cleanupCache(long lastRatisTransactionIndex) { omMetadataManager.getUserTable().cleanupCache(lastRatisTransactionIndex); } + /** + * Update OzoneManagerDoubleBuffer metrics values. + * @param flushedTransactionsSize + */ + private void updateMetrics( + long flushedTransactionsSize) { + ozoneManagerDoubleBufferMetrics.incrTotalNumOfFlushOperations(); + ozoneManagerDoubleBufferMetrics.incrTotalSizeOfFlushedTransactions( + flushedTransactionsSize); + if (maxFlushedTransactionsInOneIteration < flushedTransactionsSize) { + maxFlushedTransactionsInOneIteration = flushedTransactionsSize; + ozoneManagerDoubleBufferMetrics + .setMaxNumberOfTransactionsFlushedInOneIteration( + flushedTransactionsSize); + } + } + /** * Stop OM DoubleBuffer flush thread. */ @@ -170,6 +195,9 @@ public synchronized void stop() { LOG.info("Stopping OMDoubleBuffer flush thread"); isRunning = false; daemon.interrupt(); + + // stop metrics. + ozoneManagerDoubleBufferMetrics.unRegister(); } else { LOG.info("OMDoubleBuffer flush thread is not running."); } @@ -236,5 +264,10 @@ private synchronized void setReadyBuffer() { readyBuffer = temp; } + @VisibleForTesting + public OzoneManagerDoubleBufferMetrics getOzoneManagerDoubleBufferMetrics() { + return ozoneManagerDoubleBufferMetrics; + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java new file mode 100644 index 0000000000000..e2d7f72e44c4d --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.ratis.metrics; + +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MutableCounterLong; + +/** + * Class which maintains metrics related to OzoneManager DoubleBuffer. + */ +public class OzoneManagerDoubleBufferMetrics { + + private static final String SOURCE_NAME = + OzoneManagerDoubleBufferMetrics.class.getSimpleName(); + + @Metric(about = "Total Number of flush operations happened in " + + "OzoneManagerDoubleBuffer.") + private MutableCounterLong totalNumOfFlushOperations; + + @Metric(about = "Total Number of flushed transactions happened in " + + "OzoneManagerDoubleBuffer.") + private MutableCounterLong totalNumOfFlushedTransactions; + + @Metric(about = "Max Number of transactions flushed in a iteration in " + + "OzoneManagerDoubleBuffer. This will provide a value which is maximum " + + "number of transactions flushed in a single flush iteration till now.") + private MutableCounterLong maxNumberOfTransactionsFlushedInOneIteration; + + + public static OzoneManagerDoubleBufferMetrics create() { + MetricsSystem ms = DefaultMetricsSystem.instance(); + return ms.register(SOURCE_NAME, + "OzoneManager DoubleBuffer Metrics", + new OzoneManagerDoubleBufferMetrics()); + } + + public void incrTotalNumOfFlushOperations() { + this.totalNumOfFlushOperations.incr(); + } + + public void incrTotalSizeOfFlushedTransactions( + long flushedTransactions) { + this.totalNumOfFlushedTransactions.incr(flushedTransactions); + } + + public void setMaxNumberOfTransactionsFlushedInOneIteration( + long maxTransactions) { + // We should set the value with maxTransactions, so decrement old value + // first and then add the new value. + this.maxNumberOfTransactionsFlushedInOneIteration.incr( + Math.negateExact(getMaxNumberOfTransactionsFlushedInOneIteration()) + + maxTransactions); + } + + public long getTotalNumOfFlushOperations() { + return totalNumOfFlushOperations.value(); + } + + public long getTotalNumOfFlushedTransactions() { + return totalNumOfFlushedTransactions.value(); + } + + public long getMaxNumberOfTransactionsFlushedInOneIteration() { + return maxNumberOfTransactionsFlushedInOneIteration.value(); + } + + public void unRegister() { + MetricsSystem ms = DefaultMetricsSystem.instance(); + ms.unregisterSource(SOURCE_NAME); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java new file mode 100644 index 0000000000000..e41c645b581af --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +/** + * package which contains metrics classes. + */ +package org.apache.hadoop.ozone.om.ratis.metrics; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index 116595500c0b9..e5499222f5b6c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -22,11 +22,6 @@ import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -34,7 +29,12 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; - +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateBucketResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -91,6 +91,18 @@ public void stop() { public void testDoubleBufferWithDummyResponse() throws Exception { String volumeName = UUID.randomUUID().toString(); int bucketCount = 100; + OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics = + doubleBuffer.getOzoneManagerDoubleBufferMetrics(); + + // As we have not flushed/added any transactions, all metrics should have + // value zero. 
+ Assert.assertTrue(ozoneManagerDoubleBufferMetrics + .getTotalNumOfFlushOperations() == 0); + Assert.assertTrue(ozoneManagerDoubleBufferMetrics + .getTotalNumOfFlushedTransactions() == 0); + Assert.assertTrue(ozoneManagerDoubleBufferMetrics + .getMaxNumberOfTransactionsFlushedInOneIteration() == 0); + for (int i=0; i < bucketCount; i++) { doubleBuffer.add(createDummyBucketResponse(volumeName, UUID.randomUUID().toString()), trxId.incrementAndGet()); @@ -98,6 +110,13 @@ public void testDoubleBufferWithDummyResponse() throws Exception { GenericTestUtils.waitFor(() -> doubleBuffer.getFlushedTransactionCount() == bucketCount, 100, 60000); + + Assert.assertTrue(ozoneManagerDoubleBufferMetrics + .getTotalNumOfFlushOperations() > 0); + Assert.assertTrue(ozoneManagerDoubleBufferMetrics + .getTotalNumOfFlushedTransactions() == bucketCount); + Assert.assertTrue(ozoneManagerDoubleBufferMetrics + .getMaxNumberOfTransactionsFlushedInOneIteration() > 0); Assert.assertTrue(omMetadataManager.countRowsInTable( omMetadataManager.getBucketTable()) == (bucketCount)); Assert.assertTrue(doubleBuffer.getFlushIterations() > 0); From 840d02ca5ba013bddd28d7c1abd242b5bb8cc39f Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Thu, 20 Jun 2019 19:57:09 -0700 Subject: [PATCH 0246/1308] HADOOP-14385. HttpExceptionUtils#validateResponse swallows exceptions. Contributed by Wei-Chiu Chuang. Signed-off-by: Wei-Chiu Chuang --- .../org/apache/hadoop/util/HttpExceptionUtils.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java index 366c8c787f84c..12d1ef01201a2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java @@ -154,18 +154,20 @@ public static void validateResponse(HttpURLConnection conn, toThrow = (Exception) constr.newInstance(exMsg); } catch (Exception ex) { toThrow = new IOException(String.format( - "HTTP status [%d], exception [%s], message [%s] ", - conn.getResponseCode(), exClass, exMsg)); + "HTTP status [%d], exception [%s], message [%s], URL [%s]", + conn.getResponseCode(), exClass, exMsg, conn.getURL())); } } else { String msg = (exMsg != null) ? exMsg : conn.getResponseMessage(); toThrow = new IOException(String.format( - "HTTP status [%d], message [%s]", conn.getResponseCode(), msg)); + "HTTP status [%d], message [%s], URL [%s]", + conn.getResponseCode(), msg, conn.getURL())); } } catch (Exception ex) { toThrow = new IOException(String.format( - "HTTP status [%d], message [%s]", conn.getResponseCode(), - conn.getResponseMessage())); + "HTTP status [%d], message [%s], URL [%s], exception [%s]", + conn.getResponseCode(), conn.getResponseMessage(), conn.getURL(), + ex.toString()), ex); } finally { if (es != null) { try { From 98d20656433cdec76c2108d24ff3b935657c1e80 Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Thu, 20 Jun 2019 20:16:48 -0700 Subject: [PATCH 0247/1308] HDFS-12564. Add the documents of swebhdfs configurations on the client side. Contributed by Takanobu Asanuma. 
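
The documentation below describes the truststore settings a client needs for the swebhdfs scheme. As a rough usage sketch (not from this patch; hostname, port and path are placeholders, and ssl-client.xml referenced by hadoop.ssl.client.conf is assumed to be on the client classpath), a swebhdfs read looks like any other FileSystem call:

```java
// Illustrative only: host, port and path are placeholders.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class SWebHdfsReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(
        URI.create("swebhdfs://nn.example.com:50470"), conf);
    try (FSDataInputStream in = fs.open(new Path("/tmp/sample.txt"))) {
      IOUtils.copyBytes(in, System.out, 4096, false);
    }
  }
}
```
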
Signed-off-by: Wei-Chiu Chuang --- .../src/site/markdown/ServerSetup.md.vm | 3 +- .../hadoop-hdfs/src/site/markdown/WebHDFS.md | 51 +++++++++++++++++++ .../src/site/markdown/DistCp.md.vm | 4 +- 3 files changed, 56 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm index 072c067b5d81a..2d0a5b8cd2e7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm @@ -114,7 +114,7 @@ Configure `etc/hadoop/ssl-server.xml` with proper values, for example: ``` The SSL passwords can be secured by a credential provider. See -[Credential Provider API](../../../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html). +[Credential Provider API](../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html). You need to create an SSL certificate for the HttpFS server. As the `httpfs` Unix user, using the Java `keytool` command to create the SSL certificate: @@ -131,6 +131,7 @@ The answer to "What is your first and last name?" (i.e. "CN") must be the hostna Start HttpFS. It should work over HTTPS. Using the Hadoop `FileSystem` API or the Hadoop FS shell, use the `swebhdfs://` scheme. Make sure the JVM is picking up the truststore containing the public key of the SSL certificate if using a self-signed certificate. +For more information about the client side settings, see [SSL Configurations for SWebHDFS](../hadoop-project-dist/hadoop-hdfs/WebHDFS.html#SSL_Configurations_for_SWebHDFS). NOTE: Some old SSL clients may use weak ciphers that are not supported by the HttpFS server. It is recommended to upgrade the SSL client. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md index 6a7d345cad97f..0e224645ccac6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md @@ -104,6 +104,7 @@ In the REST API, the prefix "`/webhdfs/v1`" is inserted in the path and a query swebhdfs://:/ +See also: [SSL Configurations for SWebHDFS](#SSL_Configurations_for_SWebHDFS) ### HDFS Configuration Options @@ -164,6 +165,56 @@ The following properties control OAuth2 authentication. | `dfs.webhdfs.oauth2.refresh.token.expires.ms.since.epoch` | (required if using ConfRefreshTokenBasedAccessTokenProvider) Access token expiration measured in milliseconds since Jan 1, 1970. *Note this is a different value than provided by OAuth providers and has been munged as described in interface to be suitable for a client application* | | `dfs.webhdfs.oauth2.credential` | (required if using ConfCredentialBasedAccessTokenProvider). Credential used to obtain initial and subsequent access tokens. | +SSL Configurations for SWebHDFS +------------------------------------------------------- + +To use SWebHDFS FileSystem (i.e. using the swebhdfs protocol), a SSL configuration +file needs to be specified on the client side. This must specify 3 parameters: + +| SSL property | Description | +|:---- |:---- | +| `ssl.client.truststore.location` | The local-filesystem location of the trust-store file, containing the certificate for the NameNode. | +| `ssl.client.truststore.type` | (Optional) The format of the trust-store file. | +| `ssl.client.truststore.password` | (Optional) Password for the trust-store file. 
| + +The following is an example SSL configuration file (**ssl-client.xml**): + +```xml + + + ssl.client.truststore.location + /work/keystore.jks + Truststore to be used by clients. Must be specified. + + + + ssl.client.truststore.password + changeme + Optional. Default value is "". + + + + ssl.client.truststore.type + jks + Optional. Default value is "jks". + + +``` + +The SSL configuration file must be in the class-path of the client program and the filename needs to be specified in **core-site.xml**: + +```xml + + hadoop.ssl.client.conf + ssl-client.xml + + Resource file from which ssl client keystore information will be extracted. + This file is looked up in the classpath, typically it should be in Hadoop + conf/ directory. Default value is "ssl-client.xml". + + +``` + Proxy Users ----------- diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm index c678090908d7e..a5c40115aedf5 100644 --- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm +++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm @@ -542,10 +542,12 @@ $H3 Copying Between Versions of HDFS HftpFileSystem, as webhdfs is available for both read and write operations, DistCp can be run on both source and destination cluster. Remote cluster is specified as `webhdfs://:`. - (Use the "`swebhdfs://`" scheme when webhdfs is secured with SSL). When copying between same major versions of Hadoop cluster (e.g. between 2.X and 2.X), use hdfs protocol for better performance. +$H3 Secure Copy over the wire with distcp + Use the "`swebhdfs://`" scheme when webhdfs is secured with SSL. For more information see [SSL Configurations for SWebHDFS](../hadoop-project-dist/hadoop-hdfs/WebHDFS.html#SSL_Configurations_for_SWebHDFS). + $H3 MapReduce and other side-effects As has been mentioned in the preceding, should a map fail to copy one of its From 272b96d243383d9f50241d48cb070f638243bc9c Mon Sep 17 00:00:00 2001 From: Lokesh Jain Date: Thu, 20 Jun 2019 20:22:14 -0700 Subject: [PATCH 0248/1308] HDFS-13893. DiskBalancer: no validations for Disk balancer commands. Contributed by Lokesh Jain. 
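
Background sketch, not part of the change: commons-cli collects every token that is not consumed by an option into CommandLine#getArgs(), which is what the new length check in DiskBalancerCLI#run inspects. The option name, tokens and parser choice here are made up for illustration:

```java
// Standalone illustration; the "report" option and tokens are invented.
import java.util.Arrays;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;

public class GetArgsSketch {
  public static void main(String[] args) throws Exception {
    Options opts = new Options();
    opts.addOption("report", true, "report option taking one argument");
    CommandLine cmd = new GnuParser().parse(opts,
        new String[] {"random1", "-report", "node1", "random2", "random3"});
    // Prints the leftover tokens: [random1, random2, random3]
    System.out.println(Arrays.toString(cmd.getArgs()));
  }
}
```
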
Signed-off-by: Wei-Chiu Chuang --- .../org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java | 8 ++++++++ .../diskbalancer/command/TestDiskBalancerCommand.java | 11 +++++++++++ 2 files changed, 19 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java index 34bd68bc68464..980da67a5b7bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java @@ -21,6 +21,7 @@ import org.apache.commons.cli.Option; import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.Options; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -37,6 +38,7 @@ import org.slf4j.LoggerFactory; import java.io.PrintStream; +import java.util.Arrays; /** * DiskBalancer is a tool that can be used to ensure that data is spread evenly @@ -191,6 +193,12 @@ public static void main(String[] argv) throws Exception { public int run(String[] args) throws Exception { Options opts = getOpts(); CommandLine cmd = parseArgs(args, opts); + String[] cmdArgs = cmd.getArgs(); + if (cmdArgs.length > 2) { + throw new HadoopIllegalArgumentException( + "Invalid or extra Arguments: " + Arrays + .toString(Arrays.copyOfRange(cmdArgs, 2, cmdArgs.length))); + } return dispatch(cmd, opts); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java index f2e998e20e725..e662fa1435265 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java @@ -44,6 +44,7 @@ import java.util.Scanner; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -384,6 +385,16 @@ private String runAndVerifyPlan( return planFileFullName; } + /* test exception on invalid arguments */ + @Test(timeout = 60000) + public void testExceptionOnInvalidArguments() throws Exception { + final String cmdLine = "hdfs diskbalancer random1 -report random2 random3"; + thrown.expect(HadoopIllegalArgumentException.class); + thrown.expectMessage( + "Invalid or extra Arguments: [random1, random2, random3]"); + runCommand(cmdLine); + } + /* test basic report */ @Test(timeout = 60000) public void testReportSimple() throws Exception { From b95a58e231cb1ac87680a072d7fa84998fd95921 Mon Sep 17 00:00:00 2001 From: S O'Donnell Date: Fri, 21 Jun 2019 11:35:32 +0200 Subject: [PATCH 0249/1308] HDDS-1674. Make ScmBlockLocationProtocol message type based. 
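
Every RPC now travels through a single send() method carrying a typed request envelope. A simplified fragment for the getScmInfo case, adapted from the client-side translator below (rpcProxy and NULL_RPC_CONTROLLER are fields of that class; tracing setup and error handling are omitted):

```java
// Adapted from ScmBlockLocationProtocolClientSideTranslatorPB; not a
// complete method, shown only to illustrate the wrapped-call pattern.
SCMBlockLocationRequest wrapper = SCMBlockLocationRequest.newBuilder()
    .setCmdType(Type.GetScmInfo)
    .setTraceID(TracingUtil.exportCurrentSpan())
    .setGetScmInfoRequest(
        HddsProtos.GetScmInfoRequestProto.getDefaultInstance())
    .build();
SCMBlockLocationResponse reply = rpcProxy.send(NULL_RPC_CONTROLLER, wrapper);
HddsProtos.GetScmInfoResponseProto info = reply.getGetScmInfoResponse();
```
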
Closes #984 --- ...ocationProtocolClientSideTranslatorPB.java | 43 ++++++++++- ...ocationProtocolServerSideTranslatorPB.java | 65 +++++++++++++--- .../main/proto/ScmBlockLocationProtocol.proto | 76 ++++++++++++++----- 3 files changed, 151 insertions(+), 33 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index 559022f286974..aadf585775d00 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -25,6 +25,9 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Type; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateBlockResponse; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto; @@ -72,6 +75,16 @@ public ScmBlockLocationProtocolClientSideTranslatorPB( this.rpcProxy = rpcProxy; } + /** + * Returns a SCMBlockLocationRequest builder with specified type. + * @param cmdType type of the request + */ + private SCMBlockLocationRequest.Builder createSCMBlockRequest(Type cmdType) { + return SCMBlockLocationRequest.newBuilder() + .setCmdType(cmdType) + .setTraceID(TracingUtil.exportCurrentSpan()); + } + /** * Asks SCM where a block should be allocated. SCM responds with the * set of datanodes that should be used creating this block. 
@@ -96,12 +109,19 @@ public List allocateBlock(long size, int num, .setType(type) .setFactor(factor) .setOwner(owner) - .setTraceID(TracingUtil.exportCurrentSpan()) .setExcludeList(excludeList.getProtoBuf()) .build(); + + SCMBlockLocationRequest wrapper = createSCMBlockRequest( + Type.AllocateScmBlock) + .setAllocateScmBlockRequest(request) + .build(); + final AllocateScmBlockResponseProto response; + final SCMBlockLocationResponse wrappedResponse; try { - response = rpcProxy.allocateScmBlock(NULL_RPC_CONTROLLER, request); + wrappedResponse = rpcProxy.send(NULL_RPC_CONTROLLER, wrapper); + response = wrappedResponse.getAllocateScmBlockResponse(); } catch (ServiceException e) { throw transformServiceException(e); } @@ -141,9 +161,16 @@ public List deleteKeyBlocks( .addAllKeyBlocks(keyBlocksProto) .build(); + SCMBlockLocationRequest wrapper = createSCMBlockRequest( + Type.DeleteScmKeyBlocks) + .setDeleteScmKeyBlocksRequest(request) + .build(); + final DeleteScmKeyBlocksResponseProto resp; + final SCMBlockLocationResponse wrappedResponse; try { - resp = rpcProxy.deleteScmKeyBlocks(NULL_RPC_CONTROLLER, request); + wrappedResponse = rpcProxy.send(NULL_RPC_CONTROLLER, wrapper); + resp = wrappedResponse.getDeleteScmKeyBlocksResponse(); } catch (ServiceException e) { throw transformServiceException(e); } @@ -191,8 +218,16 @@ public ScmInfo getScmInfo() throws IOException { HddsProtos.GetScmInfoRequestProto request = HddsProtos.GetScmInfoRequestProto.getDefaultInstance(); HddsProtos.GetScmInfoResponseProto resp; + + SCMBlockLocationRequest wrapper = createSCMBlockRequest( + Type.GetScmInfo) + .setGetScmInfoRequest(request) + .build(); + + final SCMBlockLocationResponse wrappedResponse; try { - resp = rpcProxy.getScmInfo(NULL_RPC_CONTROLLER, request); + wrappedResponse = rpcProxy.send(NULL_RPC_CONTROLLER, wrapper); + resp = wrappedResponse.getGetScmInfoResponse(); } catch (ServiceException e) { throw transformServiceException(e); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java index 65f0a973ce412..db1240a5f6de6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -22,6 +22,7 @@ import io.opentracing.Scope; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos .AllocateBlockResponse; import org.apache.hadoop.hdds.scm.ScmInfo; @@ -42,6 +43,12 @@ .DeleteScmKeyBlocksRequestProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos .DeleteScmKeyBlocksResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .SCMBlockLocationResponse; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .SCMBlockLocationRequest; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .Status; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; @@ -71,13 +78,51 @@ public ScmBlockLocationProtocolServerSideTranslatorPB( this.impl = impl; } + + 
private SCMBlockLocationResponse.Builder createSCMBlockResponse( + ScmBlockLocationProtocolProtos.Type cmdType, + String traceID) { + return SCMBlockLocationResponse.newBuilder() + .setCmdType(cmdType) + .setTraceID(traceID); + } + @Override + public SCMBlockLocationResponse send(RpcController controller, + SCMBlockLocationRequest request) throws ServiceException { + String traceId = request.getTraceID(); + + SCMBlockLocationResponse.Builder response = createSCMBlockResponse( + request.getCmdType(), + traceId); + + switch (request.getCmdType()) { + case AllocateScmBlock: + response.setAllocateScmBlockResponse( + allocateScmBlock(traceId, request.getAllocateScmBlockRequest())); + break; + case DeleteScmKeyBlocks: + response.setDeleteScmKeyBlocksResponse( + deleteScmKeyBlocks(traceId, request.getDeleteScmKeyBlocksRequest())); + break; + case GetScmInfo: + response.setGetScmInfoResponse( + getScmInfo(traceId, request.getGetScmInfoRequest())); + break; + default: + throw new ServiceException("Unknown Operation"); + } + response.setSuccess(true) + .setStatus(Status.OK); + return response.build(); + } + public AllocateScmBlockResponseProto allocateScmBlock( - RpcController controller, AllocateScmBlockRequestProto request) + String traceId, AllocateScmBlockRequestProto request) throws ServiceException { - try (Scope scope = TracingUtil + try(Scope scope = TracingUtil .importAndCreateScope("ScmBlockLocationProtocol.allocateBlock", - request.getTraceID())) { + traceId)) { List allocatedBlocks = impl.allocateBlock(request.getSize(), request.getNumBlocks(), request.getType(), @@ -107,13 +152,14 @@ public AllocateScmBlockResponseProto allocateScmBlock( } } - @Override public DeleteScmKeyBlocksResponseProto deleteScmKeyBlocks( - RpcController controller, DeleteScmKeyBlocksRequestProto req) + String traceId, DeleteScmKeyBlocksRequestProto req) throws ServiceException { DeleteScmKeyBlocksResponseProto.Builder resp = DeleteScmKeyBlocksResponseProto.newBuilder(); - try { + try(Scope scope = TracingUtil + .importAndCreateScope("ScmBlockLocationProtocol.deleteKeyBlocks", + traceId)) { List infoList = req.getKeyBlocksList().stream() .map(BlockGroup::getFromProto).collect(Collectors.toList()); final List results = @@ -132,12 +178,13 @@ public DeleteScmKeyBlocksResponseProto deleteScmKeyBlocks( return resp.build(); } - @Override public HddsProtos.GetScmInfoResponseProto getScmInfo( - RpcController controller, HddsProtos.GetScmInfoRequestProto req) + String traceId, HddsProtos.GetScmInfoRequestProto req) throws ServiceException { ScmInfo scmInfo; - try { + try(Scope scope = TracingUtil + .importAndCreateScope("ScmBlockLocationProtocol.getInfo", + traceId)) { scmInfo = impl.getScmInfo(); } catch (IOException ex) { throw new ServiceException(ex); diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto index 6745c6ee14bb5..8222d8b45b176 100644 --- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto +++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto @@ -33,6 +33,60 @@ import "hdds.proto"; // SCM Block protocol +enum Type { + AllocateScmBlock = 11; + DeleteScmKeyBlocks = 12; + GetScmInfo = 13; +} + +message SCMBlockLocationRequest { + required Type cmdType = 1; // Type of the command + + // A string that identifies this command, we generate Trace ID in Ozone + // frontend and this allows us to trace that command all over ozone. 
+ optional string traceID = 2; + + optional UserInfo userInfo = 3; + + optional AllocateScmBlockRequestProto allocateScmBlockRequest = 11; + optional DeleteScmKeyBlocksRequestProto deleteScmKeyBlocksRequest = 12; + optional hadoop.hdds.GetScmInfoRequestProto getScmInfoRequest = 13; +} + +message SCMBlockLocationResponse { + required Type cmdType = 1; // Type of the command + + // A string that identifies this command, we generate Trace ID in Ozone + // frontend and this allows us to trace that command all over ozone. + optional string traceID = 2; + + optional bool success = 3 [default=true]; + + optional string message = 4; + + required Status status = 5; + + optional string leaderOMNodeId = 6; + + optional AllocateScmBlockResponseProto allocateScmBlockResponse = 11; + optional DeleteScmKeyBlocksResponseProto deleteScmKeyBlocksResponse = 12; + optional hadoop.hdds.GetScmInfoResponseProto getScmInfoResponse = 13; +} + +/** + User information which will be extracted during RPC context and used + during validating Acl. +*/ +message UserInfo { + optional string userName = 1; + optional string remoteAddress = 3; +} + +enum Status { + OK = 1; + UNKNOWN = 2; +} + /** * Request send to SCM asking allocate block of specified size. */ @@ -42,7 +96,6 @@ message AllocateScmBlockRequestProto { required ReplicationType type = 3; required hadoop.hdds.ReplicationFactor factor = 4; required string owner = 5; - optional string traceID = 6; optional ExcludeListProto excludeList = 7; } @@ -73,8 +126,6 @@ message KeyBlocks { */ message DeleteScmKeyBlocksResponseProto { repeated DeleteKeyBlocksResultProto results = 1; - optional string traceID = 2; - } /** @@ -122,21 +173,6 @@ message AllocateScmBlockResponseProto { */ service ScmBlockLocationProtocolService { - /** - * Creates a block entry in SCM. - */ - rpc allocateScmBlock(AllocateScmBlockRequestProto) - returns (AllocateScmBlockResponseProto); - - /** - * Deletes blocks for a set of object keys from SCM. - */ - rpc deleteScmKeyBlocks(DeleteScmKeyBlocksRequestProto) - returns (DeleteScmKeyBlocksResponseProto); - - /** - * Gets the scmInfo from SCM. - */ - rpc getScmInfo(hadoop.hdds.GetScmInfoRequestProto) - returns (hadoop.hdds.GetScmInfoResponseProto); + rpc send(SCMBlockLocationRequest) + returns (SCMBlockLocationResponse); } From 83dcb9d87ec75f2be0acb8972f5f0faefe6ffbcd Mon Sep 17 00:00:00 2001 From: Weiwei Yang Date: Fri, 21 Jun 2019 17:40:55 +0800 Subject: [PATCH 0250/1308] YARN-9209. When nodePartition is not set in Placement Constraints, containers are allocated only in default partition. Contributed by Tarun Parimi. 
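
The heart of the fix, shown out of context as a sketch rather than the complete method: when the placement constraint names no node partition, the allocator falls back to the queue's default node label expression instead of the empty default partition.

```java
// Fragment of SingleConstraintAppPlacementAllocator
// #validateAndGetTargetNodePartition as changed below, shown in isolation.
String defaultNodeLabelExpression =
    appSchedulingInfo.getDefaultNodeLabelExpression();
String nodePartition = defaultNodeLabelExpression == null
    ? RMNodeLabelsManager.NO_LABEL
    : defaultNodeLabelExpression;
// An explicit node-partition target in the constraint still overrides
// this default further down in the method.
```
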
--- .../scheduler/AppSchedulingInfo.java | 14 ++++++++++++++ .../SingleConstraintAppPlacementAllocator.java | 5 ++++- .../TestSingleConstraintAppPlacementAllocator.java | 7 ++++--- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java index c3269fbf7d6dc..4abb7129cc781 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java @@ -795,4 +795,18 @@ public boolean precheckNode(SchedulerRequestKey schedulerKey, public Map getApplicationSchedulingEnvs() { return applicationSchedulingEnvs; } + + /** + * Get the defaultNodeLabelExpression for the application's current queue. + * + * @return defaultNodeLabelExpression + */ + public String getDefaultNodeLabelExpression() { + try { + this.readLock.lock(); + return queue.getDefaultNodeLabelExpression(); + } finally { + this.readLock.unlock(); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java index e0307b8982e15..3fc3afbd09fc7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java @@ -255,7 +255,10 @@ private void validateAndSetSchedulingRequest(SchedulingRequest // Currently only single constraint is handled. private String validateAndGetTargetNodePartition( PlacementConstraint placementConstraint) { - String nodePartition = RMNodeLabelsManager.NO_LABEL; + String defaultNodeLabelExpression = + appSchedulingInfo.getDefaultNodeLabelExpression(); + String nodePartition = defaultNodeLabelExpression == null ? 
+ RMNodeLabelsManager.NO_LABEL : defaultNodeLabelExpression; if (placementConstraint != null && placementConstraint.getConstraintExpr() != null) { PlacementConstraint.AbstractConstraint ac = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java index d9b6c20844d3b..e6518312664cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java @@ -67,7 +67,7 @@ public void setup() throws Exception { TestUtils.getMockApplicationId(1)); when(appSchedulingInfo.getApplicationAttemptId()).thenReturn( TestUtils.getMockApplicationAttemptId(1, 1)); - + when(appSchedulingInfo.getDefaultNodeLabelExpression()).thenReturn("y"); // stub RMContext rmContext = TestUtils.getMockRMContext(); @@ -153,7 +153,8 @@ public void testSchedulingRequestValidation() { .resourceSizing( ResourceSizing.newInstance(1, Resource.newInstance(1024, 1))) .build()); - Assert.assertEquals("", allocator.getTargetNodePartition()); + // Node partition is unspecified, use the default node label expression y + Assert.assertEquals("y", allocator.getTargetNodePartition()); // Valid (with application Id target) assertValidSchedulingRequest(SchedulingRequest.newBuilder().executionType( @@ -167,7 +168,7 @@ public void testSchedulingRequestValidation() { ResourceSizing.newInstance(1, Resource.newInstance(1024, 1))) .build()); // Allocation tags should not include application Id - Assert.assertEquals("", allocator.getTargetNodePartition()); + Assert.assertEquals("y", allocator.getTargetNodePartition()); // Invalid (without sizing) assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType( From 256bdd6982b0bcffa2f9ecc12d3f576c6c675911 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Fri, 21 Jun 2019 14:14:06 +0200 Subject: [PATCH 0251/1308] HDDS-1678. Default image name for kubernetes examples should be ozone and not hadoop Closes #959 --- hadoop-ozone/dist/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 4ae92faf500a0..5964bb052c577 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -28,7 +28,7 @@ UTF-8 true - apache/hadoop:${project.version} + apache/ozone:${project.version} jdk11 From cba13c7f83da3a1286f1716e7d01ce35fde90114 Mon Sep 17 00:00:00 2001 From: S O'Donnell Date: Fri, 21 Jun 2019 16:25:10 +0200 Subject: [PATCH 0252/1308] HDDS-1715. 
Update the Intellij runner definitition of SCM to use the new class name Closes #1000 --- .../intellij/runConfigurations/StorageContainerManager.xml | 2 +- .../intellij/runConfigurations/StorageContainerManagerInit.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManager.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManager.xml index 40097cf6c1829..46104d3ec7378 100644 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManager.xml +++ b/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManager.xml @@ -16,7 +16,7 @@ --> -

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.timelineservice.storage; + +import java.io.IOException; + +import org.junit.Test; +import org.junit.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities; +import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity; + +import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS; +import static org.junit.Assert.assertTrue; + +/** + * This class tests HbaseTimelineWriter with Hbase Down. + */ +public class TestTimelineWriterHBaseDown { + + @Test(timeout=300000) + public void testTimelineWriterHBaseDown() throws Exception { + HBaseTestingUtility util = new HBaseTestingUtility(); + HBaseTimelineWriterImpl writer = new HBaseTimelineWriterImpl(); + try { + Configuration c1 = util.getConfiguration(); + c1.setLong(TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS, 5000); + writer.init(c1); + writer.start(); + + util.startMiniCluster(); + DataGeneratorForTest.createSchema(util.getConfiguration()); + + TimelineStorageMonitor storageMonitor = writer. + getTimelineStorageMonitor(); + waitForHBaseToUp(storageMonitor); + + try { + storageMonitor.checkStorageIsUp(); + } catch(IOException e) { + Assert.fail("HBaseStorageMonitor failed to detect HBase Up"); + } + + util.shutdownMiniHBaseCluster(); + waitForHBaseToDown(storageMonitor); + + TimelineEntities te = new TimelineEntities(); + ApplicationEntity entity = new ApplicationEntity(); + String appId = "application_1000178881110_2002"; + entity.setId(appId); + Long cTime = 1425016501000L; + entity.setCreatedTime(cTime); + te.addEntity(entity); + + boolean exceptionCaught = false; + try{ + writer.write(new TimelineCollectorContext("ATS1", "user1", "flow2", + "AB7822C10F1111", 1002345678919L, appId), te, + UserGroupInformation.createRemoteUser("user1")); + } catch (IOException e) { + if (e.getMessage().equals("HBase is down")) { + exceptionCaught = true; + } + } + assertTrue("HBaseStorageMonitor failed to detect HBase Down", + exceptionCaught); + } finally { + writer.stop(); + util.shutdownMiniCluster(); + } + } + + public void waitForHBaseToUp(TimelineStorageMonitor storageMonitor) + throws Exception { + GenericTestUtils.waitFor(() -> { + try { + storageMonitor.checkStorageIsUp(); + return true; + } catch (IOException e) { + return false; + } + }, 1000, 150000); + } + + public void waitForHBaseToDown(TimelineStorageMonitor storageMonitor) + throws Exception { + GenericTestUtils.waitFor(() -> { + try { + storageMonitor.checkStorageIsUp(); + return false; + } catch (IOException e) { + return true; + } + }, 1000, 150000); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java index 4c71fd6b49eea..f3592d2924088 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java @@ -78,10 +78,6 @@ protected void serviceStop() throws Exception { super.serviceStop(); } - public boolean isHBaseDown() { - return storageMonitor.isStorageDown(); - } - @Override public TimelineEntity getEntity(TimelineReaderContext context, TimelineDataToRetrieve dataToRetrieve) throws IOException { @@ -113,14 +109,19 @@ public Set getEntityTypes(TimelineReaderContext context) @Override public TimelineHealth getHealthStatus() { - if (!this.isHBaseDown()) { + try { + storageMonitor.checkStorageIsUp(); return new TimelineHealth(TimelineHealth.TimelineHealthStatus.RUNNING, ""); - } else { + } catch (IOException e){ return new TimelineHealth( TimelineHealth.TimelineHealthStatus.READER_CONNECTION_FAILURE, "HBase connection is down"); } } + protected TimelineStorageMonitor getTimelineStorageMonitor() { + return storageMonitor; + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java index 3414a56ef35eb..a398febccc793 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java @@ -100,6 +100,7 @@ public class HBaseTimelineWriterImpl extends AbstractService implements .getLogger(HBaseTimelineWriterImpl.class); private Connection conn; + private TimelineStorageMonitor storageMonitor; private TypedBufferedMutator entityTable; private TypedBufferedMutator appToFlowTable; private TypedBufferedMutator applicationTable; @@ -150,9 +151,16 @@ protected void serviceInit(Configuration conf) throws Exception { UserGroupInformation ugi = UserGroupInformation.isSecurityEnabled() ? UserGroupInformation.getLoginUser() : UserGroupInformation.getCurrentUser(); + storageMonitor = new HBaseStorageMonitor(conf); LOG.info("Initialized HBaseTimelineWriterImpl UGI to " + ugi); } + @Override + protected void serviceStart() throws Exception { + super.serviceStart(); + storageMonitor.start(); + } + /** * Stores the entire information in TimelineEntities to the timeline store. 
*/ @@ -160,7 +168,7 @@ protected void serviceInit(Configuration conf) throws Exception { public TimelineWriteResponse write(TimelineCollectorContext context, TimelineEntities data, UserGroupInformation callerUgi) throws IOException { - + storageMonitor.checkStorageIsUp(); TimelineWriteResponse putStatus = new TimelineWriteResponse(); String clusterId = context.getClusterId(); @@ -242,6 +250,7 @@ public TimelineWriteResponse write(TimelineCollectorContext context, public TimelineWriteResponse write(TimelineCollectorContext context, TimelineDomain domain) throws IOException { + storageMonitor.checkStorageIsUp(); TimelineWriteResponse putStatus = new TimelineWriteResponse(); String clusterId = context.getClusterId(); @@ -591,6 +600,7 @@ private > void storeEvents( @Override public TimelineWriteResponse aggregate(TimelineEntity data, TimelineAggregationTrack track) throws IOException { + storageMonitor.checkStorageIsUp(); return null; } @@ -603,6 +613,7 @@ public TimelineWriteResponse aggregate(TimelineEntity data, */ @Override public void flush() throws IOException { + storageMonitor.checkStorageIsUp(); // flush all buffered mutators entityTable.flush(); appToFlowTable.flush(); @@ -653,6 +664,12 @@ protected void serviceStop() throws Exception { LOG.info("closing the hbase Connection"); conn.close(); } + storageMonitor.stop(); super.serviceStop(); } + + protected TimelineStorageMonitor getTimelineStorageMonitor() { + return storageMonitor; + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineStorageMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineStorageMonitor.java index fc96f19d75ebe..dce6b8d6f911f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineStorageMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineStorageMonitor.java @@ -81,10 +81,6 @@ public void checkStorageIsUp() throws IOException { } } - public boolean isStorageDown() { - return storageDown.get(); - } - private class MonitorThread implements Runnable { @Override public void run() { From 41c94a636b1a6ede9fa9cabff5487d305d06b48f Mon Sep 17 00:00:00 2001 From: Vinayakumar B Date: Fri, 12 Oct 2018 17:19:55 +0530 Subject: [PATCH 0262/1308] HDFS-13906. RBF: Add multiple paths for dfsrouteradmin 'rm' and 'clrquota' commands. Contributed by Ayush Saxena. --- .../hdfs/tools/federation/RouterAdmin.java | 102 +++++++++--------- .../federation/router/TestRouterAdminCLI.java | 82 +++++++++++--- 2 files changed, 122 insertions(+), 62 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 1aefe4fe785d7..4a9cc7ac51f59 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -151,17 +151,7 @@ private String getUsage(String cmd) { * @param arg List of of command line parameters. 
*/ private void validateMax(String[] arg) { - if (arg[0].equals("-rm")) { - if (arg.length > 2) { - throw new IllegalArgumentException( - "Too many arguments, Max=1 argument allowed"); - } - } else if (arg[0].equals("-ls")) { - if (arg.length > 2) { - throw new IllegalArgumentException( - "Too many arguments, Max=1 argument allowed"); - } - } else if (arg[0].equals("-clrQuota")) { + if (arg[0].equals("-ls")) { if (arg.length > 2) { throw new IllegalArgumentException( "Too many arguments, Max=1 argument allowed"); @@ -183,63 +173,63 @@ private void validateMax(String[] arg) { } } - @Override - public int run(String[] argv) throws Exception { - if (argv.length < 1) { - System.err.println("Not enough parameters specified"); - printUsage(); - return -1; - } - - int exitCode = -1; - int i = 0; - String cmd = argv[i++]; - - // Verify that we have enough command line parameters + /** + * Usage: validates the minimum number of arguments for a command. + * @param argv List of of command line parameters. + * @return true if number of arguments are valid for the command else false. + */ + private boolean validateMin(String[] argv) { + String cmd = argv[0]; if ("-add".equals(cmd)) { if (argv.length < 4) { - System.err.println("Not enough parameters specified for cmd " + cmd); - printUsage(cmd); - return exitCode; + return false; } } else if ("-update".equals(cmd)) { if (argv.length < 4) { - System.err.println("Not enough parameters specified for cmd " + cmd); - printUsage(cmd); - return exitCode; + return false; } } else if ("-rm".equals(cmd)) { if (argv.length < 2) { - System.err.println("Not enough parameters specified for cmd " + cmd); - printUsage(cmd); - return exitCode; + return false; } } else if ("-setQuota".equals(cmd)) { if (argv.length < 4) { - System.err.println("Not enough parameters specified for cmd " + cmd); - printUsage(cmd); - return exitCode; + return false; } } else if ("-clrQuota".equals(cmd)) { if (argv.length < 2) { - System.err.println("Not enough parameters specified for cmd " + cmd); - printUsage(cmd); - return exitCode; + return false; } } else if ("-safemode".equals(cmd)) { if (argv.length < 2) { - System.err.println("Not enough parameters specified for cmd " + cmd); - printUsage(cmd); - return exitCode; + return false; } } else if ("-nameservice".equals(cmd)) { if (argv.length < 3) { - System.err.println("Not enough parameters specificed for cmd " + cmd); - printUsage(cmd); - return exitCode; + return false; } } + return true; + } + + @Override + public int run(String[] argv) throws Exception { + if (argv.length < 1) { + System.err.println("Not enough parameters specified"); + printUsage(); + return -1; + } + + int exitCode = -1; + int i = 0; + String cmd = argv[i++]; + // Verify that we have enough command line parameters + if (!validateMin(argv)) { + System.err.println("Not enough parameters specificed for cmd " + cmd); + printUsage(cmd); + return exitCode; + } // Initialize RouterClient try { String address = getConf().getTrimmed( @@ -273,8 +263,17 @@ public int run(String[] argv) throws Exception { exitCode = -1; } } else if ("-rm".equals(cmd)) { - if (removeMount(argv[i])) { - System.out.println("Successfully removed mount point " + argv[i]); + while (i < argv.length) { + try { + if (removeMount(argv[i])) { + System.out.println("Successfully removed mount point " + argv[i]); + } + } catch (IOException e) { + exitCode = -1; + System.err + .println(cmd.substring(1) + ": " + e.getLocalizedMessage()); + } + i++; } } else if ("-ls".equals(cmd)) { if (argv.length > 1) { @@ 
-288,9 +287,12 @@ public int run(String[] argv) throws Exception { "Successfully set quota for mount point " + argv[i]); } } else if ("-clrQuota".equals(cmd)) { - if (clrQuota(argv[i])) { - System.out.println( - "Successfully clear quota for mount point " + argv[i]); + while (i < argv.length) { + if (clrQuota(argv[i])) { + System.out + .println("Successfully clear quota for mount point " + argv[i]); + i++; + } } } else if ("-safemode".equals(cmd)) { manageSafeMode(argv[i]); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index 80aca55d8326f..66429420ab0c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -342,13 +342,43 @@ public void testRemoveMountTable() throws Exception { assertEquals(0, ToolRunner.run(admin, argv)); assertTrue(out.toString().contains( "Cannot remove mount point " + invalidPath)); + } - // test wrong number of arguments - System.setErr(new PrintStream(err)); - argv = new String[] {"-rm", src, "check" }; - ToolRunner.run(admin, argv); - assertTrue(err.toString() - .contains("Too many arguments, Max=1 argument allowed")); + @Test + public void testMultiArgsRemoveMountTable() throws Exception { + String nsId = "ns0"; + String src1 = "/test-rmmounttable1"; + String src2 = "/test-rmmounttable2"; + String dest1 = "/rmmounttable1"; + String dest2 = "/rmmounttable2"; + // Adding mount table entries + String[] argv = new String[] {"-add", src1, nsId, dest1}; + assertEquals(0, ToolRunner.run(admin, argv)); + argv = new String[] {"-add", src2, nsId, dest2}; + assertEquals(0, ToolRunner.run(admin, argv)); + + stateStore.loadCache(MountTableStoreImpl.class, true); + // Ensure mount table entries added successfully + GetMountTableEntriesRequest getRequest = + GetMountTableEntriesRequest.newInstance(src1); + GetMountTableEntriesResponse getResponse = + client.getMountTableManager().getMountTableEntries(getRequest); + MountTable mountTable = getResponse.getEntries().get(0); + getRequest = GetMountTableEntriesRequest.newInstance(src2); + getResponse = + client.getMountTableManager().getMountTableEntries(getRequest); + assertEquals(src1, mountTable.getSourcePath()); + mountTable = getResponse.getEntries().get(0); + assertEquals(src2, mountTable.getSourcePath()); + // Remove multiple mount table entries + argv = new String[] {"-rm", src1, src2}; + assertEquals(0, ToolRunner.run(admin, argv)); + + stateStore.loadCache(MountTableStoreImpl.class, true); + // Verify successful deletion of mount table entries + getResponse = + client.getMountTableManager().getMountTableEntries(getRequest); + assertEquals(0, getResponse.getEntries().size()); } @Test @@ -540,6 +570,7 @@ public void testInvalidArgumentMessage() throws Exception { public void testSetAndClearQuota() throws Exception { String nsId = "ns0"; String src = "/test-QuotaMounttable"; + String src1 = "/test-QuotaMounttable1"; String dest = "/QuotaMounttable"; String[] argv = new String[] {"-add", src, nsId, dest}; assertEquals(0, ToolRunner.run(admin, argv)); @@ -605,15 +636,42 @@ public void testSetAndClearQuota() throws Exception { assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getQuota()); 
assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getSpaceQuota()); + // verify multi args ClrQuota + String dest1 = "/QuotaMounttable1"; + // Add mount table entries. + argv = new String[] {"-add", src, nsId, dest}; + assertEquals(0, ToolRunner.run(admin, argv)); + argv = new String[] {"-add", src1, nsId, dest1}; + assertEquals(0, ToolRunner.run(admin, argv)); + + stateStore.loadCache(MountTableStoreImpl.class, true); + // SetQuota for the added entries + argv = new String[] {"-setQuota", src, "-nsQuota", String.valueOf(nsQuota), + "-ssQuota", String.valueOf(ssQuota)}; + assertEquals(0, ToolRunner.run(admin, argv)); + argv = new String[] {"-setQuota", src1, "-nsQuota", + String.valueOf(nsQuota), "-ssQuota", String.valueOf(ssQuota)}; + assertEquals(0, ToolRunner.run(admin, argv)); + stateStore.loadCache(MountTableStoreImpl.class, true); + // Clear quota for the added entries + argv = new String[] {"-clrQuota", src, src1}; + assertEquals(0, ToolRunner.run(admin, argv)); + + stateStore.loadCache(MountTableStoreImpl.class, true); + getResponse = + client.getMountTableManager().getMountTableEntries(getRequest); + + // Verify clear quota for the entries + for (int i = 0; i < 2; i++) { + mountTable = getResponse.getEntries().get(i); + quotaUsage = mountTable.getQuota(); + assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getQuota()); + assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getSpaceQuota()); + } + // verify wrong arguments System.setErr(new PrintStream(err)); - argv = new String[] {"-clrQuota", src, "check"}; - ToolRunner.run(admin, argv); - assertTrue(err.toString(), - err.toString().contains("Too many arguments, Max=1 argument allowed")); - argv = new String[] {"-setQuota", src, "check", "check2"}; - err.reset(); ToolRunner.run(admin, argv); assertTrue(err.toString().contains("Invalid argument : check")); } From b3fee1d2bfe5d289b8f279071589f21ace99e04c Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Tue, 23 Oct 2018 14:34:29 +0800 Subject: [PATCH 0263/1308] HDFS-14011. RBF: Add more information to HdfsFileStatus for a mount point. Contributed by Akira Ajisaka. --- .../resolver/FileSubclusterResolver.java | 6 +- .../router/RouterClientProtocol.java | 30 +++++++--- .../router/RouterQuotaUpdateService.java | 9 +-- .../hdfs/server/federation/MockResolver.java | 17 +++--- .../router/TestRouterMountTable.java | 55 ++++++++++++++++++- .../router/TestRouterRpcMultiDestination.java | 5 +- 6 files changed, 97 insertions(+), 25 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java index 5aa5ec934cbe0..6432bb0e8c44d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java @@ -61,8 +61,10 @@ public interface FileSubclusterResolver { * cache. * * @param path Path to get the mount points under. - * @return List of mount points present at this path or zero-length list if - * none are found. + * @return List of mount points present at this path. Return zero-length + * list if the path is a mount point but there are no mount points + * under the path. Return null if the path is not a mount point + * and there are no mount points under the path. 
* @throws IOException Throws exception if the data is not available. */ List getMountPoints(String path) throws IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index d603947894368..2e0713fba2e0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -726,6 +726,9 @@ public HdfsFileStatus getFileInfo(String src) throws IOException { date = dates.get(src); } ret = getMountPointStatus(src, children.size(), date); + } else if (children != null) { + // The src is a mount point, but there are no files or directories + ret = getMountPointStatus(src, 0, 0); } } @@ -1734,13 +1737,26 @@ private HdfsFileStatus getMountPointStatus( FsPermission permission = FsPermission.getDirDefault(); String owner = this.superUser; String group = this.superGroup; - try { - // TODO support users, it should be the user for the pointed folder - UserGroupInformation ugi = RouterRpcServer.getRemoteUser(); - owner = ugi.getUserName(); - group = ugi.getPrimaryGroupName(); - } catch (IOException e) { - LOG.error("Cannot get the remote user: {}", e.getMessage()); + if (subclusterResolver instanceof MountTableResolver) { + try { + MountTableResolver mountTable = (MountTableResolver) subclusterResolver; + MountTable entry = mountTable.getMountPoint(name); + if (entry != null) { + permission = entry.getMode(); + owner = entry.getOwnerName(); + group = entry.getGroupName(); + } + } catch (IOException e) { + LOG.error("Cannot get mount point: {}", e.getMessage()); + } + } else { + try { + UserGroupInformation ugi = RouterRpcServer.getRemoteUser(); + owner = ugi.getUserName(); + group = ugi.getPrimaryGroupName(); + } catch (IOException e) { + LOG.error("Cannot get remote user: {}", e.getMessage()); + } } long inodeId = 0; return new HdfsFileStatus.Builder() diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java index 4813b535e3729..9bfd705efbefe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java @@ -87,11 +87,12 @@ protected void periodicInvoke() { QuotaUsage currentQuotaUsage = null; - // Check whether destination path exists in filesystem. If destination - // is not present, reset the usage. For other mount entry get current - // quota usage + // Check whether destination path exists in filesystem. When the + // mtime is zero, the destination is not present and reset the usage. + // This is because mount table does not have mtime. 
+ // For other mount entry get current quota usage HdfsFileStatus ret = this.rpcServer.getFileInfo(src); - if (ret == null) { + if (ret == null || ret.getModificationTime() == 0) { currentQuotaUsage = new RouterQuotaUsage.Builder() .fileAndDirectoryCount(0) .quota(nsQuota) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java index f5636ceccd11f..9bff00732ee91 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java @@ -303,15 +303,16 @@ public PathLocation getDestinationForPath(String path) throws IOException { @Override public List getMountPoints(String path) throws IOException { + // Mounts only supported under root level + if (!path.equals("/")) { + return null; + } List mounts = new ArrayList<>(); - if (path.equals("/")) { - // Mounts only supported under root level - for (String mount : this.locations.keySet()) { - if (mount.length() > 1) { - // Remove leading slash, this is the behavior of the mount tree, - // return only names. - mounts.add(mount.replace("/", "")); - } + for (String mount : this.locations.keySet()) { + if (mount.length() > 1) { + // Remove leading slash, this is the behavior of the mount tree, + // return only names. + mounts.add(mount.replace("/", "")); } } return mounts; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java index 4d8ffe10fcdf5..d2b78d34edfa8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -43,8 +44,12 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; import org.apache.hadoop.util.Time; +import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -59,9 +64,11 @@ public class TestRouterMountTable { private static RouterContext routerContext; private static MountTableResolver mountTable; private static ClientProtocol routerProtocol; + private static long startTime; @BeforeClass public static void globalSetUp() throws 
Exception { + startTime = Time.now(); // Build and start a federated cluster cluster = new StateStoreDFSCluster(false, 1); @@ -92,6 +99,21 @@ public static void tearDown() { } } + @After + public void clearMountTable() throws IOException { + RouterClient client = routerContext.getAdminClient(); + MountTableManager mountTableManager = client.getMountTableManager(); + GetMountTableEntriesRequest req1 = + GetMountTableEntriesRequest.newInstance("/"); + GetMountTableEntriesResponse response = + mountTableManager.getMountTableEntries(req1); + for (MountTable entry : response.getEntries()) { + RemoveMountTableEntryRequest req2 = + RemoveMountTableEntryRequest.newInstance(entry.getSourcePath()); + mountTableManager.removeMountTableEntry(req2); + } + } + @Test public void testReadOnly() throws Exception { @@ -157,7 +179,6 @@ private boolean addMountTable(final MountTable entry) throws IOException { */ @Test public void testListFilesTime() throws Exception { - Long beforeCreatingTime = Time.now(); // Add mount table entry MountTable addEntry = MountTable.newInstance( "/testdir", Collections.singletonMap("ns0", "/testdir")); @@ -211,10 +232,40 @@ public void testListFilesTime() throws Exception { Long expectedTime = pathModTime.get(currentFile); assertEquals(currentFile, fileName); - assertTrue(currentTime > beforeCreatingTime); + assertTrue(currentTime > startTime); assertEquals(currentTime, expectedTime); } // Verify the total number of results found/matched assertEquals(pathModTime.size(), listing.getPartialListing().length); } + + /** + * Verify that the file listing contains correct permission. + */ + @Test + public void testMountTablePermissions() throws Exception { + // Add mount table entries + MountTable addEntry = MountTable.newInstance( + "/testdir1", Collections.singletonMap("ns0", "/testdir1")); + addEntry.setGroupName("group1"); + addEntry.setOwnerName("owner1"); + addEntry.setMode(FsPermission.createImmutable((short)0775)); + assertTrue(addMountTable(addEntry)); + addEntry = MountTable.newInstance( + "/testdir2", Collections.singletonMap("ns0", "/testdir2")); + addEntry.setGroupName("group2"); + addEntry.setOwnerName("owner2"); + addEntry.setMode(FsPermission.createImmutable((short)0755)); + assertTrue(addMountTable(addEntry)); + + HdfsFileStatus fs = routerProtocol.getFileInfo("/testdir1"); + assertEquals("group1", fs.getGroup()); + assertEquals("owner1", fs.getOwner()); + assertEquals((short) 0775, fs.getPermission().toShort()); + + fs = routerProtocol.getFileInfo("/testdir2"); + assertEquals("group2", fs.getGroup()); + assertEquals("owner2", fs.getOwner()); + assertEquals((short) 0755, fs.getPermission().toShort()); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java index 7e0976016be23..94b712f534e05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java @@ -123,8 +123,9 @@ private void testListing(String path) throws IOException { RouterContext rc = getRouterContext(); Router router = rc.getRouter(); FileSubclusterResolver subclusterResolver = router.getSubclusterResolver(); - for (String mount : 
subclusterResolver.getMountPoints(path)) { - requiredPaths.add(mount); + List mountList = subclusterResolver.getMountPoints(path); + if (mountList != null) { + requiredPaths.addAll(mountList); } // Get files/dirs from the Namenodes From c5065bf20b410b917a0df219e45be2cce21a48ab Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Tue, 30 Oct 2018 11:21:08 +0530 Subject: [PATCH 0264/1308] HDFS-13845. RBF: The default MountTableResolver should fail resolving multi-destination paths. Contributed by yanghuafeng. --- .../resolver/MountTableResolver.java | 15 +++++-- .../resolver/TestMountTableResolver.java | 45 +++++++++++++++---- .../router/TestDisableNameservices.java | 36 +++++++++------ 3 files changed, 70 insertions(+), 26 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java index 121469fb8047a..9e69840af9c73 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java @@ -539,21 +539,28 @@ public String toString() { * @param entry Mount table entry. * @return PathLocation containing the namespace, local path. */ - private static PathLocation buildLocation( - final String path, final MountTable entry) { - + private PathLocation buildLocation( + final String path, final MountTable entry) throws IOException { String srcPath = entry.getSourcePath(); if (!path.startsWith(srcPath)) { LOG.error("Cannot build location, {} not a child of {}", path, srcPath); return null; } + + List dests = entry.getDestinations(); + if (getClass() == MountTableResolver.class && dests.size() > 1) { + throw new IOException("Cannnot build location, " + + getClass().getSimpleName() + + " should not resolve multiple destinations for " + path); + } + String remainingPath = path.substring(srcPath.length()); if (remainingPath.startsWith(Path.SEPARATOR)) { remainingPath = remainingPath.substring(1); } List locations = new LinkedList<>(); - for (RemoteLocation oneDst : entry.getDestinations()) { + for (RemoteLocation oneDst : dests) { String nsId = oneDst.getNameserviceId(); String dest = oneDst.getDest(); String newPath = dest; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java index 5e3b861df2374..14ccb6112b9a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java @@ -79,6 +79,8 @@ private Map getMountTableEntry( * __usr * ____bin -> 2:/bin * __readonly -> 2:/tmp + * __multi -> 5:/dest1 + * 6:/dest2 * * @throws IOException If it cannot set the mount table. 
*/ @@ -126,6 +128,12 @@ private void setupMountTable() throws IOException { MountTable readOnlyEntry = MountTable.newInstance("/readonly", map); readOnlyEntry.setReadOnly(true); mountTable.addEntry(readOnlyEntry); + + // /multi + map = getMountTableEntry("5", "/dest1"); + map.put("6", "/dest2"); + MountTable multiEntry = MountTable.newInstance("/multi", map); + mountTable.addEntry(multiEntry); } @Before @@ -201,6 +209,17 @@ public void testDefaultNameServiceEnable() throws IOException { } } + @Test + public void testMuiltipleDestinations() throws IOException { + try { + mountTable.getDestinationForPath("/multi"); + fail("The getDestinationForPath call should fail."); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains( + "MountTableResolver should not resolve multiple destinations", ioe); + } + } + private void compareLists(List list1, String[] list2) { assertEquals(list1.size(), list2.length); for (String item : list2) { @@ -236,8 +255,9 @@ public void testGetMountPoints() throws IOException { // Check getting all mount points (virtual and real) beneath a path List mounts = mountTable.getMountPoints("/"); - assertEquals(4, mounts.size()); - compareLists(mounts, new String[] {"tmp", "user", "usr", "readonly"}); + assertEquals(5, mounts.size()); + compareLists(mounts, new String[] {"tmp", "user", "usr", + "readonly", "multi"}); mounts = mountTable.getMountPoints("/user"); assertEquals(2, mounts.size()); @@ -263,6 +283,9 @@ public void testGetMountPoints() throws IOException { mounts = mountTable.getMountPoints("/unknownpath"); assertNull(mounts); + + mounts = mountTable.getMountPoints("/multi"); + assertEquals(0, mounts.size()); } private void compareRecords(List list1, String[] list2) { @@ -282,10 +305,10 @@ public void testGetMounts() throws IOException { // Check listing the mount table records at or beneath a path List records = mountTable.getMounts("/"); - assertEquals(9, records.size()); + assertEquals(10, records.size()); compareRecords(records, new String[] {"/", "/tmp", "/user", "/usr/bin", "user/a", "/user/a/demo/a", "/user/a/demo/b", "/user/b/file1.txt", - "readonly"}); + "readonly", "multi"}); records = mountTable.getMounts("/user"); assertEquals(5, records.size()); @@ -305,6 +328,10 @@ public void testGetMounts() throws IOException { assertEquals(1, records.size()); compareRecords(records, new String[] {"/readonly"}); assertTrue(records.get(0).isReadOnly()); + + records = mountTable.getMounts("/multi"); + assertEquals(1, records.size()); + compareRecords(records, new String[] {"/multi"}); } @Test @@ -313,7 +340,7 @@ public void testRemoveSubTree() // 3 mount points are present /tmp, /user, /usr compareLists(mountTable.getMountPoints("/"), - new String[] {"user", "usr", "tmp", "readonly"}); + new String[] {"user", "usr", "tmp", "readonly", "multi"}); // /tmp currently points to namespace 2 assertEquals("2", mountTable.getDestinationForPath("/tmp/testfile.txt") @@ -324,7 +351,7 @@ public void testRemoveSubTree() // Now 2 mount points are present /user, /usr compareLists(mountTable.getMountPoints("/"), - new String[] {"user", "usr", "readonly"}); + new String[] {"user", "usr", "readonly", "multi"}); // /tmp no longer exists, uses default namespace for mapping / assertEquals("1", mountTable.getDestinationForPath("/tmp/testfile.txt") @@ -337,7 +364,7 @@ public void testRemoveVirtualNode() // 3 mount points are present /tmp, /user, /usr compareLists(mountTable.getMountPoints("/"), - new String[] {"user", "usr", "tmp", "readonly"}); + new String[] {"user", "usr", 
"tmp", "readonly", "multi"}); // /usr is virtual, uses namespace 1->/ assertEquals("1", mountTable.getDestinationForPath("/usr/testfile.txt") @@ -348,7 +375,7 @@ public void testRemoveVirtualNode() // Verify the remove failed compareLists(mountTable.getMountPoints("/"), - new String[] {"user", "usr", "tmp", "readonly"}); + new String[] {"user", "usr", "tmp", "readonly", "multi"}); } @Test @@ -380,7 +407,7 @@ public void testRefreshEntries() // Initial table loaded testDestination(); - assertEquals(9, mountTable.getMounts("/").size()); + assertEquals(10, mountTable.getMounts("/").size()); // Replace table with /1 and /2 List records = new ArrayList<>(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java index 15b104df21514..610927d6c29c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.util.Time.monotonicNow; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; import java.util.Iterator; @@ -43,13 +44,13 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; -import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.test.GenericTestUtils; import org.codehaus.jettison.json.JSONObject; import org.junit.After; import org.junit.AfterClass; @@ -106,14 +107,18 @@ private static void setupNamespace() throws IOException { // Setup a mount table to map to the two namespaces MountTableManager mountTable = routerAdminClient.getMountTableManager(); Map destinations = new TreeMap<>(); - destinations.put("ns0", "/"); - destinations.put("ns1", "/"); - MountTable newEntry = MountTable.newInstance("/", destinations); - newEntry.setDestOrder(DestinationOrder.RANDOM); + destinations.put("ns0", "/dirns0"); + MountTable newEntry = MountTable.newInstance("/dirns0", destinations); AddMountTableEntryRequest request = AddMountTableEntryRequest.newInstance(newEntry); mountTable.addMountTableEntry(request); + destinations = new TreeMap<>(); + destinations.put("ns1", "/dirns1"); + newEntry = MountTable.newInstance("/dirns1", destinations); + request = AddMountTableEntryRequest.newInstance(newEntry); + mountTable.addMountTableEntry(request); + // Refresh the cache in the Router Router router = routerContext.getRouter(); MountTableResolver mountTableResolver = @@ -122,9 +127,9 @@ private static void setupNamespace() throws IOException { // Add a folder 
to each namespace NamenodeContext nn0 = cluster.getNamenode("ns0", null); - nn0.getFileSystem().mkdirs(new Path("/dirns0")); + nn0.getFileSystem().mkdirs(new Path("/dirns0/0")); NamenodeContext nn1 = cluster.getNamenode("ns1", null); - nn1.getFileSystem().mkdirs(new Path("/dirns1")); + nn1.getFileSystem().mkdirs(new Path("/dirns1/1")); } @AfterClass @@ -153,14 +158,12 @@ public void cleanup() throws IOException { @Test public void testWithoutDisabling() throws IOException { - // ns0 is slow and renewLease should take a long time long t0 = monotonicNow(); routerProtocol.renewLease("client0"); long t = monotonicNow() - t0; assertTrue("It took too little: " + t + "ms", t > TimeUnit.SECONDS.toMillis(1)); - // Return the results from all subclusters even if slow FileSystem routerFs = routerContext.getFileSystem(); FileStatus[] filesStatus = routerFs.listStatus(new Path("/")); @@ -171,7 +174,6 @@ public void testWithoutDisabling() throws IOException { @Test public void testDisabling() throws Exception { - disableNameservice("ns0"); // renewLease should be fast as we are skipping ns0 @@ -180,12 +182,20 @@ public void testDisabling() throws Exception { long t = monotonicNow() - t0; assertTrue("It took too long: " + t + "ms", t < TimeUnit.SECONDS.toMillis(1)); - // We should not report anything from ns0 FileSystem routerFs = routerContext.getFileSystem(); - FileStatus[] filesStatus = routerFs.listStatus(new Path("/")); + FileStatus[] filesStatus = null; + try { + routerFs.listStatus(new Path("/")); + fail("The listStatus call should fail."); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains( + "No remote locations available", ioe); + } + + filesStatus = routerFs.listStatus(new Path("/dirns1")); assertEquals(1, filesStatus.length); - assertEquals("dirns1", filesStatus[0].getPath().getName()); + assertEquals("1", filesStatus[0].getPath().getName()); } @Test From 7b0bc492edbbd30c6945c89d8bc5ad0f6cf42b76 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Thu, 1 Nov 2018 11:49:33 -0700 Subject: [PATCH 0265/1308] HDFS-14024. RBF: ProvidedCapacityTotal json exception in NamenodeHeartbeatService. Contributed by CR Hota. --- .../hdfs/server/federation/router/NamenodeHeartbeatService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java index a1adf77bd50ca..1349aa3056c2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java @@ -351,7 +351,7 @@ private void updateJMXParameters( jsonObject.getLong("PendingReplicationBlocks"), jsonObject.getLong("UnderReplicatedBlocks"), jsonObject.getLong("PendingDeletionBlocks"), - jsonObject.getLong("ProvidedCapacityTotal")); + jsonObject.optLong("ProvidedCapacityTotal")); } } } From 6f2c871b05b97ea0e18f3c431af8b0d606f88561 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Wed, 7 Nov 2018 07:33:37 +0530 Subject: [PATCH 0266/1308] HDFS-12284. RBF: Support for Kerberos authentication. Contributed by Sherwood Zheng and Inigo Goiri. 
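For reviewers coming to this change cold, the following is a minimal sketch of how the new dfs.federation.router.* security keys are meant to be wired together, assuming a router/_HOST@EXAMPLE.COM principal and a keytab at /etc/security/keytabs/router.keytab; the class name SecureRouterConfSketch and those concrete values are placeholders, not anything mandated by the patch. The impersonation side of the change is noted inline next to the RouterRpcClient hunk further down.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
import org.apache.hadoop.security.UserGroupInformation;

/** Placeholder illustration of the configuration this patch introduces. */
public final class SecureRouterConfSketch {

  private SecureRouterConfSketch() {
  }

  public static Configuration build() {
    Configuration conf = new HdfsConfiguration();
    // Kerberos has to be enabled globally so that the keytab login done in
    // Router#serviceInit (SecurityUtil.login) actually takes effect.
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
    // Keys added by this patch in RBFConfigKeys; the values are placeholders.
    conf.set(RBFConfigKeys.DFS_ROUTER_KEYTAB_FILE_KEY,
        "/etc/security/keytabs/router.keytab");
    conf.set(RBFConfigKeys.DFS_ROUTER_KERBEROS_PRINCIPAL_KEY,
        "router/_HOST@EXAMPLE.COM");
    conf.set(RBFConfigKeys.DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
        "HTTP/_HOST@EXAMPLE.COM");
    UserGroupInformation.setConfiguration(conf);
    return conf;
  }
}

SecurityConfUtil later in this patch builds an equivalent configuration for the secure contract tests, with a MiniKdc standing in for a real KDC.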
--- hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 10 ++ .../federation/router/RBFConfigKeys.java | 11 ++ .../hdfs/server/federation/router/Router.java | 28 ++++ .../federation/router/RouterAdminServer.java | 7 + .../federation/router/RouterHttpServer.java | 5 +- .../federation/router/RouterRpcClient.java | 9 +- .../federation/router/RouterRpcServer.java | 12 ++ .../src/main/resources/hdfs-rbf-default.xml | 47 ++++++ .../contract/router/RouterHDFSContract.java | 9 +- .../fs/contract/router/SecurityConfUtil.java | 156 ++++++++++++++++++ .../TestRouterHDFSContractAppendSecure.java | 46 ++++++ .../TestRouterHDFSContractConcatSecure.java | 51 ++++++ .../TestRouterHDFSContractCreateSecure.java | 48 ++++++ .../TestRouterHDFSContractDeleteSecure.java | 46 ++++++ ...RouterHDFSContractGetFileStatusSecure.java | 47 ++++++ .../TestRouterHDFSContractMkdirSecure.java | 48 ++++++ .../TestRouterHDFSContractOpenSecure.java | 47 ++++++ .../TestRouterHDFSContractRenameSecure.java | 48 ++++++ ...RouterHDFSContractRootDirectorySecure.java | 63 +++++++ .../TestRouterHDFSContractSeekSecure.java | 48 ++++++ .../TestRouterHDFSContractSetTimesSecure.java | 48 ++++++ .../federation/MiniRouterDFSCluster.java | 58 ++++++- 22 files changed, 879 insertions(+), 13 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index 8c76214166c8d..400904365992b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -34,6 +34,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.bouncycastle + bcprov-jdk16 + test + + + org.apache.hadoop + hadoop-minikdc + test + org.apache.hadoop hadoop-common diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java index bbd4250b268c7..fa474f44dc8e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java @@ -242,4 +242,15 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic { FEDERATION_ROUTER_PREFIX + "quota-cache.update.interval"; public static final long DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL_DEFAULT = 60000; + + // HDFS Router security + public static final String DFS_ROUTER_KEYTAB_FILE_KEY = + FEDERATION_ROUTER_PREFIX + "keytab.file"; + public static final String DFS_ROUTER_KERBEROS_PRINCIPAL_KEY = + FEDERATION_ROUTER_PREFIX + "kerberos.principal"; + public static final String DFS_ROUTER_KERBEROS_PRINCIPAL_HOSTNAME_KEY = + FEDERATION_ROUTER_PREFIX + "kerberos.principal.hostname"; + + public static final String DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY = + FEDERATION_ROUTER_PREFIX + "kerberos.internal.spnego.principal"; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 5ddc1297a6bd8..32882735ebf61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KERBEROS_PRINCIPAL_HOSTNAME_KEY; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KEYTAB_FILE_KEY; + import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.newActiveNamenodeResolver; import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.newFileSubclusterResolver; @@ -41,6 +45,8 @@ import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.Time; @@ -145,6 +151,11 @@ protected void serviceInit(Configuration configuration) throws Exception { this.conf = configuration; updateRouterState(RouterServiceState.INITIALIZING); + // Enable the security for the Router + UserGroupInformation.setConfiguration(conf); + SecurityUtil.login(conf, DFS_ROUTER_KEYTAB_FILE_KEY, + DFS_ROUTER_KERBEROS_PRINCIPAL_KEY, getHostName(conf)); + if (conf.getBoolean( RBFConfigKeys.DFS_ROUTER_STORE_ENABLE, RBFConfigKeys.DFS_ROUTER_STORE_ENABLE_DEFAULT)) { @@ -246,6 +257,23 @@ protected void serviceInit(Configuration configuration) throws Exception { super.serviceInit(conf); } + /** + * Returns the hostname for this Router. 
If the hostname is not + * explicitly configured in the given config, then it is determined. + * + * @param config configuration + * @return the hostname (NB: may not be a FQDN) + * @throws UnknownHostException if the hostname cannot be determined + */ + private static String getHostName(Configuration config) + throws UnknownHostException { + String name = config.get(DFS_ROUTER_KERBEROS_PRINCIPAL_HOSTNAME_KEY); + if (name == null) { + name = InetAddress.getLocalHost().getHostName(); + } + return name; + } + @Override protected void serviceStart() throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index e7fec9e7dcd5b..f34dc419eb63e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY; @@ -27,6 +28,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService; @@ -142,6 +144,11 @@ public RouterAdminServer(Configuration conf, Router router) .setVerbose(false) .build(); + // Set service-level authorization security policy + if (conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false)) { + this.adminServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); + } + // The RPC-server port can be ephemeral... 
ensure we have the correct info InetSocketAddress listenAddress = this.adminServer.getListenerAddress(); this.adminAddress = new InetSocketAddress( diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java index d223e2a2f8e62..d6a51465038c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java @@ -20,7 +20,6 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer; @@ -84,8 +83,8 @@ protected void serviceStart() throws Exception { String webApp = "router"; HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN( this.conf, this.httpAddress, this.httpsAddress, webApp, - DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY, - DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY); + RBFConfigKeys.DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY, + RBFConfigKeys.DFS_ROUTER_KEYTAB_FILE_KEY); this.httpServer = builder.build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index 34f51ec3fecf0..a21e9802c7c25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -255,7 +255,14 @@ private ConnectionContext getConnection(UserGroupInformation ugi, String nsId, // for each individual request. 
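// Reviewer note on the hunk below (annotation only, not RouterRpcClient code):
// when security is enabled, the connection is opened as a proxy user, i.e. the
// caller's identity wrapped around the Router's Kerberos login user. For a
// NameNode to accept that impersonation, its core-site typically needs the
// standard proxyuser entries for the router principal's short name, e.g.
//   hadoop.proxyuser.router.hosts  = <router hosts>
//   hadoop.proxyuser.router.groups = *
// where "router" is an assumed short name, not a value taken from this patch.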
// TODO Add tokens from the federated UGI - connection = this.connectionManager.getConnection(ugi, rpcAddress, proto); + UserGroupInformation connUGI = ugi; + if (UserGroupInformation.isSecurityEnabled()) { + UserGroupInformation routerUser = UserGroupInformation.getLoginUser(); + connUGI = UserGroupInformation.createProxyUser( + ugi.getUserName(), routerUser); + } + connection = this.connectionManager.getConnection( + connUGI, rpcAddress, proto); LOG.debug("User {} NN {} is using connection {}", ugi.getUserName(), rpcAddress, connection); } catch (Exception ex) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 36d3c81e169bd..fcb35f41271f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT; @@ -61,6 +62,7 @@ import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.hdfs.AddBlockFlag; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.inotify.EventBatchList; import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; @@ -175,6 +177,9 @@ public class RouterRpcServer extends AbstractService /** Monitor metrics for the RPC calls. */ private final RouterRpcMonitor rpcMonitor; + /** If we use authentication for the connections. */ + private final boolean serviceAuthEnabled; + /** Interface to identify the active NN for a nameservice or blockpool ID. */ private final ActiveNamenodeResolver namenodeResolver; @@ -266,6 +271,13 @@ public RouterRpcServer(Configuration configuration, Router router, DFSUtil.addPBProtocol( conf, NamenodeProtocolPB.class, nnPbService, this.rpcServer); + // Set service-level authorization security policy + this.serviceAuthEnabled = conf.getBoolean( + HADOOP_SECURITY_AUTHORIZATION, false); + if (this.serviceAuthEnabled) { + rpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); + } + // We don't want the server to log the full stack trace for some exceptions this.rpcServer.addTerseExceptions( RemoteException.class, diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index 3f560431ac77f..29c3093cb644a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -465,4 +465,51 @@ + + dfs.federation.router.keytab.file + + + The keytab file used by router to login as its + service principal. The principal name is configured with + dfs.federation.router.kerberos.principal. 
+ + + + + dfs.federation.router.kerberos.principal + + + The Router service principal. This is typically set to + router/_HOST@REALM.TLD. Each Router will substitute _HOST with its + own fully qualified hostname at startup. The _HOST placeholder + allows using the same configuration setting on both Router + in an HA setup. + + + + + dfs.federation.router.kerberos.principal.hostname + + + Optional. The hostname for the Router containing this + configuration file. Will be different for each machine. + Defaults to current hostname. + + + + + dfs.federation.router.kerberos.internal.spnego.principal + ${dfs.web.authentication.kerberos.principal} + + The server principal used by the Router for web UI SPNEGO + authentication when Kerberos security is enabled. This is + typically set to HTTP/_HOST@REALM.TLD The SPNEGO server principal + begins with the prefix HTTP/ by convention. + + If the value is '*', the web server will attempt to login with + every principal specified in the keytab file + dfs.web.authentication.kerberos.keytab. + + + \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java index 97a426e2c2a8c..510cb95ee19f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java @@ -43,12 +43,17 @@ public RouterHDFSContract(Configuration conf) { } public static void createCluster() throws IOException { + createCluster(null); + } + + public static void createCluster(Configuration conf) throws IOException { try { - cluster = new MiniRouterDFSCluster(true, 2); + cluster = new MiniRouterDFSCluster(true, 2, conf); // Start NNs and DNs and wait until ready - cluster.startCluster(); + cluster.startCluster(conf); + cluster.addRouterOverrides(conf); // Start routers with only an RPC service cluster.startRouters(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java new file mode 100644 index 0000000000000..deb6ace16b16a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java @@ -0,0 +1,156 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. 
+ */ + +package org.apache.hadoop.fs.contract.router; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SERVICE_USER_NAME_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.Properties; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; +import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver; +import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl; +import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.apache.hadoop.test.GenericTestUtils; + +/** + * Test utility to provide a standard routine to initialize the configuration + * for secure RBF HDFS cluster. 
+ */ +public final class SecurityConfUtil { + + // SSL keystore + private static String keystoresDir; + private static String sslConfDir; + + // State string for mini dfs + private static final String SPNEGO_USER_NAME = "HTTP"; + private static final String ROUTER_USER_NAME = "router"; + + private static String spnegoPrincipal; + private static String routerPrincipal; + + private SecurityConfUtil() { + // Utility Class + } + + public static Configuration initSecurity() throws Exception { + // delete old test dir + File baseDir = GenericTestUtils.getTestDir( + SecurityConfUtil.class.getSimpleName()); + FileUtil.fullyDelete(baseDir); + assertTrue(baseDir.mkdirs()); + + // start a mini kdc with default conf + Properties kdcConf = MiniKdc.createConf(); + MiniKdc kdc = new MiniKdc(kdcConf, baseDir); + kdc.start(); + + Configuration conf = new HdfsConfiguration(); + SecurityUtil.setAuthenticationMethod( + UserGroupInformation.AuthenticationMethod.KERBEROS, conf); + + UserGroupInformation.setConfiguration(conf); + assertTrue("Expected configuration to enable security", + UserGroupInformation.isSecurityEnabled()); + + // Setup the keytab + File keytabFile = new File(baseDir, "test.keytab"); + String keytab = keytabFile.getAbsolutePath(); + + // Windows will not reverse name lookup "127.0.0.1" to "localhost". + String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost"; + + kdc.createPrincipal(keytabFile, + SPNEGO_USER_NAME + "/" + krbInstance, + ROUTER_USER_NAME + "/" + krbInstance); + + routerPrincipal = + ROUTER_USER_NAME + "/" + krbInstance + "@" + kdc.getRealm(); + spnegoPrincipal = + SPNEGO_USER_NAME + "/" + krbInstance + "@" + kdc.getRealm(); + + // Set auth configuration for mini DFS + conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + conf.set(HADOOP_SECURITY_SERVICE_USER_NAME_KEY, routerPrincipal); + + // Setup principles and keytabs for dfs + conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, routerPrincipal); + conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab); + conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, routerPrincipal); + conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab); + conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal); + conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, keytab); + + conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); + conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0"); + + conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); + conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication"); + conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); + + // Setup SSL configuration + keystoresDir = baseDir.getAbsolutePath(); + sslConfDir = KeyStoreTestUtil.getClasspathDir( + SecurityConfUtil.class); + KeyStoreTestUtil.setupSSLConfig( + keystoresDir, sslConfDir, conf, false); + conf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY, + KeyStoreTestUtil.getClientSSLConfigFileName()); + conf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, + KeyStoreTestUtil.getServerSSLConfigFileName()); + + // Setup principals and keytabs for router + conf.set(DFS_ROUTER_KEYTAB_FILE_KEY, keytab); + conf.set(DFS_ROUTER_KERBEROS_PRINCIPAL_KEY, routerPrincipal); + conf.set(DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY, "*"); + + // Setup basic state store + conf.setClass(RBFConfigKeys.FEDERATION_STORE_DRIVER_CLASS, + StateStoreFileImpl.class, StateStoreDriver.class); + + // We need to specify the host to prevent 0.0.0.0 as the host address + conf.set(DFS_ROUTER_RPC_BIND_HOST_KEY, "localhost"); + + return conf; + } +} diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java new file mode 100644 index 0000000000000..fe4951db0b1c1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java @@ -0,0 +1,46 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. + */ + +package org.apache.hadoop.fs.contract.router; + +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractAppendTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + +/** + * Test secure append operations on the Router-based FS. + */ +public class TestRouterHDFSContractAppendSecure + extends AbstractContractAppendTest { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java new file mode 100644 index 0000000000000..c9a0cc8f215ce --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java @@ -0,0 +1,51 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. 
+ */ + +package org.apache.hadoop.fs.contract.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.AbstractContractConcatTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + + +/** + * Test secure concat operations on the Router-based FS. + */ +public class TestRouterHDFSContractConcatSecure + extends AbstractContractConcatTest { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + // perform a simple operation on the cluster to verify it is up + RouterHDFSContract.getFileSystem().getDefaultBlockSize(new Path("/")); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java new file mode 100644 index 0000000000000..dc264b0d59279 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java @@ -0,0 +1,48 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. + */ + +package org.apache.hadoop.fs.contract.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractCreateTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + + +/** + * Test secure create operations on the Router-based FS. 
+ */ +public class TestRouterHDFSContractCreateSecure + extends AbstractContractCreateTest { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java new file mode 100644 index 0000000000000..57cc138618dad --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java @@ -0,0 +1,46 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. + */ + +package org.apache.hadoop.fs.contract.router; + +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractDeleteTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + +/** + * Test secure delete operations on the Router-based FS. + */ +public class TestRouterHDFSContractDeleteSecure + extends AbstractContractDeleteTest { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java new file mode 100644 index 0000000000000..13e4e9661f7cf --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java @@ -0,0 +1,47 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. + */ + +package org.apache.hadoop.fs.contract.router; + +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + + +/** + * Test secure get file status operations on the Router-based FS. + */ +public class TestRouterHDFSContractGetFileStatusSecure + extends AbstractContractGetFileStatusTest { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java new file mode 100644 index 0000000000000..7c223a6704a2d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java @@ -0,0 +1,48 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. + */ + +package org.apache.hadoop.fs.contract.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + + +/** + * Test secure dir operations on the Router-based FS. 
+ */ +public class TestRouterHDFSContractMkdirSecure + extends AbstractContractMkdirTest { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java new file mode 100644 index 0000000000000..434402c587dde --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java @@ -0,0 +1,47 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. + */ + +package org.apache.hadoop.fs.contract.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractOpenTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + + +/** + * Test secure open operations on the Router-based FS. + */ +public class TestRouterHDFSContractOpenSecure extends AbstractContractOpenTest { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java new file mode 100644 index 0000000000000..29d7398884b43 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java @@ -0,0 +1,48 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
See accompanying LICENSE file. + */ + +package org.apache.hadoop.fs.contract.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractRenameTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + + +/** + * Test secure rename operations on the Router-based FS. + */ +public class TestRouterHDFSContractRenameSecure + extends AbstractContractRenameTest { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java new file mode 100644 index 0000000000000..32ec1617d1fec --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java @@ -0,0 +1,63 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. + */ + +package org.apache.hadoop.fs.contract.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + + +/** + * Test secure root dir operations on the Router-based FS. 
+ */ +public class TestRouterHDFSContractRootDirectorySecure + extends AbstractContractRootDirectoryTest { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } + + @Override + public void testListEmptyRootDirectory() throws IOException { + // It doesn't apply because we still have the mount points here + } + + @Override + public void testRmEmptyRootDirNonRecursive() throws IOException { + // It doesn't apply because we still have the mount points here + } + + @Override + public void testRecursiveRootListing() throws IOException { + // It doesn't apply because we still have the mount points here + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java new file mode 100644 index 0000000000000..f281b472c7c7d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java @@ -0,0 +1,48 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. + */ + +package org.apache.hadoop.fs.contract.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractSeekTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + + +/** + * Test secure seek operations on the Router-based FS. 
+ */ +public class TestRouterHDFSContractSeekSecure extends AbstractContractSeekTest { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java new file mode 100644 index 0000000000000..8f86b951a5043 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java @@ -0,0 +1,48 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. + */ + +package org.apache.hadoop.fs.contract.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractSetTimesTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + + +/** + * Test secure set times operations on the Router-based FS. 
+ */ +public class TestRouterHDFSContractSetTimesSecure + extends AbstractContractSetTimesTest { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java index e34713d665afe..a5693a6f2c367 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java @@ -28,6 +28,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.addDirectory; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.waitNamenodeRegistered; @@ -85,6 +87,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.Service.STATE; @@ -270,6 +273,7 @@ public class NamenodeContext { private int servicePort; private int lifelinePort; private int httpPort; + private int httpsPort; private URI fileSystemUri; private int index; private DFSClient client; @@ -305,7 +309,12 @@ public void setNamenode(NameNode nn) throws URISyntaxException { this.rpcPort = nn.getNameNodeAddress().getPort(); this.servicePort = nn.getServiceRpcAddress().getPort(); this.lifelinePort = nn.getServiceRpcAddress().getPort(); - this.httpPort = nn.getHttpAddress().getPort(); + if (nn.getHttpAddress() != null) { + this.httpPort = nn.getHttpAddress().getPort(); + } + if (nn.getHttpsAddress() != null) { + this.httpsPort = nn.getHttpsAddress().getPort(); + } this.fileSystemUri = new URI("hdfs://" + namenode.getHostAndPort()); DistributedFileSystem.setDefaultUri(this.conf, this.fileSystemUri); @@ -328,10 +337,22 @@ public String getLifelineAddress() { return namenode.getServiceRpcAddress().getHostName() + ":" + lifelinePort; } + public String getWebAddress() { + if (conf.get(DFS_HTTP_POLICY_KEY) + .equals(HttpConfig.Policy.HTTPS_ONLY.name())) { + return getHttpsAddress(); + } + return getHttpAddress(); + } + public String getHttpAddress() { return namenode.getHttpAddress().getHostName() + ":" + httpPort; } + public String getHttpsAddress() { + return namenode.getHttpsAddress().getHostName() + ":" + httpsPort; + } + public FileSystem getFileSystem() throws IOException { return DistributedFileSystem.get(conf); } @@ -375,22 +396,38 
@@ public Configuration getConf() { public MiniRouterDFSCluster( boolean ha, int numNameservices, int numNamenodes, - long heartbeatInterval, long cacheFlushInterval) { + long heartbeatInterval, long cacheFlushInterval, + Configuration overrideConf) { this.highAvailability = ha; this.heartbeatInterval = heartbeatInterval; this.cacheFlushInterval = cacheFlushInterval; - configureNameservices(numNameservices, numNamenodes); + configureNameservices(numNameservices, numNamenodes, overrideConf); + } + + public MiniRouterDFSCluster( + boolean ha, int numNameservices, int numNamenodes, + long heartbeatInterval, long cacheFlushInterval) { + this(ha, numNameservices, numNamenodes, + heartbeatInterval, cacheFlushInterval, null); } public MiniRouterDFSCluster(boolean ha, int numNameservices) { this(ha, numNameservices, 2, - DEFAULT_HEARTBEAT_INTERVAL_MS, DEFAULT_CACHE_INTERVAL_MS); + DEFAULT_HEARTBEAT_INTERVAL_MS, DEFAULT_CACHE_INTERVAL_MS, + null); } public MiniRouterDFSCluster( boolean ha, int numNameservices, int numNamenodes) { this(ha, numNameservices, numNamenodes, - DEFAULT_HEARTBEAT_INTERVAL_MS, DEFAULT_CACHE_INTERVAL_MS); + DEFAULT_HEARTBEAT_INTERVAL_MS, DEFAULT_CACHE_INTERVAL_MS, + null); + } + + public MiniRouterDFSCluster(boolean ha, int numNameservices, + Configuration overrideConf) { + this(ha, numNameservices, 2, + DEFAULT_HEARTBEAT_INTERVAL_MS, DEFAULT_CACHE_INTERVAL_MS, overrideConf); } /** @@ -447,6 +484,8 @@ public Configuration generateNamenodeConfiguration(String nsId) { "127.0.0.1:" + context.httpPort); conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY + "." + suffix, "0.0.0.0"); + conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY + "." + suffix, + "127.0.0.1:" + context.httpsPort); // If the service port is enabled by default, we need to set them up boolean servicePortEnabled = false; @@ -543,7 +582,8 @@ public Configuration generateRouterConfiguration(String nsId, String nnId) { return conf; } - public void configureNameservices(int numNameservices, int numNamenodes) { + public void configureNameservices(int numNameservices, int numNamenodes, + Configuration overrideConf) { this.nameservices = new ArrayList<>(); this.namenodes = new ArrayList<>(); @@ -554,6 +594,10 @@ public void configureNameservices(int numNameservices, int numNamenodes) { this.nameservices.add("ns" + i); Configuration nnConf = generateNamenodeConfiguration(ns); + if (overrideConf != null) { + nnConf.addResource(overrideConf); + } + if (!highAvailability) { context = new NamenodeContext(nnConf, ns, null, nnIndex++); this.namenodes.add(context); @@ -788,7 +832,7 @@ public void registerNamenodes() throws IOException { NamenodeStatusReport report = new NamenodeStatusReport( nn.nameserviceId, nn.namenodeId, nn.getRpcAddress(), nn.getServiceAddress(), - nn.getLifelineAddress(), nn.getHttpAddress()); + nn.getLifelineAddress(), nn.getWebAddress()); FSImage fsImage = nn.namenode.getNamesystem().getFSImage(); NamespaceInfo nsInfo = fsImage.getStorage().getNamespaceInfo(); report.setNamespaceInfo(nsInfo); From ebfd2d8a4e0214a4fff512ffe00e98a721ae49c8 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Wed, 7 Nov 2018 07:37:02 +0530 Subject: [PATCH 0267/1308] HDFS-12284. addendum to HDFS-12284. Contributed by Inigo Goiri. 
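As a usage sketch for the MiniRouterDFSCluster overloads added above that accept an override Configuration: a secure test cluster can be brought up by threading the security settings from SecurityConfUtil through the cluster and the Routers. The HA flag and two-nameservice sizing are illustrative.

    // Mirrors RouterHDFSContract.createCluster(conf) earlier in this patch.
    Configuration secureConf = SecurityConfUtil.initSecurity();
    MiniRouterDFSCluster cluster =
        new MiniRouterDFSCluster(true, 2, secureConf);
    cluster.startCluster(secureConf);
    cluster.addRouterOverrides(secureConf);
    cluster.startRouters();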
--- hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index 400904365992b..e47a204047ac0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -36,7 +36,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle - bcprov-jdk16 + bcprov-jdk15on test From 04caaba4884cdea9f3b97f819fe6599ab3d6f151 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Tue, 13 Nov 2018 10:14:35 -0800 Subject: [PATCH 0268/1308] HDFS-13852. RBF: The DN_REPORT_TIME_OUT and DN_REPORT_CACHE_EXPIRE should be configured in RBFConfigKeys. Contributed by yanghuafeng. --- .../federation/metrics/FederationMetrics.java | 12 ++++++++-- .../metrics/NamenodeBeanMetrics.java | 22 ++++--------------- .../federation/router/RBFConfigKeys.java | 7 ++++++ .../src/main/resources/hdfs-rbf-default.xml | 17 ++++++++++++++ .../router/TestRouterRPCClientRetries.java | 2 +- .../federation/router/TestRouterRpc.java | 2 +- 6 files changed, 40 insertions(+), 22 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java index 23f62b619db70..6a0a46e89e83c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java @@ -47,12 +47,14 @@ import javax.management.ObjectName; import javax.management.StandardMBean; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.router.Router; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; @@ -95,7 +97,7 @@ public class FederationMetrics implements FederationMBean { private static final String DATE_FORMAT = "yyyy/MM/dd HH:mm:ss"; /** Prevent holding the page from load too long. */ - private static final long TIME_OUT = TimeUnit.SECONDS.toMillis(1); + private final long timeOut; /** Router interface. 
*/ @@ -143,6 +145,12 @@ public FederationMetrics(Router router) throws IOException { this.routerStore = stateStore.getRegisteredRecordStore( RouterStore.class); } + + // Initialize the cache for the DN reports + Configuration conf = router.getConfig(); + this.timeOut = conf.getTimeDuration(RBFConfigKeys.DN_REPORT_TIME_OUT, + RBFConfigKeys.DN_REPORT_TIME_OUT_MS_DEFAULT, TimeUnit.MILLISECONDS); + } /** @@ -434,7 +442,7 @@ public String getNodeUsage() { try { RouterRpcServer rpcServer = this.router.getRpcServer(); DatanodeInfo[] live = rpcServer.getDatanodeReport( - DatanodeReportType.LIVE, false, TIME_OUT); + DatanodeReportType.LIVE, false, timeOut); if (live.length > 0) { float totalDfsUsed = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java index 73765cf0df0f6..4380ae9eebb53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java @@ -74,21 +74,6 @@ public class NamenodeBeanMetrics private static final Logger LOG = LoggerFactory.getLogger(NamenodeBeanMetrics.class); - /** Prevent holding the page from loading too long. */ - private static final String DN_REPORT_TIME_OUT = - RBFConfigKeys.FEDERATION_ROUTER_PREFIX + "dn-report.time-out"; - /** We only wait for 1 second. */ - private static final long DN_REPORT_TIME_OUT_DEFAULT = - TimeUnit.SECONDS.toMillis(1); - - /** Time to cache the DN information. */ - public static final String DN_REPORT_CACHE_EXPIRE = - RBFConfigKeys.FEDERATION_ROUTER_PREFIX + "dn-report.cache-expire"; - /** We cache the DN information for 10 seconds by default. */ - public static final long DN_REPORT_CACHE_EXPIRE_DEFAULT = - TimeUnit.SECONDS.toMillis(10); - - /** Instance of the Router being monitored. 
*/ private final Router router; @@ -148,10 +133,11 @@ public NamenodeBeanMetrics(Router router) { // Initialize the cache for the DN reports Configuration conf = router.getConfig(); this.dnReportTimeOut = conf.getTimeDuration( - DN_REPORT_TIME_OUT, DN_REPORT_TIME_OUT_DEFAULT, TimeUnit.MILLISECONDS); + RBFConfigKeys.DN_REPORT_TIME_OUT, + RBFConfigKeys.DN_REPORT_TIME_OUT_MS_DEFAULT, TimeUnit.MILLISECONDS); long dnCacheExpire = conf.getTimeDuration( - DN_REPORT_CACHE_EXPIRE, - DN_REPORT_CACHE_EXPIRE_DEFAULT, TimeUnit.MILLISECONDS); + RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, + RBFConfigKeys.DN_REPORT_CACHE_EXPIRE_MS_DEFAULT, TimeUnit.MILLISECONDS); this.dnCache = CacheBuilder.newBuilder() .expireAfterWrite(dnCacheExpire, TimeUnit.MILLISECONDS) .build( diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java index fa474f44dc8e9..dd72e3695ac9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java @@ -233,6 +233,13 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic { FEDERATION_ROUTER_PREFIX + "https-bind-host"; public static final String DFS_ROUTER_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_ROUTER_HTTPS_PORT_DEFAULT; + public static final String DN_REPORT_TIME_OUT = + FEDERATION_ROUTER_PREFIX + "dn-report.time-out"; + public static final long DN_REPORT_TIME_OUT_MS_DEFAULT = 1000; + public static final String DN_REPORT_CACHE_EXPIRE = + FEDERATION_ROUTER_PREFIX + "dn-report.cache-expire"; + public static final long DN_REPORT_CACHE_EXPIRE_MS_DEFAULT = + TimeUnit.SECONDS.toMillis(10); // HDFS Router-based federation quota public static final String DFS_ROUTER_QUOTA_ENABLE = diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index 29c3093cb644a..53bf53aab93ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -143,6 +143,23 @@ + + + dfs.federation.router.dn-report.time-out + 1000 + + Time out, in milliseconds for getDatanodeReport. + + + + + dfs.federation.router.dn-report.cache-expire + 10s + + Expiration time in seconds for datanodereport. 
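A short sketch of overriding these two keys on a Router configuration; the durations are arbitrary examples, and the constants are the RBFConfigKeys entries introduced above.

    Configuration routerConf = new Configuration(false);
    routerConf.setTimeDuration(
        RBFConfigKeys.DN_REPORT_TIME_OUT, 2, TimeUnit.SECONDS);
    routerConf.setTimeDuration(
        RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 30, TimeUnit.SECONDS);

The tests further down use the same setTimeDuration call to shrink the cache expiry to one second.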
+ + + dfs.federation.router.metrics.class org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java index e5ab3ab27707a..f84e9a03ee88b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java @@ -81,7 +81,7 @@ public void setUp() throws Exception { .rpc() .build(); routerConf.setTimeDuration( - NamenodeBeanMetrics.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); + RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); // reduce IPC client connection retry times and interval time Configuration clientConf = new Configuration(false); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index a32cba147df14..204366e48d52f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -175,7 +175,7 @@ public static void globalSetUp() throws Exception { .build(); // We decrease the DN cache times to make the test faster routerConf.setTimeDuration( - NamenodeBeanMetrics.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); + RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); cluster.addRouterOverrides(routerConf); cluster.startRouters(); From fa55eacd357f4c4524fb54598e989c5746921de2 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Wed, 14 Nov 2018 18:35:12 +0530 Subject: [PATCH 0269/1308] HDFS-13834. RBF: Connection creator thread should catch Throwable. Contributed by CR Hota. --- .../federation/router/ConnectionManager.java | 4 +- .../router/TestConnectionManager.java | 43 +++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index 9fb83e430bc07..fa2bf944dda6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -393,7 +393,7 @@ public void run() { /** * Thread that creates connections asynchronously. */ - private static class ConnectionCreator extends Thread { + static class ConnectionCreator extends Thread { /** If the creator is running. */ private boolean running = true; /** Queue to push work to. 
*/ @@ -426,6 +426,8 @@ public void run() { } catch (InterruptedException e) { LOG.error("The connection creator was interrupted"); this.running = false; + } catch (Throwable e) { + LOG.error("Fatal error caught by connection creator ", e); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java index 0e1eb40783fe3..765f6c84e5611 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java @@ -22,12 +22,17 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.Rule; +import org.junit.rules.ExpectedException; import java.io.IOException; import java.util.Map; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -49,6 +54,7 @@ public class TestConnectionManager { private static final UserGroupInformation TEST_USER3 = UserGroupInformation.createUserForTesting("user3", TEST_GROUP); private static final String TEST_NN_ADDRESS = "nn1:8080"; + private static final String UNRESOLVED_TEST_NN_ADDRESS = "unknownhost:8080"; @Before public void setup() throws Exception { @@ -59,6 +65,9 @@ public void setup() throws Exception { connManager.start(); } + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + @After public void shutdown() { if (connManager != null) { @@ -121,6 +130,40 @@ public void testCleanup() throws Exception { checkPoolConnections(TEST_USER3, 4, 2); } + @Test + public void testConnectionCreatorWithException() throws Exception { + // Create a bad connection pool pointing to unresolvable namenode address. + ConnectionPool badPool = new ConnectionPool( + conf, UNRESOLVED_TEST_NN_ADDRESS, TEST_USER1, 0, 10, + ClientProtocol.class); + BlockingQueue queue = new ArrayBlockingQueue<>(1); + queue.add(badPool); + ConnectionManager.ConnectionCreator connectionCreator = + new ConnectionManager.ConnectionCreator(queue); + connectionCreator.setDaemon(true); + connectionCreator.start(); + // Wait to make sure async thread is scheduled and picks + GenericTestUtils.waitFor(()->queue.isEmpty(), 50, 5000); + // At this point connection creation task should be definitely picked up. + assertTrue(queue.isEmpty()); + // At this point connection thread should still be alive. + assertTrue(connectionCreator.isAlive()); + // Stop the thread as test is successful at this point + connectionCreator.interrupt(); + } + + @Test + public void testGetConnectionWithException() throws Exception { + String exceptionCause = "java.net.UnknownHostException: unknownhost"; + exceptionRule.expect(IllegalArgumentException.class); + exceptionRule.expectMessage(exceptionCause); + + // Create a bad connection pool pointing to unresolvable namenode address. 
+ ConnectionPool badPool = new ConnectionPool( + conf, UNRESOLVED_TEST_NN_ADDRESS, TEST_USER1, 1, 10, + ClientProtocol.class); + } + @Test public void testGetConnection() throws Exception { Map poolMap = connManager.getPools(); From f4bd1114ff529e971f9b496ad62a7edca37fdf8d Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Wed, 21 Nov 2018 10:40:26 +0800 Subject: [PATCH 0270/1308] HDFS-14082. RBF: Add option to fail operations when a subcluster is unavailable. Contributed by Inigo Goiri. --- .../federation/router/RBFConfigKeys.java | 4 ++ .../router/RouterClientProtocol.java | 15 +++-- .../federation/router/RouterRpcServer.java | 9 +++ .../src/main/resources/hdfs-rbf-default.xml | 10 ++++ .../router/TestRouterRpcMultiDestination.java | 59 +++++++++++++++++++ 5 files changed, 93 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java index dd72e3695ac9d..10018fe2bd754 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java @@ -125,6 +125,10 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic { public static final String DFS_ROUTER_CLIENT_REJECT_OVERLOAD = FEDERATION_ROUTER_PREFIX + "client.reject.overload"; public static final boolean DFS_ROUTER_CLIENT_REJECT_OVERLOAD_DEFAULT = false; + public static final String DFS_ROUTER_ALLOW_PARTIAL_LIST = + FEDERATION_ROUTER_PREFIX + "client.allow-partial-listing"; + public static final boolean DFS_ROUTER_ALLOW_PARTIAL_LIST_DEFAULT = true; + // HDFS Router State Store connection public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS = diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 2e0713fba2e0e..c8b7cdd1aa4d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -112,6 +112,9 @@ public class RouterClientProtocol implements ClientProtocol { private final FileSubclusterResolver subclusterResolver; private final ActiveNamenodeResolver namenodeResolver; + /** If it requires response from all subclusters. */ + private final boolean allowPartialList; + /** Identifier for the super user. */ private String superUser; /** Identifier for the super group. 
*/ @@ -125,6 +128,10 @@ public class RouterClientProtocol implements ClientProtocol { this.subclusterResolver = rpcServer.getSubclusterResolver(); this.namenodeResolver = rpcServer.getNamenodeResolver(); + this.allowPartialList = conf.getBoolean( + RBFConfigKeys.DFS_ROUTER_ALLOW_PARTIAL_LIST, + RBFConfigKeys.DFS_ROUTER_ALLOW_PARTIAL_LIST_DEFAULT); + // User and group for reporting try { this.superUser = UserGroupInformation.getCurrentUser().getShortUserName(); @@ -614,8 +621,8 @@ public DirectoryListing getListing(String src, byte[] startAfter, new Class[] {String.class, startAfter.getClass(), boolean.class}, new RemoteParam(), startAfter, needLocation); Map listings = - rpcClient.invokeConcurrent( - locations, method, false, false, DirectoryListing.class); + rpcClient.invokeConcurrent(locations, method, + !this.allowPartialList, false, DirectoryListing.class); Map nnListing = new TreeMap<>(); int totalRemainingEntries = 0; @@ -1004,8 +1011,8 @@ public ContentSummary getContentSummary(String path) throws IOException { RemoteMethod method = new RemoteMethod("getContentSummary", new Class[] {String.class}, new RemoteParam()); Map results = - rpcClient.invokeConcurrent( - locations, method, false, false, ContentSummary.class); + rpcClient.invokeConcurrent(locations, method, + !this.allowPartialList, false, ContentSummary.class); summaries.addAll(results.values()); } catch (FileNotFoundException e) { notFoundException = e; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index fcb35f41271f9..ad5980b8d36b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -1483,6 +1483,15 @@ public Quota getQuotaModule() { return this.quotaCall; } + /** + * Get ClientProtocol module implementation. + * @return ClientProtocol implementation + */ + @VisibleForTesting + public RouterClientProtocol getClientProtocolModule() { + return this.clientProto; + } + /** * Get RPC metrics info. * @return The instance of FederationRPCMetrics. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index 53bf53aab93ff..09050bbff6b92 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -482,6 +482,16 @@ + + dfs.federation.router.client.allow-partial-listing + true + + If the Router can return a partial list of files in a multi-destination mount point when one of the subclusters is unavailable. + True may return a partial list of files if a subcluster is down. + False will fail the request if one is unavailable. 
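A brief sketch of disabling partial listings through the ordinary configuration path (the test below flips the equivalent internal flag via reflection instead); the key constant is the RBFConfigKeys entry added above.

    // Fail getListing/getContentSummary on multi-destination paths instead of
    // returning partial results when a subcluster is unavailable.
    Configuration routerConf = new Configuration(false);
    routerConf.setBoolean(RBFConfigKeys.DFS_ROUTER_ALLOW_PARTIAL_LIST, false);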
+ + + dfs.federation.router.keytab.file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java index 94b712f534e05..31017480b4811 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java @@ -20,6 +20,13 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.internal.util.reflection.Whitebox.getInternalState; +import static org.mockito.internal.util.reflection.Whitebox.setInternalState; import java.io.IOException; import java.lang.reflect.Method; @@ -44,6 +51,13 @@ import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.StandbyException; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Test; /** * The the RPC interface of the {@link getRouter()} implemented by @@ -214,4 +228,49 @@ public void testProxyRenameFiles() throws IOException, InterruptedException { testRename(getRouterContext(), filename1, renamedFile, false); testRename2(getRouterContext(), filename1, renamedFile, false); } + + @Test + public void testSubclusterDown() throws Exception { + final int totalFiles = 6; + + List routers = getCluster().getRouters(); + + // Test the behavior when everything is fine + FileSystem fs = getRouterFileSystem(); + FileStatus[] files = fs.listStatus(new Path("/")); + assertEquals(totalFiles, files.length); + + // Simulate one of the subclusters is in standby + NameNode nn0 = getCluster().getNamenode("ns0", null).getNamenode(); + FSNamesystem ns0 = nn0.getNamesystem(); + HAContext nn0haCtx = (HAContext)getInternalState(ns0, "haContext"); + HAContext mockCtx = mock(HAContext.class); + doThrow(new StandbyException("Mock")).when(mockCtx).checkOperation(any()); + setInternalState(ns0, "haContext", mockCtx); + + // router0 should throw an exception + RouterContext router0 = routers.get(0); + RouterRpcServer router0RPCServer = router0.getRouter().getRpcServer(); + RouterClientProtocol router0ClientProtocol = + router0RPCServer.getClientProtocolModule(); + setInternalState(router0ClientProtocol, "allowPartialList", false); + try { + router0.getFileSystem().listStatus(new Path("/")); + fail("I should throw an exception"); + } catch (RemoteException re) { + GenericTestUtils.assertExceptionContains( + "No namenode available to invoke getListing", re); + } + + // router1 should report partial results + RouterContext router1 = 
routers.get(1); + files = router1.getFileSystem().listStatus(new Path("/")); + assertTrue("Found " + files.length + " items, we should have less", + files.length < totalFiles); + + + // Restore the HA context and the Router + setInternalState(ns0, "haContext", nn0haCtx); + setInternalState(router0ClientProtocol, "allowPartialList", true); + } } \ No newline at end of file From f2355c706361594b7b2ef8b65b37060eab1d66df Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Thu, 22 Nov 2018 00:34:08 +0530 Subject: [PATCH 0271/1308] HDFS-13776. RBF: Add Storage policies related ClientProtocol APIs. Contributed by Dibyendu Karmakar. --- .../router/RouterClientProtocol.java | 24 +-- .../router/RouterStoragePolicy.java | 98 ++++++++++++ .../federation/MiniRouterDFSCluster.java | 13 ++ .../federation/router/TestRouterRpc.java | 57 +++++++ .../TestRouterRpcStoragePolicySatisfier.java | 149 ++++++++++++++++++ 5 files changed, 325 insertions(+), 16 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcStoragePolicySatisfier.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index c8b7cdd1aa4d8..303eedf106b66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -121,6 +121,8 @@ public class RouterClientProtocol implements ClientProtocol { private final String superGroup; /** Erasure coding calls. */ private final ErasureCoding erasureCoding; + /** StoragePolicy calls. 
**/ + private final RouterStoragePolicy storagePolicy; RouterClientProtocol(Configuration conf, RouterRpcServer rpcServer) { this.rpcServer = rpcServer; @@ -144,6 +146,7 @@ public class RouterClientProtocol implements ClientProtocol { DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY, DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT); this.erasureCoding = new ErasureCoding(rpcServer); + this.storagePolicy = new RouterStoragePolicy(rpcServer); } @Override @@ -278,22 +281,12 @@ public boolean setReplication(String src, short replication) @Override public void setStoragePolicy(String src, String policyName) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE); - - List locations = rpcServer.getLocationsForPath(src, true); - RemoteMethod method = new RemoteMethod("setStoragePolicy", - new Class[] {String.class, String.class}, - new RemoteParam(), policyName); - rpcClient.invokeSequential(locations, method, null, null); + storagePolicy.setStoragePolicy(src, policyName); } @Override public BlockStoragePolicy[] getStoragePolicies() throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.READ); - - RemoteMethod method = new RemoteMethod("getStoragePolicies"); - String ns = subclusterResolver.getDefaultNamespace(); - return (BlockStoragePolicy[]) rpcClient.invokeSingle(ns, method); + return storagePolicy.getStoragePolicies(); } @Override @@ -1463,13 +1456,12 @@ public void reportBadBlocks(LocatedBlock[] blocks) throws IOException { @Override public void unsetStoragePolicy(String src) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + storagePolicy.unsetStoragePolicy(src); } @Override public BlockStoragePolicy getStoragePolicy(String path) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.READ, false); - return null; + return storagePolicy.getStoragePolicy(path); } @Override @@ -1557,7 +1549,7 @@ public void msync() throws IOException { @Override public void satisfyStoragePolicy(String path) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + storagePolicy.satisfyStoragePolicy(path); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java new file mode 100644 index 0000000000000..7145940cca4d4 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.namenode.NameNode; + +import java.io.IOException; +import java.util.List; + +/** + * Module that implements all the RPC calls in + * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol} related to + * Storage Policy in the {@link RouterRpcServer}. + */ +public class RouterStoragePolicy { + + /** RPC server to receive client calls. */ + private final RouterRpcServer rpcServer; + /** RPC clients to connect to the Namenodes. */ + private final RouterRpcClient rpcClient; + /** Interface to map global name space to HDFS subcluster name spaces. */ + private final FileSubclusterResolver subclusterResolver; + + public RouterStoragePolicy(RouterRpcServer server) { + this.rpcServer = server; + this.rpcClient = this.rpcServer.getRPCClient(); + this.subclusterResolver = this.rpcServer.getSubclusterResolver(); + } + + public void setStoragePolicy(String src, String policyName) + throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.WRITE); + + List locations = rpcServer.getLocationsForPath(src, true); + RemoteMethod method = new RemoteMethod("setStoragePolicy", + new Class[] {String.class, String.class}, + new RemoteParam(), + policyName); + rpcClient.invokeSequential(locations, method, null, null); + } + + public BlockStoragePolicy[] getStoragePolicies() throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.READ); + + RemoteMethod method = new RemoteMethod("getStoragePolicies"); + String ns = subclusterResolver.getDefaultNamespace(); + return (BlockStoragePolicy[]) rpcClient.invokeSingle(ns, method); + } + + public void unsetStoragePolicy(String src) throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); + + List locations = rpcServer.getLocationsForPath(src, true); + RemoteMethod method = new RemoteMethod("unsetStoragePolicy", + new Class[] {String.class}, + new RemoteParam()); + rpcClient.invokeSequential(locations, method); + } + + public BlockStoragePolicy getStoragePolicy(String path) + throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.READ, true); + + List locations = rpcServer.getLocationsForPath(path, false); + RemoteMethod method = new RemoteMethod("getStoragePolicy", + new Class[] {String.class}, + new RemoteParam()); + return (BlockStoragePolicy) rpcClient.invokeSequential(locations, method); + } + + public void satisfyStoragePolicy(String path) throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.READ, true); + + List locations = rpcServer.getLocationsForPath(path, true); + RemoteMethod method = new RemoteMethod("satisfyStoragePolicy", + new Class[] {String.class}, + new RemoteParam()); + rpcClient.invokeSequential(locations, method); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java index a5693a6f2c367..2df883cff6594 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java @@ -67,6 +67,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.DFSClient; @@ -118,6 +119,8 @@ public class MiniRouterDFSCluster { private boolean highAvailability; /** Number of datanodes per nameservice. */ private int numDatanodesPerNameservice = 2; + /** Custom storage type for each datanode. */ + private StorageType[][] storageTypes = null; /** Mini cluster. */ private MiniDFSCluster cluster; @@ -614,6 +617,15 @@ public void setNumDatanodesPerNameservice(int num) { this.numDatanodesPerNameservice = num; } + /** + * Set custom storage type configuration for each datanode. + * If storageTypes is uninitialized or passed null then + * StorageType.DEFAULT is used. + */ + public void setStorageTypes(StorageType[][] storageTypes) { + this.storageTypes = storageTypes; + } + /** * Set the DNs to belong to only one subcluster. */ @@ -767,6 +779,7 @@ public void startCluster(Configuration overrideConf) { .numDataNodes(numDNs) .nnTopology(topology) .dataNodeConfOverlays(dnConfs) + .storageTypes(storageTypes) .build(); cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index 204366e48d52f..8632203b06d01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -769,6 +770,62 @@ public void testProxyStoragePolicy() throws Exception { m, new Object[] {badPath, "badpolicy"}); } + @Test + public void testProxyGetAndUnsetStoragePolicy() throws Exception { + String file = "/testGetStoragePolicy"; + String nnFilePath = cluster.getNamenodeTestDirectoryForNS(ns) + file; + String routerFilePath = cluster.getFederatedTestDirectoryForNS(ns) + file; + + createFile(routerFS, routerFilePath, 32); + + // Get storage policy via router + BlockStoragePolicy policy = routerProtocol.getStoragePolicy(routerFilePath); + // Verify default policy is HOT + assertEquals(HdfsConstants.HOT_STORAGE_POLICY_NAME, policy.getName()); + assertEquals(HdfsConstants.HOT_STORAGE_POLICY_ID, policy.getId()); + + // Get storage policies via router + BlockStoragePolicy[] policies = routerProtocol.getStoragePolicies(); + 
BlockStoragePolicy[] nnPolicies = namenode.getClient().getStoragePolicies(); + // Verify policie returned by router is same as policies returned by NN + assertArrayEquals(nnPolicies, policies); + + BlockStoragePolicy newPolicy = policies[0]; + while (newPolicy.isCopyOnCreateFile()) { + // Pick a non copy on create policy. Beacuse if copyOnCreateFile is set + // then the policy cannot be changed after file creation. + Random rand = new Random(); + int randIndex = rand.nextInt(policies.length); + newPolicy = policies[randIndex]; + } + routerProtocol.setStoragePolicy(routerFilePath, newPolicy.getName()); + + // Get storage policy via router + policy = routerProtocol.getStoragePolicy(routerFilePath); + // Verify default policy + assertEquals(newPolicy.getName(), policy.getName()); + assertEquals(newPolicy.getId(), policy.getId()); + + // Verify policy via NN + BlockStoragePolicy nnPolicy = + namenode.getClient().getStoragePolicy(nnFilePath); + assertEquals(nnPolicy.getName(), policy.getName()); + assertEquals(nnPolicy.getId(), policy.getId()); + + // Unset storage policy via router + routerProtocol.unsetStoragePolicy(routerFilePath); + + // Get storage policy + policy = routerProtocol.getStoragePolicy(routerFilePath); + assertEquals(HdfsConstants.HOT_STORAGE_POLICY_NAME, policy.getName()); + assertEquals(HdfsConstants.HOT_STORAGE_POLICY_ID, policy.getId()); + + // Verify policy via NN + nnPolicy = namenode.getClient().getStoragePolicy(nnFilePath); + assertEquals(nnPolicy.getName(), policy.getName()); + assertEquals(nnPolicy.getId(), policy.getId()); + } + @Test public void testProxyGetPreferedBlockSize() throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcStoragePolicySatisfier.java new file mode 100644 index 0000000000000..fa1079a4edeb4 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcStoragePolicySatisfier.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.metrics.NamenodeBeanMetrics; +import org.apache.hadoop.hdfs.server.namenode.sps.Context; +import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier; +import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertEquals; + +/** + * Test StoragePolicySatisfy through router rpc calls. + */ +public class TestRouterRpcStoragePolicySatisfier { + + /** Federated HDFS cluster. */ + private static MiniRouterDFSCluster cluster; + + /** Client interface to the Router. */ + private static ClientProtocol routerProtocol; + + /** Filesystem interface to the Router. */ + private static FileSystem routerFS; + /** Filesystem interface to the Namenode. */ + private static FileSystem nnFS; + + @BeforeClass + public static void globalSetUp() throws Exception { + cluster = new MiniRouterDFSCluster(false, 1); + // Set storage types for the cluster + StorageType[][] newtypes = new StorageType[][] { + {StorageType.ARCHIVE, StorageType.DISK}}; + cluster.setStorageTypes(newtypes); + + Configuration conf = cluster.getNamenodes().get(0).getConf(); + conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, + HdfsConstants.StoragePolicySatisfierMode.EXTERNAL.toString()); + // Reduced refresh cycle to update latest datanodes. 
+ conf.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS, + 1000); + cluster.addNamenodeOverrides(conf); + + cluster.setNumDatanodesPerNameservice(1); + + // Start NNs and DNs and wait until ready + cluster.startCluster(); + + // Start routers with only an RPC service + Configuration routerConf = new RouterConfigBuilder() + .metrics() + .rpc() + .build(); + // We decrease the DN cache times to make the test faster + routerConf.setTimeDuration( + RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); + cluster.addRouterOverrides(routerConf); + cluster.startRouters(); + + // Register and verify all NNs with all routers + cluster.registerNamenodes(); + cluster.waitNamenodeRegistration(); + + // Create mock locations + cluster.installMockLocations(); + + // Random router for this test + MiniRouterDFSCluster.RouterContext rndRouter = cluster.getRandomRouter(); + + routerProtocol = rndRouter.getClient().getNamenode(); + routerFS = rndRouter.getFileSystem(); + nnFS = cluster.getNamenodes().get(0).getFileSystem(); + + NameNodeConnector nnc = DFSTestUtil.getNameNodeConnector(conf, + HdfsServerConstants.MOVER_ID_PATH, 1, false); + + StoragePolicySatisfier externalSps = new StoragePolicySatisfier(conf); + Context externalCtxt = new ExternalSPSContext(externalSps, nnc); + + externalSps.init(externalCtxt); + externalSps.start(HdfsConstants.StoragePolicySatisfierMode.EXTERNAL); + } + + @AfterClass + public static void tearDown() { + cluster.shutdown(); + } + + @Test + public void testStoragePolicySatisfier() throws Exception { + final String file = "/testStoragePolicySatisfierCommand"; + short repl = 1; + int size = 32; + DFSTestUtil.createFile(routerFS, new Path(file), size, repl, 0); + // Varify storage type is DISK + DFSTestUtil.waitExpectedStorageType(file, StorageType.DISK, 1, 20000, + (DistributedFileSystem) routerFS); + // Set storage policy as COLD + routerProtocol + .setStoragePolicy(file, HdfsConstants.COLD_STORAGE_POLICY_NAME); + // Verify storage policy is set properly + BlockStoragePolicy storagePolicy = routerProtocol.getStoragePolicy(file); + assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME, + storagePolicy.getName()); + // Invoke satisfy storage policy + routerProtocol.satisfyStoragePolicy(file); + // Verify storage type is ARCHIVE + DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 20000, + (DistributedFileSystem) routerFS); + + // Verify storage type via NN + DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 20000, + (DistributedFileSystem) nnFS); + } +} From 19088e1b496edd55cc2110ecee973d2be8068111 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Thu, 22 Nov 2018 08:26:22 +0530 Subject: [PATCH 0272/1308] HDFS-14089. RBF: Failed to specify server's Kerberos pricipal name in NamenodeHeartbeatService. Contributed by Ranith Sardar. 
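
The one-line fix below wraps the heartbeat service configuration in DFSHAAdmin.addSecurityConfiguration(...), so the Kerberos principal the RPC client should expect is derived from the NameNode principal instead of having to be set by every caller. A roughly equivalent sketch (not the helper's exact body; the key names are the ones removed from SecurityConfUtil in the second hunk):

    // Copy the configuration and advertise the NameNode's Kerberos principal
    // as the server principal the heartbeat RPC client should expect.
    Configuration secured = new Configuration(configuration);
    secured.set(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
        secured.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
    this.conf = secured;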
--- .../server/federation/router/NamenodeHeartbeatService.java | 3 ++- .../apache/hadoop/fs/contract/router/SecurityConfUtil.java | 6 ------ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java index 1349aa3056c2c..871ebaf9eef50 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.tools.DFSHAAdmin; import org.apache.hadoop.hdfs.tools.NNHAServiceTarget; import org.codehaus.jettison.json.JSONArray; import org.codehaus.jettison.json.JSONObject; @@ -108,7 +109,7 @@ public NamenodeHeartbeatService( @Override protected void serviceInit(Configuration configuration) throws Exception { - this.conf = configuration; + this.conf = DFSHAAdmin.addSecurityConfiguration(configuration); String nnDesc = nameserviceId; if (this.namenodeId != null && !this.namenodeId.isEmpty()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java index deb6ace16b16a..100313e151038 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java @@ -14,8 +14,6 @@ package org.apache.hadoop.fs.contract.router; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SERVICE_USER_NAME_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; @@ -109,10 +107,6 @@ public static Configuration initSecurity() throws Exception { spnegoPrincipal = SPNEGO_USER_NAME + "/" + krbInstance + "@" + kdc.getRealm(); - // Set auth configuration for mini DFS - conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - conf.set(HADOOP_SECURITY_SERVICE_USER_NAME_KEY, routerPrincipal); - // Setup principles and keytabs for dfs conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, routerPrincipal); conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab); From b320caecb32e0eb739ad925a4646bef1a85caebd Mon Sep 17 00:00:00 2001 From: Surendra Singh Lilhore Date: Tue, 4 Dec 2018 12:23:56 +0530 Subject: [PATCH 0273/1308] HDFS-14085. RBF: LS command for root shows wrong owner and permission information. Contributed by Ayush Saxena. 
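
The change below makes the router resolve a mount point's owner, group, permission and child count from the actual destination directory when one exists, falling back to the mount table entry only when it does not. A small behavioural sketch, assuming a /testdir -> ns0:/tmp/testdir mount entry and the nnFs0/routerFs handles used by the tests at the end of this patch:

    // Give the destination on the namenode distinctive attributes.
    nnFs0.mkdirs(new Path("/tmp/testdir/1"));
    nnFs0.setOwner(new Path("/tmp/testdir"), "Aowner", "Agroup");

    // Listing the router root now reflects the destination, not the mount entry.
    FileStatus[] root = routerFs.listStatus(new Path("/"));
    assertEquals("Aowner", root[0].getOwner());
    assertEquals("Agroup", root[0].getGroup());
    assertEquals(1, ((HdfsFileStatus) root[0]).getChildrenNum());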
--- .../federation/router/FederationUtil.java | 23 +- .../router/RouterClientProtocol.java | 29 +- .../router/TestRouterMountTable.java | 307 +++++++++++++----- 3 files changed, 278 insertions(+), 81 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java index f8c7a9bfbf116..f0d9168a36162 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java @@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; @@ -205,4 +206,24 @@ public static boolean isParentEntry(final String path, final String parent) { return path.charAt(parent.length()) == Path.SEPARATOR_CHAR || parent.equals(Path.SEPARATOR); } -} + + /** + * Add the the number of children for an existing HdfsFileStatus object. + * @param dirStatus HdfsfileStatus object. + * @param children number of children to be added. + * @return HdfsFileStatus with the number of children specified. + */ + public static HdfsFileStatus updateMountPointStatus(HdfsFileStatus dirStatus, + int children) { + return new HdfsFileStatus.Builder().atime(dirStatus.getAccessTime()) + .blocksize(dirStatus.getBlockSize()).children(children) + .ecPolicy(dirStatus.getErasureCodingPolicy()) + .feInfo(dirStatus.getFileEncryptionInfo()).fileId(dirStatus.getFileId()) + .group(dirStatus.getGroup()).isdir(dirStatus.isDir()) + .length(dirStatus.getLen()).mtime(dirStatus.getModificationTime()) + .owner(dirStatus.getOwner()).path(dirStatus.getLocalNameInBytes()) + .perm(dirStatus.getPermission()).replication(dirStatus.getReplication()) + .storagePolicy(dirStatus.getStoragePolicy()) + .symlink(dirStatus.getSymlinkInBytes()).build(); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 303eedf106b66..c52b7655657bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.updateMountPointStatus; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.fs.BatchedRemoteIterator; @@ -675,7 +676,6 @@ public DirectoryListing getListing(String src, byte[] startAfter, if (dates != null && dates.containsKey(child)) { date = dates.get(child); } - // TODO add number of children HdfsFileStatus dirStatus = getMountPointStatus(child, 0, date); // This may overwrite existing listing entries with the 
mount point @@ -1669,12 +1669,13 @@ private HdfsFileStatus getFileInfoAll(final List locations, // Get the file info from everybody Map results = rpcClient.invokeConcurrent(locations, method, HdfsFileStatus.class); - + int children=0; // We return the first file HdfsFileStatus dirStatus = null; for (RemoteLocation loc : locations) { HdfsFileStatus fileStatus = results.get(loc); if (fileStatus != null) { + children += fileStatus.getChildrenNum(); if (!fileStatus.isDirectory()) { return fileStatus; } else if (dirStatus == null) { @@ -1682,7 +1683,10 @@ private HdfsFileStatus getFileInfoAll(final List locations, } } } - return dirStatus; + if (dirStatus != null) { + return updateMountPointStatus(dirStatus, children); + } + return null; } /** @@ -1738,12 +1742,23 @@ private HdfsFileStatus getMountPointStatus( String group = this.superGroup; if (subclusterResolver instanceof MountTableResolver) { try { + String mName = name.startsWith("/") ? name : "/" + name; MountTableResolver mountTable = (MountTableResolver) subclusterResolver; - MountTable entry = mountTable.getMountPoint(name); + MountTable entry = mountTable.getMountPoint(mName); if (entry != null) { - permission = entry.getMode(); - owner = entry.getOwnerName(); - group = entry.getGroupName(); + HdfsFileStatus fInfo = getFileInfoAll(entry.getDestinations(), + new RemoteMethod("getFileInfo", new Class[] {String.class}, + new RemoteParam())); + if (fInfo != null) { + permission = fInfo.getPermission(); + owner = fInfo.getOwner(); + group = fInfo.getGroup(); + childrenNum = fInfo.getChildrenNum(); + } else { + permission = entry.getMode(); + owner = entry.getOwnerName(); + group = entry.getGroupName(); + } } } catch (IOException e) { LOG.error("Cannot get mount point: {}", e.getMessage()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java index d2b78d34edfa8..9538d7117060e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -60,18 +61,21 @@ public class TestRouterMountTable { private static StateStoreDFSCluster cluster; - private static NamenodeContext nnContext; + private static NamenodeContext nnContext0; + private static NamenodeContext nnContext1; private static RouterContext routerContext; private static MountTableResolver mountTable; private static ClientProtocol routerProtocol; private static long startTime; + private static FileSystem nnFs0; + private static FileSystem nnFs1; @BeforeClass public static void globalSetUp() throws Exception { startTime = Time.now(); // Build and start a federated cluster - cluster = new StateStoreDFSCluster(false, 1); + cluster = new StateStoreDFSCluster(false, 2); Configuration conf = new RouterConfigBuilder() .stateStore() .admin() @@ -83,7 +87,10 @@ public static void globalSetUp() throws Exception { cluster.waitClusterUp(); // Get the end points - nnContext = cluster.getRandomNamenode(); + nnContext0 = cluster.getNamenode("ns0", null); + nnContext1 = cluster.getNamenode("ns1", null); + nnFs0 = 
nnContext0.getFileSystem(); + nnFs1 = nnContext1.getFileSystem(); routerContext = cluster.getRandomRouter(); Router router = routerContext.getRouter(); routerProtocol = routerContext.getClient().getNamenode(); @@ -129,12 +136,11 @@ public void testReadOnly() throws Exception { assertTrue(addMountTable(regularEntry)); // Create a folder which should show in all locations - final FileSystem nnFs = nnContext.getFileSystem(); final FileSystem routerFs = routerContext.getFileSystem(); assertTrue(routerFs.mkdirs(new Path("/regular/newdir"))); FileStatus dirStatusNn = - nnFs.getFileStatus(new Path("/testdir/newdir")); + nnFs0.getFileStatus(new Path("/testdir/newdir")); assertTrue(dirStatusNn.isDirectory()); FileStatus dirStatusRegular = routerFs.getFileStatus(new Path("/regular/newdir")); @@ -179,93 +185,248 @@ private boolean addMountTable(final MountTable entry) throws IOException { */ @Test public void testListFilesTime() throws Exception { - // Add mount table entry - MountTable addEntry = MountTable.newInstance( - "/testdir", Collections.singletonMap("ns0", "/testdir")); - assertTrue(addMountTable(addEntry)); - addEntry = MountTable.newInstance( - "/testdir2", Collections.singletonMap("ns0", "/testdir2")); - assertTrue(addMountTable(addEntry)); - addEntry = MountTable.newInstance( - "/testdir/subdir", Collections.singletonMap("ns0", "/testdir/subdir")); - assertTrue(addMountTable(addEntry)); - addEntry = MountTable.newInstance( - "/testdir3/subdir1", Collections.singletonMap("ns0", "/testdir3")); - assertTrue(addMountTable(addEntry)); - addEntry = MountTable.newInstance( - "/testA/testB/testC/testD", Collections.singletonMap("ns0", "/test")); - assertTrue(addMountTable(addEntry)); + try { + // Add mount table entry + MountTable addEntry = MountTable.newInstance("/testdir", + Collections.singletonMap("ns0", "/testdir")); + assertTrue(addMountTable(addEntry)); + addEntry = MountTable.newInstance("/testdir2", + Collections.singletonMap("ns0", "/testdir2")); + assertTrue(addMountTable(addEntry)); + addEntry = MountTable.newInstance("/testdir/subdir", + Collections.singletonMap("ns0", "/testdir/subdir")); + assertTrue(addMountTable(addEntry)); + addEntry = MountTable.newInstance("/testdir3/subdir1", + Collections.singletonMap("ns0", "/testdir3")); + assertTrue(addMountTable(addEntry)); + addEntry = MountTable.newInstance("/testA/testB/testC/testD", + Collections.singletonMap("ns0", "/test")); + assertTrue(addMountTable(addEntry)); - // Create test dir in NN - final FileSystem nnFs = nnContext.getFileSystem(); - assertTrue(nnFs.mkdirs(new Path("/newdir"))); + // Create test dir in NN + assertTrue(nnFs0.mkdirs(new Path("/newdir"))); - Map pathModTime = new TreeMap<>(); - for (String mount : mountTable.getMountPoints("/")) { - if (mountTable.getMountPoint("/"+mount) != null) { - pathModTime.put(mount, mountTable.getMountPoint("/"+mount) - .getDateModified()); - } else { - List entries = mountTable.getMounts("/"+mount); - for (MountTable entry : entries) { - if (pathModTime.get(mount) == null || - pathModTime.get(mount) < entry.getDateModified()) { - pathModTime.put(mount, entry.getDateModified()); + Map pathModTime = new TreeMap<>(); + for (String mount : mountTable.getMountPoints("/")) { + if (mountTable.getMountPoint("/" + mount) != null) { + pathModTime.put(mount, + mountTable.getMountPoint("/" + mount).getDateModified()); + } else { + List entries = mountTable.getMounts("/" + mount); + for (MountTable entry : entries) { + if (pathModTime.get(mount) == null + || pathModTime.get(mount) < 
entry.getDateModified()) { + pathModTime.put(mount, entry.getDateModified()); + } } } } + FileStatus[] iterator = nnFs0.listStatus(new Path("/")); + for (FileStatus file : iterator) { + pathModTime.put(file.getPath().getName(), file.getModificationTime()); + } + // Fetch listing + DirectoryListing listing = + routerProtocol.getListing("/", HdfsFileStatus.EMPTY_NAME, false); + Iterator pathModTimeIterator = pathModTime.keySet().iterator(); + + // Match date/time for each path returned + for (HdfsFileStatus f : listing.getPartialListing()) { + String fileName = pathModTimeIterator.next(); + String currentFile = f.getFullPath(new Path("/")).getName(); + Long currentTime = f.getModificationTime(); + Long expectedTime = pathModTime.get(currentFile); + + assertEquals(currentFile, fileName); + assertTrue(currentTime > startTime); + assertEquals(currentTime, expectedTime); + } + // Verify the total number of results found/matched + assertEquals(pathModTime.size(), listing.getPartialListing().length); + } finally { + nnFs0.delete(new Path("/newdir"), true); } - FileStatus[] iterator = nnFs.listStatus(new Path("/")); - for (FileStatus file : iterator) { - pathModTime.put(file.getPath().getName(), file.getModificationTime()); + } + + /** + * Verify permission for a mount point when the actual destination is not + * present. It returns the permissions of the mount point. + */ + @Test + public void testMountTablePermissionsNoDest() throws IOException { + MountTable addEntry; + addEntry = MountTable.newInstance("/testdir1", + Collections.singletonMap("ns0", "/tmp/testdir1")); + addEntry.setGroupName("group1"); + addEntry.setOwnerName("owner1"); + addEntry.setMode(FsPermission.createImmutable((short) 0775)); + assertTrue(addMountTable(addEntry)); + FileStatus[] list = routerContext.getFileSystem().listStatus(new Path("/")); + assertEquals("group1", list[0].getGroup()); + assertEquals("owner1", list[0].getOwner()); + assertEquals((short) 0775, list[0].getPermission().toShort()); + } + + /** + * Verify permission for a mount point when the actual destination present. It + * returns the permissions of the actual destination pointed by the mount + * point. + */ + @Test + public void testMountTablePermissionsWithDest() throws IOException { + try { + MountTable addEntry = MountTable.newInstance("/testdir", + Collections.singletonMap("ns0", "/tmp/testdir")); + assertTrue(addMountTable(addEntry)); + nnFs0.mkdirs(new Path("/tmp/testdir")); + nnFs0.setOwner(new Path("/tmp/testdir"), "Aowner", "Agroup"); + nnFs0.setPermission(new Path("/tmp/testdir"), + FsPermission.createImmutable((short) 775)); + FileStatus[] list = + routerContext.getFileSystem().listStatus(new Path("/")); + assertEquals("Agroup", list[0].getGroup()); + assertEquals("Aowner", list[0].getOwner()); + assertEquals((short) 775, list[0].getPermission().toShort()); + } finally { + nnFs0.delete(new Path("/tmp"), true); } - // Fetch listing - DirectoryListing listing = - routerProtocol.getListing("/", HdfsFileStatus.EMPTY_NAME, false); - Iterator pathModTimeIterator = pathModTime.keySet().iterator(); + } - // Match date/time for each path returned - for(HdfsFileStatus f : listing.getPartialListing()) { - String fileName = pathModTimeIterator.next(); - String currentFile = f.getFullPath(new Path("/")).getName(); - Long currentTime = f.getModificationTime(); - Long expectedTime = pathModTime.get(currentFile); + /** + * Verify permission for a mount point when the multiple destinations are + * present with both having same permissions. 
It returns the same actual + * permissions of the actual destinations pointed by the mount point. + */ + @Test + public void testMountTablePermissionsMultiDest() throws IOException { + try { + Map destMap = new HashMap<>(); + destMap.put("ns0", "/tmp/testdir"); + destMap.put("ns1", "/tmp/testdir01"); + MountTable addEntry = MountTable.newInstance("/testdir", destMap); + assertTrue(addMountTable(addEntry)); + nnFs0.mkdirs(new Path("/tmp/testdir")); + nnFs0.setOwner(new Path("/tmp/testdir"), "Aowner", "Agroup"); + nnFs0.setPermission(new Path("/tmp/testdir"), + FsPermission.createImmutable((short) 775)); + nnFs1.mkdirs(new Path("/tmp/testdir01")); + nnFs1.setOwner(new Path("/tmp/testdir01"), "Aowner", "Agroup"); + nnFs1.setPermission(new Path("/tmp/testdir01"), + FsPermission.createImmutable((short) 775)); + FileStatus[] list = + routerContext.getFileSystem().listStatus(new Path("/")); + assertEquals("Agroup", list[0].getGroup()); + assertEquals("Aowner", list[0].getOwner()); + assertEquals((short) 775, list[0].getPermission().toShort()); + } finally { + nnFs0.delete(new Path("/tmp"), true); + nnFs1.delete(new Path("/tmp"), true); + } + } - assertEquals(currentFile, fileName); - assertTrue(currentTime > startTime); - assertEquals(currentTime, expectedTime); + /** + * Verify permission for a mount point when the multiple destinations are + * present with both having different permissions. It returns the actual + * permissions of either of the actual destinations pointed by the mount + * point. + */ + @Test + public void testMountTablePermissionsMultiDestDifferentPerm() + throws IOException { + try { + Map destMap = new HashMap<>(); + destMap.put("ns0", "/tmp/testdir"); + destMap.put("ns1", "/tmp/testdir01"); + MountTable addEntry = MountTable.newInstance("/testdir", destMap); + assertTrue(addMountTable(addEntry)); + nnFs0.mkdirs(new Path("/tmp/testdir")); + nnFs0.setOwner(new Path("/tmp/testdir"), "Aowner", "Agroup"); + nnFs0.setPermission(new Path("/tmp/testdir"), + FsPermission.createImmutable((short) 775)); + nnFs1.mkdirs(new Path("/tmp/testdir01")); + nnFs1.setOwner(new Path("/tmp/testdir01"), "Aowner01", "Agroup01"); + nnFs1.setPermission(new Path("/tmp/testdir01"), + FsPermission.createImmutable((short) 755)); + FileStatus[] list = + routerContext.getFileSystem().listStatus(new Path("/")); + assertTrue("Agroup".equals(list[0].getGroup()) + || "Agroup01".equals(list[0].getGroup())); + assertTrue("Aowner".equals(list[0].getOwner()) + || "Aowner01".equals(list[0].getOwner())); + assertTrue(((short) 775) == list[0].getPermission().toShort() + || ((short) 755) == list[0].getPermission().toShort()); + } finally { + nnFs0.delete(new Path("/tmp"), true); + nnFs1.delete(new Path("/tmp"), true); } - // Verify the total number of results found/matched - assertEquals(pathModTime.size(), listing.getPartialListing().length); } /** - * Verify that the file listing contains correct permission. + * Validate whether mount point name gets resolved or not. On successful + * resolution the details returned would be the ones actually set on the mount + * point. 
*/ @Test - public void testMountTablePermissions() throws Exception { - // Add mount table entries - MountTable addEntry = MountTable.newInstance( - "/testdir1", Collections.singletonMap("ns0", "/testdir1")); + public void testMountPointResolved() throws IOException { + MountTable addEntry = MountTable.newInstance("/testdir", + Collections.singletonMap("ns0", "/tmp/testdir")); addEntry.setGroupName("group1"); addEntry.setOwnerName("owner1"); - addEntry.setMode(FsPermission.createImmutable((short)0775)); - assertTrue(addMountTable(addEntry)); - addEntry = MountTable.newInstance( - "/testdir2", Collections.singletonMap("ns0", "/testdir2")); - addEntry.setGroupName("group2"); - addEntry.setOwnerName("owner2"); - addEntry.setMode(FsPermission.createImmutable((short)0755)); assertTrue(addMountTable(addEntry)); + HdfsFileStatus finfo = routerProtocol.getFileInfo("/testdir"); + FileStatus[] finfo1 = + routerContext.getFileSystem().listStatus(new Path("/")); + assertEquals("owner1", finfo.getOwner()); + assertEquals("owner1", finfo1[0].getOwner()); + assertEquals("group1", finfo.getGroup()); + assertEquals("group1", finfo1[0].getGroup()); + } - HdfsFileStatus fs = routerProtocol.getFileInfo("/testdir1"); - assertEquals("group1", fs.getGroup()); - assertEquals("owner1", fs.getOwner()); - assertEquals((short) 0775, fs.getPermission().toShort()); + /** + * Validate the number of children for the mount point.It must be equal to the + * number of children of the destination pointed by the mount point. + */ + @Test + public void testMountPointChildren() throws IOException { + try { + MountTable addEntry = MountTable.newInstance("/testdir", + Collections.singletonMap("ns0", "/tmp/testdir")); + assertTrue(addMountTable(addEntry)); + nnFs0.mkdirs(new Path("/tmp/testdir")); + nnFs0.mkdirs(new Path("/tmp/testdir/1")); + nnFs0.mkdirs(new Path("/tmp/testdir/2")); + FileStatus[] finfo1 = + routerContext.getFileSystem().listStatus(new Path("/")); + assertEquals(2, ((HdfsFileStatus) finfo1[0]).getChildrenNum()); + } finally { + nnFs0.delete(new Path("/tmp"), true); + } + } - fs = routerProtocol.getFileInfo("/testdir2"); - assertEquals("group2", fs.getGroup()); - assertEquals("owner2", fs.getOwner()); - assertEquals((short) 0755, fs.getPermission().toShort()); + /** + * Validate the number of children for the mount point pointing to multiple + * destinations.It must be equal to the sum of number of children of the + * destinations pointed by the mount point. + */ + @Test + public void testMountPointChildrenMultiDest() throws IOException { + try { + Map destMap = new HashMap<>(); + destMap.put("ns0", "/tmp/testdir"); + destMap.put("ns1", "/tmp/testdir01"); + MountTable addEntry = MountTable.newInstance("/testdir", destMap); + assertTrue(addMountTable(addEntry)); + nnFs0.mkdirs(new Path("/tmp/testdir")); + nnFs0.mkdirs(new Path("/tmp/testdir")); + nnFs1.mkdirs(new Path("/tmp/testdir01")); + nnFs0.mkdirs(new Path("/tmp/testdir/1")); + nnFs1.mkdirs(new Path("/tmp/testdir01/1")); + FileStatus[] finfo1 = + routerContext.getFileSystem().listStatus(new Path("/")); + assertEquals(2, ((HdfsFileStatus) finfo1[0]).getChildrenNum()); + } finally { + nnFs0.delete(new Path("/tmp"), true); + nnFs0.delete(new Path("/tmp"), true); + } } } \ No newline at end of file From 6aa7aabff801905f55a18d117f927d0125057d78 Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Tue, 4 Dec 2018 19:58:38 +0800 Subject: [PATCH 0274/1308] HDFS-14114. RBF: MIN_ACTIVE_RATIO should be configurable. Contributed by Fei Hui. 
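
The patch below turns the previously hard-coded 50% minimum active ratio into the dfs.federation.router.connection.min-active-ratio property. A minimal usage sketch, mirroring the test added at the end of this patch (all names come from the patch itself):

    Configuration conf = new Configuration();
    // With 0.8, a pool is trimmed whenever fewer than 80% of its connections
    // are active, so fewer idle connections are kept than with the old 0.5.
    conf.setFloat(
        RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_MIN_ACTIVE_RATIO, 0.8f);
    ConnectionManager manager = new ConnectionManager(conf);
    manager.start();
    // ... hand out connections via getConnection(ugi, nnAddress, protocol) ...
    manager.close();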
--- .../federation/router/ConnectionManager.java | 20 +++++--- .../federation/router/ConnectionPool.java | 14 ++++- .../federation/router/RBFConfigKeys.java | 5 ++ .../src/main/resources/hdfs-rbf-default.xml | 8 +++ .../router/TestConnectionManager.java | 51 ++++++++++++++++--- 5 files changed, 83 insertions(+), 15 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index fa2bf944dda6d..74bbbb572fd27 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -49,10 +49,6 @@ public class ConnectionManager { private static final Logger LOG = LoggerFactory.getLogger(ConnectionManager.class); - /** Minimum amount of active connections: 50%. */ - protected static final float MIN_ACTIVE_RATIO = 0.5f; - - /** Configuration for the connection manager, pool and sockets. */ private final Configuration conf; @@ -60,6 +56,8 @@ public class ConnectionManager { private final int minSize = 1; /** Max number of connections per user + nn. */ private final int maxSize; + /** Min ratio of active connections per user + nn. */ + private final float minActiveRatio; /** How often we close a pool for a particular user + nn. */ private final long poolCleanupPeriodMs; @@ -96,10 +94,13 @@ public class ConnectionManager { public ConnectionManager(Configuration config) { this.conf = config; - // Configure minimum and maximum connection pools + // Configure minimum, maximum and active connection pools this.maxSize = this.conf.getInt( RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE, RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT); + this.minActiveRatio = this.conf.getFloat( + RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_MIN_ACTIVE_RATIO, + RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_MIN_ACTIVE_RATIO_DEFAULT); // Map with the connections indexed by UGI and Namenode this.pools = new HashMap<>(); @@ -203,7 +204,8 @@ public ConnectionContext getConnection(UserGroupInformation ugi, pool = this.pools.get(connectionId); if (pool == null) { pool = new ConnectionPool( - this.conf, nnAddress, ugi, this.minSize, this.maxSize, protocol); + this.conf, nnAddress, ugi, this.minSize, this.maxSize, + this.minActiveRatio, protocol); this.pools.put(connectionId, pool); } } finally { @@ -326,8 +328,9 @@ void cleanup(ConnectionPool pool) { long timeSinceLastActive = Time.now() - pool.getLastActiveTime(); int total = pool.getNumConnections(); int active = pool.getNumActiveConnections(); + float poolMinActiveRatio = pool.getMinActiveRatio(); if (timeSinceLastActive > connectionCleanupPeriodMs || - active < MIN_ACTIVE_RATIO * total) { + active < poolMinActiveRatio * total) { // Remove and close 1 connection List conns = pool.removeConnections(1); for (ConnectionContext conn : conns) { @@ -412,8 +415,9 @@ public void run() { try { int total = pool.getNumConnections(); int active = pool.getNumActiveConnections(); + float poolMinActiveRatio = pool.getMinActiveRatio(); if (pool.getNumConnections() < pool.getMaxSize() && - active >= MIN_ACTIVE_RATIO * total) { + active >= poolMinActiveRatio * total) { ConnectionContext conn = pool.newConnection(); pool.addConnection(conn); } else { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java index fab3b81bc2578..f868521304752 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java @@ -91,6 +91,8 @@ public class ConnectionPool { private final int minSize; /** Max number of connections per user. */ private final int maxSize; + /** Min ratio of active connections per user. */ + private final float minActiveRatio; /** The last time a connection was active. */ private volatile long lastActiveTime = 0; @@ -98,7 +100,7 @@ public class ConnectionPool { protected ConnectionPool(Configuration config, String address, UserGroupInformation user, int minPoolSize, int maxPoolSize, - Class proto) throws IOException { + float minActiveRatio, Class proto) throws IOException { this.conf = config; @@ -112,6 +114,7 @@ protected ConnectionPool(Configuration config, String address, // Set configuration parameters for the pool this.minSize = minPoolSize; this.maxSize = maxPoolSize; + this.minActiveRatio = minActiveRatio; // Add minimum connections to the pool for (int i=0; i + + dfs.federation.router.connection.min-active-ratio + 0.5f + + Minimum active ratio of connections from the router to namenodes. + + + dfs.federation.router.connection.clean.ms 10000 diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java index 765f6c84e5611..6c1e448a392f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java @@ -80,14 +80,14 @@ public void testCleanup() throws Exception { Map poolMap = connManager.getPools(); ConnectionPool pool1 = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, 0.5f, ClientProtocol.class); addConnectionsToPool(pool1, 9, 4); poolMap.put( new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS, ClientProtocol.class), pool1); ConnectionPool pool2 = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER2, 0, 10, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER2, 0, 10, 0.5f, ClientProtocol.class); addConnectionsToPool(pool2, 10, 10); poolMap.put( new ConnectionPoolId(TEST_USER2, TEST_NN_ADDRESS, ClientProtocol.class), @@ -110,7 +110,7 @@ public void testCleanup() throws Exception { // Make sure the number of connections doesn't go below minSize ConnectionPool pool3 = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER3, 2, 10, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER3, 2, 10, 0.5f, ClientProtocol.class); addConnectionsToPool(pool3, 8, 0); poolMap.put( new ConnectionPoolId(TEST_USER3, TEST_NN_ADDRESS, ClientProtocol.class), @@ -171,7 +171,7 @@ public void testGetConnection() throws Exception { int activeConns = 5; ConnectionPool pool = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, ClientProtocol.class); + conf, TEST_NN_ADDRESS, 
TEST_USER1, 0, 10, 0.5f, ClientProtocol.class); addConnectionsToPool(pool, totalConns, activeConns); poolMap.put( new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS, ClientProtocol.class), @@ -196,7 +196,7 @@ public void testGetConnection() throws Exception { @Test public void testValidClientIndex() throws Exception { ConnectionPool pool = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 2, 2, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER1, 2, 2, 0.5f, ClientProtocol.class); for(int i = -3; i <= 3; i++) { pool.getClientIndex().set(i); ConnectionContext conn = pool.getConnection(); @@ -212,7 +212,7 @@ public void getGetConnectionNamenodeProtocol() throws Exception { int activeConns = 5; ConnectionPool pool = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, NamenodeProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, 0.5f, NamenodeProtocol.class); addConnectionsToPool(pool, totalConns, activeConns); poolMap.put( new ConnectionPoolId( @@ -262,4 +262,43 @@ private void checkPoolConnections(UserGroupInformation ugi, } } + @Test + public void testConfigureConnectionActiveRatio() throws IOException { + final int totalConns = 10; + int activeConns = 7; + + Configuration tmpConf = new Configuration(); + // Set dfs.federation.router.connection.min-active-ratio 0.8f + tmpConf.setFloat( + RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_MIN_ACTIVE_RATIO, 0.8f); + ConnectionManager tmpConnManager = new ConnectionManager(tmpConf); + tmpConnManager.start(); + + // Create one new connection pool + tmpConnManager.getConnection(TEST_USER1, TEST_NN_ADDRESS, + NamenodeProtocol.class); + + Map poolMap = tmpConnManager.getPools(); + ConnectionPoolId connectionPoolId = new ConnectionPoolId(TEST_USER1, + TEST_NN_ADDRESS, NamenodeProtocol.class); + ConnectionPool pool = poolMap.get(connectionPoolId); + + // Test min active ratio is 0.8f + assertEquals(0.8f, pool.getMinActiveRatio(), 0.001f); + + pool.getConnection().getClient(); + // Test there is one active connection in pool + assertEquals(1, pool.getNumActiveConnections()); + + // Add other 6 active/9 total connections to pool + addConnectionsToPool(pool, totalConns - 1, activeConns - 1); + + // There are 7 active connections. + // The active number is less than totalConns(10) * minActiveRatio(0.8f). + // We can cleanup the pool + tmpConnManager.cleanup(pool); + assertEquals(totalConns - 1, pool.getNumConnections()); + + tmpConnManager.close(); + } } From 0ca7142c112c14004a6411df0f0d1bf3c4d17702 Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Tue, 4 Dec 2018 22:16:00 +0800 Subject: [PATCH 0275/1308] Revert "HDFS-14114. RBF: MIN_ACTIVE_RATIO should be configurable. Contributed by Fei Hui." This reverts commit 7c0d6f65fde12ead91ed7c706521ad1d3dc995f8. 
--- .../federation/router/ConnectionManager.java | 20 +++----- .../federation/router/ConnectionPool.java | 14 +---- .../federation/router/RBFConfigKeys.java | 5 -- .../src/main/resources/hdfs-rbf-default.xml | 8 --- .../router/TestConnectionManager.java | 51 +++---------------- 5 files changed, 15 insertions(+), 83 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index 74bbbb572fd27..fa2bf944dda6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -49,6 +49,10 @@ public class ConnectionManager { private static final Logger LOG = LoggerFactory.getLogger(ConnectionManager.class); + /** Minimum amount of active connections: 50%. */ + protected static final float MIN_ACTIVE_RATIO = 0.5f; + + /** Configuration for the connection manager, pool and sockets. */ private final Configuration conf; @@ -56,8 +60,6 @@ public class ConnectionManager { private final int minSize = 1; /** Max number of connections per user + nn. */ private final int maxSize; - /** Min ratio of active connections per user + nn. */ - private final float minActiveRatio; /** How often we close a pool for a particular user + nn. */ private final long poolCleanupPeriodMs; @@ -94,13 +96,10 @@ public class ConnectionManager { public ConnectionManager(Configuration config) { this.conf = config; - // Configure minimum, maximum and active connection pools + // Configure minimum and maximum connection pools this.maxSize = this.conf.getInt( RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE, RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT); - this.minActiveRatio = this.conf.getFloat( - RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_MIN_ACTIVE_RATIO, - RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_MIN_ACTIVE_RATIO_DEFAULT); // Map with the connections indexed by UGI and Namenode this.pools = new HashMap<>(); @@ -204,8 +203,7 @@ public ConnectionContext getConnection(UserGroupInformation ugi, pool = this.pools.get(connectionId); if (pool == null) { pool = new ConnectionPool( - this.conf, nnAddress, ugi, this.minSize, this.maxSize, - this.minActiveRatio, protocol); + this.conf, nnAddress, ugi, this.minSize, this.maxSize, protocol); this.pools.put(connectionId, pool); } } finally { @@ -328,9 +326,8 @@ void cleanup(ConnectionPool pool) { long timeSinceLastActive = Time.now() - pool.getLastActiveTime(); int total = pool.getNumConnections(); int active = pool.getNumActiveConnections(); - float poolMinActiveRatio = pool.getMinActiveRatio(); if (timeSinceLastActive > connectionCleanupPeriodMs || - active < poolMinActiveRatio * total) { + active < MIN_ACTIVE_RATIO * total) { // Remove and close 1 connection List conns = pool.removeConnections(1); for (ConnectionContext conn : conns) { @@ -415,9 +412,8 @@ public void run() { try { int total = pool.getNumConnections(); int active = pool.getNumActiveConnections(); - float poolMinActiveRatio = pool.getMinActiveRatio(); if (pool.getNumConnections() < pool.getMaxSize() && - active >= poolMinActiveRatio * total) { + active >= MIN_ACTIVE_RATIO * total) { ConnectionContext conn = pool.newConnection(); pool.addConnection(conn); } else { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java index f868521304752..fab3b81bc2578 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java @@ -91,8 +91,6 @@ public class ConnectionPool { private final int minSize; /** Max number of connections per user. */ private final int maxSize; - /** Min ratio of active connections per user. */ - private final float minActiveRatio; /** The last time a connection was active. */ private volatile long lastActiveTime = 0; @@ -100,7 +98,7 @@ public class ConnectionPool { protected ConnectionPool(Configuration config, String address, UserGroupInformation user, int minPoolSize, int maxPoolSize, - float minActiveRatio, Class proto) throws IOException { + Class proto) throws IOException { this.conf = config; @@ -114,7 +112,6 @@ protected ConnectionPool(Configuration config, String address, // Set configuration parameters for the pool this.minSize = minPoolSize; this.maxSize = maxPoolSize; - this.minActiveRatio = minActiveRatio; // Add minimum connections to the pool for (int i=0; i - - dfs.federation.router.connection.min-active-ratio - 0.5f - - Minimum active ratio of connections from the router to namenodes. - - - dfs.federation.router.connection.clean.ms 10000 diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java index 6c1e448a392f6..765f6c84e5611 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java @@ -80,14 +80,14 @@ public void testCleanup() throws Exception { Map poolMap = connManager.getPools(); ConnectionPool pool1 = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, 0.5f, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, ClientProtocol.class); addConnectionsToPool(pool1, 9, 4); poolMap.put( new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS, ClientProtocol.class), pool1); ConnectionPool pool2 = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER2, 0, 10, 0.5f, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER2, 0, 10, ClientProtocol.class); addConnectionsToPool(pool2, 10, 10); poolMap.put( new ConnectionPoolId(TEST_USER2, TEST_NN_ADDRESS, ClientProtocol.class), @@ -110,7 +110,7 @@ public void testCleanup() throws Exception { // Make sure the number of connections doesn't go below minSize ConnectionPool pool3 = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER3, 2, 10, 0.5f, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER3, 2, 10, ClientProtocol.class); addConnectionsToPool(pool3, 8, 0); poolMap.put( new ConnectionPoolId(TEST_USER3, TEST_NN_ADDRESS, ClientProtocol.class), @@ -171,7 +171,7 @@ public void testGetConnection() throws Exception { int activeConns = 5; ConnectionPool pool = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, 0.5f, ClientProtocol.class); + conf, 
TEST_NN_ADDRESS, TEST_USER1, 0, 10, ClientProtocol.class); addConnectionsToPool(pool, totalConns, activeConns); poolMap.put( new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS, ClientProtocol.class), @@ -196,7 +196,7 @@ public void testGetConnection() throws Exception { @Test public void testValidClientIndex() throws Exception { ConnectionPool pool = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 2, 2, 0.5f, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER1, 2, 2, ClientProtocol.class); for(int i = -3; i <= 3; i++) { pool.getClientIndex().set(i); ConnectionContext conn = pool.getConnection(); @@ -212,7 +212,7 @@ public void getGetConnectionNamenodeProtocol() throws Exception { int activeConns = 5; ConnectionPool pool = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, 0.5f, NamenodeProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, NamenodeProtocol.class); addConnectionsToPool(pool, totalConns, activeConns); poolMap.put( new ConnectionPoolId( @@ -262,43 +262,4 @@ private void checkPoolConnections(UserGroupInformation ugi, } } - @Test - public void testConfigureConnectionActiveRatio() throws IOException { - final int totalConns = 10; - int activeConns = 7; - - Configuration tmpConf = new Configuration(); - // Set dfs.federation.router.connection.min-active-ratio 0.8f - tmpConf.setFloat( - RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_MIN_ACTIVE_RATIO, 0.8f); - ConnectionManager tmpConnManager = new ConnectionManager(tmpConf); - tmpConnManager.start(); - - // Create one new connection pool - tmpConnManager.getConnection(TEST_USER1, TEST_NN_ADDRESS, - NamenodeProtocol.class); - - Map poolMap = tmpConnManager.getPools(); - ConnectionPoolId connectionPoolId = new ConnectionPoolId(TEST_USER1, - TEST_NN_ADDRESS, NamenodeProtocol.class); - ConnectionPool pool = poolMap.get(connectionPoolId); - - // Test min active ratio is 0.8f - assertEquals(0.8f, pool.getMinActiveRatio(), 0.001f); - - pool.getConnection().getClient(); - // Test there is one active connection in pool - assertEquals(1, pool.getNumActiveConnections()); - - // Add other 6 active/9 total connections to pool - addConnectionsToPool(pool, totalConns - 1, activeConns - 1); - - // There are 7 active connections. - // The active number is less than totalConns(10) * minActiveRatio(0.8f). - // We can cleanup the pool - tmpConnManager.cleanup(pool); - assertEquals(totalConns - 1, pool.getNumConnections()); - - tmpConnManager.close(); - } } From 94a8dec168842cc17787526ee9a662f427a5dcea Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Wed, 5 Dec 2018 11:44:38 +0800 Subject: [PATCH 0276/1308] HDFS-14114. RBF: MIN_ACTIVE_RATIO should be configurable. Contributed by Fei Hui. 
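[Editor's note] The patch that follows (HDFS-14114) makes the router-to-namenode connection pool's minimum active ratio configurable instead of the fixed 50% constant removed above. As a quick illustration of how the new setting drives the cleanup decision, here is a minimal, self-contained sketch; the key name and the 0.5f default come from the diff below, while the class and helper names are made up for illustration only.

import org.apache.hadoop.conf.Configuration;

public class MinActiveRatioSketch {

  /**
   * Mirrors the decision in ConnectionManager#cleanup: a pool is shrunk when
   * it has been idle too long or when fewer than minActiveRatio of its
   * connections are currently active.
   */
  static boolean shouldShrink(Configuration conf, int total, int active,
      boolean idleTooLong) {
    float minActiveRatio = conf.getFloat(
        "dfs.federation.router.connection.min-active-ratio", 0.5f);
    return idleTooLong || active < minActiveRatio * total;
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setFloat("dfs.federation.router.connection.min-active-ratio", 0.8f);
    // 7 active out of 10 total is below 0.8 * 10 = 8, so the pool is shrunk.
    System.out.println(shouldShrink(conf, 10, 7, false)); // prints true
  }
}

The real ConnectionManager reads the key through RBFConfigKeys rather than a string literal; the arithmetic is the same one exercised by testConfigureConnectionActiveRatio further down in this patch.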
--- .../federation/router/ConnectionManager.java | 20 ++++--- .../federation/router/ConnectionPool.java | 14 ++++- .../federation/router/RBFConfigKeys.java | 5 ++ .../src/main/resources/hdfs-rbf-default.xml | 8 +++ .../router/TestConnectionManager.java | 55 ++++++++++++++++--- 5 files changed, 85 insertions(+), 17 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index fa2bf944dda6d..74bbbb572fd27 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -49,10 +49,6 @@ public class ConnectionManager { private static final Logger LOG = LoggerFactory.getLogger(ConnectionManager.class); - /** Minimum amount of active connections: 50%. */ - protected static final float MIN_ACTIVE_RATIO = 0.5f; - - /** Configuration for the connection manager, pool and sockets. */ private final Configuration conf; @@ -60,6 +56,8 @@ public class ConnectionManager { private final int minSize = 1; /** Max number of connections per user + nn. */ private final int maxSize; + /** Min ratio of active connections per user + nn. */ + private final float minActiveRatio; /** How often we close a pool for a particular user + nn. */ private final long poolCleanupPeriodMs; @@ -96,10 +94,13 @@ public class ConnectionManager { public ConnectionManager(Configuration config) { this.conf = config; - // Configure minimum and maximum connection pools + // Configure minimum, maximum and active connection pools this.maxSize = this.conf.getInt( RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE, RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT); + this.minActiveRatio = this.conf.getFloat( + RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_MIN_ACTIVE_RATIO, + RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_MIN_ACTIVE_RATIO_DEFAULT); // Map with the connections indexed by UGI and Namenode this.pools = new HashMap<>(); @@ -203,7 +204,8 @@ public ConnectionContext getConnection(UserGroupInformation ugi, pool = this.pools.get(connectionId); if (pool == null) { pool = new ConnectionPool( - this.conf, nnAddress, ugi, this.minSize, this.maxSize, protocol); + this.conf, nnAddress, ugi, this.minSize, this.maxSize, + this.minActiveRatio, protocol); this.pools.put(connectionId, pool); } } finally { @@ -326,8 +328,9 @@ void cleanup(ConnectionPool pool) { long timeSinceLastActive = Time.now() - pool.getLastActiveTime(); int total = pool.getNumConnections(); int active = pool.getNumActiveConnections(); + float poolMinActiveRatio = pool.getMinActiveRatio(); if (timeSinceLastActive > connectionCleanupPeriodMs || - active < MIN_ACTIVE_RATIO * total) { + active < poolMinActiveRatio * total) { // Remove and close 1 connection List conns = pool.removeConnections(1); for (ConnectionContext conn : conns) { @@ -412,8 +415,9 @@ public void run() { try { int total = pool.getNumConnections(); int active = pool.getNumActiveConnections(); + float poolMinActiveRatio = pool.getMinActiveRatio(); if (pool.getNumConnections() < pool.getMaxSize() && - active >= MIN_ACTIVE_RATIO * total) { + active >= poolMinActiveRatio * total) { ConnectionContext conn = pool.newConnection(); pool.addConnection(conn); } else { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java index fab3b81bc2578..f868521304752 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java @@ -91,6 +91,8 @@ public class ConnectionPool { private final int minSize; /** Max number of connections per user. */ private final int maxSize; + /** Min ratio of active connections per user. */ + private final float minActiveRatio; /** The last time a connection was active. */ private volatile long lastActiveTime = 0; @@ -98,7 +100,7 @@ public class ConnectionPool { protected ConnectionPool(Configuration config, String address, UserGroupInformation user, int minPoolSize, int maxPoolSize, - Class proto) throws IOException { + float minActiveRatio, Class proto) throws IOException { this.conf = config; @@ -112,6 +114,7 @@ protected ConnectionPool(Configuration config, String address, // Set configuration parameters for the pool this.minSize = minPoolSize; this.maxSize = maxPoolSize; + this.minActiveRatio = minActiveRatio; // Add minimum connections to the pool for (int i=0; i + + dfs.federation.router.connection.min-active-ratio + 0.5f + + Minimum active ratio of connections from the router to namenodes. + + + dfs.federation.router.connection.clean.ms 10000 diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java index 765f6c84e5611..a06dd6a10ffe3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java @@ -80,14 +80,14 @@ public void testCleanup() throws Exception { Map poolMap = connManager.getPools(); ConnectionPool pool1 = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, 0.5f, ClientProtocol.class); addConnectionsToPool(pool1, 9, 4); poolMap.put( new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS, ClientProtocol.class), pool1); ConnectionPool pool2 = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER2, 0, 10, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER2, 0, 10, 0.5f, ClientProtocol.class); addConnectionsToPool(pool2, 10, 10); poolMap.put( new ConnectionPoolId(TEST_USER2, TEST_NN_ADDRESS, ClientProtocol.class), @@ -110,7 +110,7 @@ public void testCleanup() throws Exception { // Make sure the number of connections doesn't go below minSize ConnectionPool pool3 = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER3, 2, 10, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER3, 2, 10, 0.5f, ClientProtocol.class); addConnectionsToPool(pool3, 8, 0); poolMap.put( new ConnectionPoolId(TEST_USER3, TEST_NN_ADDRESS, ClientProtocol.class), @@ -134,7 +134,7 @@ public void testCleanup() throws Exception { public void testConnectionCreatorWithException() throws Exception { // Create a bad connection pool pointing to unresolvable namenode address. 
ConnectionPool badPool = new ConnectionPool( - conf, UNRESOLVED_TEST_NN_ADDRESS, TEST_USER1, 0, 10, + conf, UNRESOLVED_TEST_NN_ADDRESS, TEST_USER1, 0, 10, 0.5f, ClientProtocol.class); BlockingQueue queue = new ArrayBlockingQueue<>(1); queue.add(badPool); @@ -160,7 +160,7 @@ public void testGetConnectionWithException() throws Exception { // Create a bad connection pool pointing to unresolvable namenode address. ConnectionPool badPool = new ConnectionPool( - conf, UNRESOLVED_TEST_NN_ADDRESS, TEST_USER1, 1, 10, + conf, UNRESOLVED_TEST_NN_ADDRESS, TEST_USER1, 1, 10, 0.5f, ClientProtocol.class); } @@ -171,7 +171,7 @@ public void testGetConnection() throws Exception { int activeConns = 5; ConnectionPool pool = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, 0.5f, ClientProtocol.class); addConnectionsToPool(pool, totalConns, activeConns); poolMap.put( new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS, ClientProtocol.class), @@ -196,7 +196,7 @@ public void testGetConnection() throws Exception { @Test public void testValidClientIndex() throws Exception { ConnectionPool pool = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 2, 2, ClientProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER1, 2, 2, 0.5f, ClientProtocol.class); for(int i = -3; i <= 3; i++) { pool.getClientIndex().set(i); ConnectionContext conn = pool.getConnection(); @@ -212,7 +212,7 @@ public void getGetConnectionNamenodeProtocol() throws Exception { int activeConns = 5; ConnectionPool pool = new ConnectionPool( - conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, NamenodeProtocol.class); + conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, 0.5f, NamenodeProtocol.class); addConnectionsToPool(pool, totalConns, activeConns); poolMap.put( new ConnectionPoolId( @@ -262,4 +262,43 @@ private void checkPoolConnections(UserGroupInformation ugi, } } + @Test + public void testConfigureConnectionActiveRatio() throws IOException { + final int totalConns = 10; + int activeConns = 7; + + Configuration tmpConf = new Configuration(); + // Set dfs.federation.router.connection.min-active-ratio 0.8f + tmpConf.setFloat( + RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_MIN_ACTIVE_RATIO, 0.8f); + ConnectionManager tmpConnManager = new ConnectionManager(tmpConf); + tmpConnManager.start(); + + // Create one new connection pool + tmpConnManager.getConnection(TEST_USER1, TEST_NN_ADDRESS, + NamenodeProtocol.class); + + Map poolMap = tmpConnManager.getPools(); + ConnectionPoolId connectionPoolId = new ConnectionPoolId(TEST_USER1, + TEST_NN_ADDRESS, NamenodeProtocol.class); + ConnectionPool pool = poolMap.get(connectionPoolId); + + // Test min active ratio is 0.8f + assertEquals(0.8f, pool.getMinActiveRatio(), 0.001f); + + pool.getConnection().getClient(); + // Test there is one active connection in pool + assertEquals(1, pool.getNumActiveConnections()); + + // Add other 6 active/9 total connections to pool + addConnectionsToPool(pool, totalConns - 1, activeConns - 1); + + // There are 7 active connections. + // The active number is less than totalConns(10) * minActiveRatio(0.8f). + // We can cleanup the pool + tmpConnManager.cleanup(pool); + assertEquals(totalConns - 1, pool.getNumConnections()); + + tmpConnManager.close(); + } } From 01b4126b4e8124edfde20ba4733c6300bb994251 Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Sun, 16 Dec 2018 00:40:51 +0900 Subject: [PATCH 0277/1308] HDFS-14152. RBF: Fix a typo in RouterAdmin usage. Contributed by Ayush Saxena. 
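[Editor's note] Before the small usage-string fix below, one detail worth spelling out from the TestConnectionManager changes above: a cleanup pass removes at most one connection and, per the test comment, must never take a pool below its configured minimum size. The following is a simplified, hypothetical model used only to illustrate that invariant; it is not the actual ConnectionManager or ConnectionPool code.

/** Hypothetical, simplified pool model; illustrates the invariant only. */
class PoolModel {
  private int total;
  private final int minSize;
  private final float minActiveRatio;

  PoolModel(int total, int minSize, float minActiveRatio) {
    this.total = total;
    this.minSize = minSize;
    this.minActiveRatio = minActiveRatio;
  }

  /** One cleanup pass: shrink by at most one, never below minSize. */
  void cleanupOnce(int active, boolean idleTooLong) {
    boolean mostlyIdle = active < minActiveRatio * total;
    if ((idleTooLong || mostlyIdle) && total > minSize) {
      total--; // the real code removes and closes exactly one connection
    }
  }

  int getTotal() {
    return total;
  }
}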
--- .../org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java | 2 +- .../hdfs/server/federation/router/TestRouterAdminCLI.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 4a9cc7ac51f59..bdaabe821f87a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -109,7 +109,7 @@ private String getUsage(String cmd) { {"-add", "-update", "-rm", "-ls", "-setQuota", "-clrQuota", "-safemode", "-nameservice", "-getDisabledNameservices"}; StringBuilder usage = new StringBuilder(); - usage.append("Usage: hdfs routeradmin :\n"); + usage.append("Usage: hdfs dfsrouteradmin :\n"); for (int i = 0; i < commands.length; i++) { usage.append(getUsage(commands[i])); if (i + 1 < commands.length) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index 66429420ab0c3..d0e3e50fce4e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -549,7 +549,7 @@ public void testInvalidArgumentMessage() throws Exception { argv = new String[] {"-Random"}; assertEquals(-1, ToolRunner.run(admin, argv)); - String expected = "Usage: hdfs routeradmin :\n" + String expected = "Usage: hdfs dfsrouteradmin :\n" + "\t[-add " + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]\n" From bbe859177d67fcdfd5377b1abff4a637fbbd4587 Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Mon, 17 Dec 2018 12:35:07 +0800 Subject: [PATCH 0278/1308] HDFS-13869. RBF: Handle NPE for NamenodeBeanMetrics#getFederationMetrics. Contributed by Ranith Sardar. 
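[Editor's note] The patch below (HDFS-13869) replaces direct accesses to the router's metrics with guarded lookups so that the JMX bean getters degrade to 0 and a debug log entry, instead of throwing an NPE, while the router is still starting or when metrics are disabled. A condensed sketch of that pattern follows; the types here are stand-ins, only the shape of the guard matches the diff.

import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedMetricSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedMetricSketch.class);

  /** Stand-in for Router#getMetrics(); legitimately null during startup. */
  interface MetricsSource {
    long getUsedCapacity();
  }

  private volatile MetricsSource metrics; // set once the router is running

  private MetricsSource getMetricsOrThrow() throws IOException {
    MetricsSource m = metrics;
    if (m == null) {
      throw new IOException("Federated metrics is not initialized");
    }
    return m;
  }

  /** JMX getters must not throw, so fall back to 0 and log at debug level. */
  public long getUsed() {
    try {
      return getMetricsOrThrow().getUsedCapacity();
    } catch (IOException e) {
      LOG.debug("Failed to get the used capacity: {}", e.getMessage());
      return 0;
    }
  }
}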
--- .../metrics/NamenodeBeanMetrics.java | 149 +++++++++++++++--- .../hdfs/server/federation/router/Router.java | 8 +- .../server/federation/router/TestRouter.java | 14 ++ 3 files changed, 147 insertions(+), 24 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java index 4380ae9eebb53..a05fdc144989d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java @@ -168,8 +168,12 @@ public void close() { } } - private FederationMetrics getFederationMetrics() { - return this.router.getMetrics(); + private FederationMetrics getFederationMetrics() throws IOException { + FederationMetrics metrics = getRouter().getMetrics(); + if (metrics == null) { + throw new IOException("Federated metrics is not initialized"); + } + return metrics; } ///////////////////////////////////////////////////////// @@ -188,22 +192,42 @@ public String getSoftwareVersion() { @Override public long getUsed() { - return getFederationMetrics().getUsedCapacity(); + try { + return getFederationMetrics().getUsedCapacity(); + } catch (IOException e) { + LOG.debug("Failed to get the used capacity", e.getMessage()); + } + return 0; } @Override public long getFree() { - return getFederationMetrics().getRemainingCapacity(); + try { + return getFederationMetrics().getRemainingCapacity(); + } catch (IOException e) { + LOG.debug("Failed to get remaining capacity", e.getMessage()); + } + return 0; } @Override public long getTotal() { - return getFederationMetrics().getTotalCapacity(); + try { + return getFederationMetrics().getTotalCapacity(); + } catch (IOException e) { + LOG.debug("Failed to Get total capacity", e.getMessage()); + } + return 0; } @Override public long getProvidedCapacity() { - return getFederationMetrics().getProvidedSpace(); + try { + return getFederationMetrics().getProvidedSpace(); + } catch (IOException e) { + LOG.debug("Failed to get provided capacity", e.getMessage()); + } + return 0; } @Override @@ -261,39 +285,79 @@ public float getPercentBlockPoolUsed() { @Override public long getTotalBlocks() { - return getFederationMetrics().getNumBlocks(); + try { + return getFederationMetrics().getNumBlocks(); + } catch (IOException e) { + LOG.debug("Failed to get number of blocks", e.getMessage()); + } + return 0; } @Override public long getNumberOfMissingBlocks() { - return getFederationMetrics().getNumOfMissingBlocks(); + try { + return getFederationMetrics().getNumOfMissingBlocks(); + } catch (IOException e) { + LOG.debug("Failed to get number of missing blocks", e.getMessage()); + } + return 0; } @Override @Deprecated public long getPendingReplicationBlocks() { - return getFederationMetrics().getNumOfBlocksPendingReplication(); + try { + return getFederationMetrics().getNumOfBlocksPendingReplication(); + } catch (IOException e) { + LOG.debug("Failed to get number of blocks pending replica", + e.getMessage()); + } + return 0; } @Override public long getPendingReconstructionBlocks() { - return getFederationMetrics().getNumOfBlocksPendingReplication(); + try { + return getFederationMetrics().getNumOfBlocksPendingReplication(); + } catch (IOException e) { + LOG.debug("Failed to get number of blocks 
pending replica", + e.getMessage()); + } + return 0; } @Override @Deprecated public long getUnderReplicatedBlocks() { - return getFederationMetrics().getNumOfBlocksUnderReplicated(); + try { + return getFederationMetrics().getNumOfBlocksUnderReplicated(); + } catch (IOException e) { + LOG.debug("Failed to get number of blocks under replicated", + e.getMessage()); + } + return 0; } @Override public long getLowRedundancyBlocks() { - return getFederationMetrics().getNumOfBlocksUnderReplicated(); + try { + return getFederationMetrics().getNumOfBlocksUnderReplicated(); + } catch (IOException e) { + LOG.debug("Failed to get number of blocks under replicated", + e.getMessage()); + } + return 0; } @Override public long getPendingDeletionBlocks() { - return getFederationMetrics().getNumOfBlocksPendingDeletion(); + try { + return getFederationMetrics().getNumOfBlocksPendingDeletion(); + } catch (IOException e) { + LOG.debug("Failed to get number of blocks pending deletion", + e.getMessage()); + } + return 0; } @Override @@ -471,7 +535,12 @@ public String getJournalTransactionInfo() { @Override public long getNNStartedTimeInMillis() { - return this.router.getStartTime(); + try { + return getRouter().getStartTime(); + } catch (IOException e) { + LOG.debug("Failed to get the router startup time", e.getMessage()); + } + return 0; } @Override @@ -527,7 +596,12 @@ public long getProvidedCapacityTotal() { @Override public long getFilesTotal() { - return getFederationMetrics().getNumFiles(); + try { + return getFederationMetrics().getNumFiles(); + } catch (IOException e) { + LOG.debug("Failed to get number of files", e.getMessage()); + } + return 0; } @Override @@ -537,12 +611,22 @@ public int getTotalLoad() { @Override public int getNumLiveDataNodes() { - return this.router.getMetrics().getNumLiveNodes(); + try { + return getFederationMetrics().getNumLiveNodes(); + } catch (IOException e) { + LOG.debug("Failed to get number of live nodes", e.getMessage()); + } + return 0; } @Override public int getNumDeadDataNodes() { - return this.router.getMetrics().getNumDeadNodes(); + try { + return getFederationMetrics().getNumDeadNodes(); + } catch (IOException e) { + LOG.debug("Failed to get number of dead nodes", e.getMessage()); + } + return 0; } @Override @@ -552,17 +636,35 @@ public int getNumStaleDataNodes() { @Override public int getNumDecomLiveDataNodes() { - return this.router.getMetrics().getNumDecomLiveNodes(); + try { + return getFederationMetrics().getNumDecomLiveNodes(); + } catch (IOException e) { + LOG.debug("Failed to get the number of live decommissioned datanodes", + e.getMessage()); + } + return 0; } @Override public int getNumDecomDeadDataNodes() { - return this.router.getMetrics().getNumDecomDeadNodes(); + try { + return getFederationMetrics().getNumDecomDeadNodes(); + } catch (IOException e) { + LOG.debug("Failed to get the number of dead decommissioned datanodes", + e.getMessage()); + } + return 0; } @Override public int getNumDecommissioningDataNodes() { - return this.router.getMetrics().getNumDecommissioningNodes(); + try { + return getFederationMetrics().getNumDecommissioningNodes(); + } catch (IOException e) { + LOG.debug("Failed to get number of decommissioning nodes", + e.getMessage()); + } + return 0; } @Override @@ -702,4 +804,11 @@ public int getNumEncryptionZones() { public String getVerifyECWithTopologyResult() { return null; } + + private Router getRouter() throws IOException { + if (this.router == null) { + throw new IOException("Router is not initialized"); + } + return 
this.router; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 32882735ebf61..3182e27bcc93d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -586,11 +586,11 @@ public FederationMetrics getMetrics() { * * @return Namenode metrics. */ - public NamenodeBeanMetrics getNamenodeMetrics() { - if (this.metrics != null) { - return this.metrics.getNamenodeMetrics(); + public NamenodeBeanMetrics getNamenodeMetrics() throws IOException { + if (this.metrics == null) { + throw new IOException("Namenode metrics is not initialized"); } - return null; + return this.metrics.getNamenodeMetrics(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java index db4be292fd6e5..f83cfda6015ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java @@ -203,4 +203,18 @@ public void testRouterIDInRouterRpcClient() throws Exception { router.stop(); router.close(); } + + @Test + public void testRouterMetricsWhenDisabled() throws Exception { + + Router router = new Router(); + router.init(new RouterConfigBuilder(conf).rpc().build()); + router.start(); + + intercept(IOException.class, "Namenode metrics is not initialized", + () -> router.getNamenodeMetrics().getCacheCapacity()); + + router.stop(); + router.close(); + } } From 3d97142dff33b92251d6fd3cdc30fdb5c531adbf Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Tue, 18 Dec 2018 19:47:36 +0900 Subject: [PATCH 0279/1308] HDFS-14151. RBF: Make the read-only column of Mount Table clearly understandable. 
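[Editor's note] One consequence of the Router change just above is that getNamenodeMetrics() now throws an IOException instead of returning null, so callers trade a null check for exception handling, as testRouterMetricsWhenDisabled verifies. A hypothetical caller, sketched only to show the contract change; the names are illustrative, not part of the patch.

import java.io.IOException;

public class NamenodeMetricsCallerSketch {

  /** Stand-in for the revised Router#getNamenodeMetrics() contract. */
  interface MetricsProvider {
    Object getNamenodeMetrics() throws IOException; // throws when uninitialized
  }

  /** Old style was a null check; new style catches the IOException. */
  static Object metricsOrNull(MetricsProvider router) {
    try {
      return router.getNamenodeMetrics();
    } catch (IOException e) {
      // "Namenode metrics is not initialized" per the diff above
      return null;
    }
  }
}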
--- .../src/main/webapps/router/federationhealth.html | 2 +- .../src/main/webapps/router/federationhealth.js | 1 + .../hadoop-hdfs-rbf/src/main/webapps/static/rbf.css | 8 +------- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html index 068988c365cfb..0f089fe3c2e60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html @@ -408,7 +408,7 @@ {nameserviceId} {path} {order} - + {ownerName} {groupName} {mode} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js index 6311a801fa5b2..bb8e05707e4d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js @@ -317,6 +317,7 @@ for (var i = 0, e = mountTable.length; i < e; ++i) { if (mountTable[i].readonly == true) { mountTable[i].readonly = "true" + mountTable[i].status = "Read only" } else { mountTable[i].readonly = "false" } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css index 43112af044bf9..5cdd8269ca1f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css @@ -132,12 +132,6 @@ } .mount-table-read-only-true:before { - color: #c7254e; - content: "\e033"; -} - -.mount-table-read-only-false:before { color: #5fa341; - content: "\e013"; + content: "\e033"; } - From 8f6f9d9c8398567064c9369f48213db63f45538c Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Wed, 19 Dec 2018 11:40:00 +0800 Subject: [PATCH 0280/1308] HDFS-13443. RBF: Update mount table cache immediately after changing (add/update/remove) mount table entries. Contributed by Mohammad Arshad. 
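[Editor's note] HDFS-13443, which follows, is the largest patch in this stretch: every mount table change now triggers an immediate cache refresh on the local router (a direct call) and on all remote routers (RPC through cached RouterClient instances), with the calls fanned out on daemon threads and bounded by a timeout. A stripped-down sketch of that fan-out, using stand-in types rather than the real MountTableManager API:

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class RefreshFanOutSketch {

  /** Stand-in for one router's refresh entry point (local or remote). */
  interface Refresher {
    boolean refresh() throws Exception;
  }

  /** Fan out refresh calls and wait up to timeoutMs for all of them. */
  static void refreshAll(List<Refresher> routers, long timeoutMs)
      throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(routers.size());
    for (Refresher r : routers) {
      Thread t = new Thread(() -> {
        try {
          r.refresh();   // local: direct cache reload; remote: admin RPC
        } catch (Exception e) {
          // A failed router only misses this round; the others still refresh.
        } finally {
          latch.countDown();
        }
      });
      t.setDaemon(true);
      t.start();
    }
    if (!latch.await(timeoutMs, TimeUnit.MILLISECONDS)) {
      System.err.println("Not all router admins updated their cache");
    }
  }
}

The real service additionally caches one RouterClient per admin address and evicts idle clients on a schedule, which is what the Guava LoadingCache and the cleaner executor in the diff below are for.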
--- ...erAdminProtocolServerSideTranslatorPB.java | 23 + .../RouterAdminProtocolTranslatorPB.java | 21 + .../resolver/MountTableManager.java | 16 + .../router/MountTableRefresherService.java | 289 +++++++++++++ .../router/MountTableRefresherThread.java | 96 +++++ .../federation/router/RBFConfigKeys.java | 25 ++ .../hdfs/server/federation/router/Router.java | 53 ++- .../federation/router/RouterAdminServer.java | 28 +- .../router/RouterHeartbeatService.java | 5 + .../federation/store/MountTableStore.java | 24 ++ .../federation/store/StateStoreUtils.java | 26 ++ .../store/impl/MountTableStoreImpl.java | 18 + .../RefreshMountTableEntriesRequest.java | 34 ++ .../RefreshMountTableEntriesResponse.java | 44 ++ ...RefreshMountTableEntriesRequestPBImpl.java | 67 +++ ...efreshMountTableEntriesResponsePBImpl.java | 74 ++++ .../federation/store/records/RouterState.java | 4 + .../records/impl/pb/RouterStatePBImpl.java | 10 + .../hdfs/tools/federation/RouterAdmin.java | 33 +- .../src/main/proto/FederationProtocol.proto | 8 + .../src/main/proto/RouterProtocol.proto | 5 + .../src/main/resources/hdfs-rbf-default.xml | 34 ++ .../src/site/markdown/HDFSRouterFederation.md | 9 + .../federation/FederationTestUtils.java | 27 ++ .../federation/RouterConfigBuilder.java | 12 + .../federation/router/TestRouterAdminCLI.java | 25 +- .../TestRouterMountTableCacheRefresh.java | 396 ++++++++++++++++++ .../src/site/markdown/HDFSCommands.md | 2 + 28 files changed, 1402 insertions(+), 6 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherService.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/RefreshMountTableEntriesRequest.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/RefreshMountTableEntriesResponse.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RefreshMountTableEntriesRequestPBImpl.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RefreshMountTableEntriesResponsePBImpl.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java index 6341ebd11a381..a31c46d2912c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java @@ -37,6 +37,8 @@ import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeRequestProto; import 
org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeResponseProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesRequestProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto; @@ -58,6 +60,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; @@ -78,6 +82,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafeModeRequestPBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafeModeResponsePBImpl; +import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesRequestPBImpl; +import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl; @@ -275,4 +281,21 @@ public GetDisabledNameservicesResponseProto getDisabledNameservices( throw new ServiceException(e); } } + + @Override + public RefreshMountTableEntriesResponseProto refreshMountTableEntries( + RpcController controller, RefreshMountTableEntriesRequestProto request) + throws ServiceException { + try { + RefreshMountTableEntriesRequest req = + new RefreshMountTableEntriesRequestPBImpl(request); + RefreshMountTableEntriesResponse response = + server.refreshMountTableEntries(req); + RefreshMountTableEntriesResponsePBImpl responsePB = + (RefreshMountTableEntriesResponsePBImpl) response; + return responsePB.getProto(); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java index 6e24438169109..1fbb06d2a7f61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java @@ -38,6 +38,8 @@ import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeRequestProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeResponseProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesRequestProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto; @@ -61,6 +63,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; @@ -77,6 +81,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafeModeResponsePBImpl; +import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesRequestPBImpl; +import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl; @@ -267,4 +273,19 @@ public GetDisabledNameservicesResponse getDisabledNameservices( throw new IOException(ProtobufHelper.getRemoteException(e).getMessage()); } } + + @Override + public RefreshMountTableEntriesResponse refreshMountTableEntries( + RefreshMountTableEntriesRequest request) throws IOException { + RefreshMountTableEntriesRequestPBImpl requestPB = + (RefreshMountTableEntriesRequestPBImpl) request; + RefreshMountTableEntriesRequestProto proto = requestPB.getProto(); + try { + RefreshMountTableEntriesResponseProto response = + rpcProxy.refreshMountTableEntries(null, proto); + return new RefreshMountTableEntriesResponsePBImpl(response); + } catch (ServiceException e) { + throw new IOException(ProtobufHelper.getRemoteException(e).getMessage()); + } + } } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableManager.java index c2e4a5b4473cd..9a1e4160245aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableManager.java @@ -23,6 +23,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; @@ -77,4 +79,18 @@ RemoveMountTableEntryResponse removeMountTableEntry( */ GetMountTableEntriesResponse getMountTableEntries( GetMountTableEntriesRequest request) throws IOException; + + /** + * Refresh mount table entries cache from the state store. Cache is updated + * periodically but with this API cache can be refreshed immediately. This API + * is primarily meant to be called from the Admin Server. Admin Server will + * call this API and refresh mount table cache of all the routers while + * changing mount table entries. + * + * @param request Fully populated request object. + * @return True the mount table entry was updated without any error. + * @throws IOException Throws exception if the data store is not initialized. + */ + RefreshMountTableEntriesResponse refreshMountTableEntries( + RefreshMountTableEntriesRequest request) throws IOException; } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherService.java new file mode 100644 index 0000000000000..fafcef475a435 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherService.java @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.store.MountTableStore; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreUtils; +import org.apache.hadoop.hdfs.server.federation.store.records.RouterState; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.service.AbstractService; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.cache.RemovalListener; +import com.google.common.cache.RemovalNotification; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * This service is invoked from {@link MountTableStore} when there is change in + * mount table entries and it updates mount table entry cache on local router as + * well as on all remote routers. Refresh on local router is done by calling + * {@link MountTableStore#loadCache(boolean)}} API directly, no RPC call + * involved, but on remote routers refresh is done through RouterClient(RPC + * call). To improve performance, all routers are refreshed in separate thread + * and all connection are cached. Cached connections are removed from + * cache and closed when their max live time is elapsed. + */ +public class MountTableRefresherService extends AbstractService { + private static final String ROUTER_CONNECT_ERROR_MSG = + "Router {} connection failed. Mount table cache will not refesh."; + private static final Logger LOG = + LoggerFactory.getLogger(MountTableRefresherService.class); + + /** Local router. */ + private final Router router; + /** Mount table store. */ + private MountTableStore mountTableStore; + /** Local router admin address in the form of host:port. */ + private String localAdminAdress; + /** Timeout in ms to update mount table cache on all the routers. */ + private long cacheUpdateTimeout; + + /** + * All router admin clients cached. So no need to create the client again and + * again. Router admin address(host:port) is used as key to cache RouterClient + * objects. + */ + private LoadingCache routerClientsCache; + + /** + * Removes expired RouterClient from routerClientsCache. + */ + private ScheduledExecutorService clientCacheCleanerScheduler; + + /** + * Create a new service to refresh mount table cache when there is change in + * mount table entries. + * + * @param router whose mount table cache will be refreshed + */ + public MountTableRefresherService(Router router) { + super(MountTableRefresherService.class.getSimpleName()); + this.router = router; + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + super.serviceInit(conf); + this.mountTableStore = getMountTableStore(); + // attach this service to mount table store. 
+ this.mountTableStore.setRefreshService(this); + this.localAdminAdress = + StateStoreUtils.getHostPortString(router.getAdminServerAddress()); + this.cacheUpdateTimeout = conf.getTimeDuration( + RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE_TIMEOUT, + RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + long routerClientMaxLiveTime = conf.getTimeDuration( + RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE_CLIENT_MAX_TIME, + RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE_CLIENT_MAX_TIME_DEFAULT, + TimeUnit.MILLISECONDS); + routerClientsCache = CacheBuilder.newBuilder() + .expireAfterWrite(routerClientMaxLiveTime, TimeUnit.MILLISECONDS) + .removalListener(getClientRemover()).build(getClientCreator()); + + initClientCacheCleaner(routerClientMaxLiveTime); + } + + private void initClientCacheCleaner(long routerClientMaxLiveTime) { + clientCacheCleanerScheduler = + Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder() + .setNameFormat("MountTableRefresh_ClientsCacheCleaner") + .setDaemon(true).build()); + /* + * When cleanUp() method is called, expired RouterClient will be removed and + * closed. + */ + clientCacheCleanerScheduler.scheduleWithFixedDelay( + () -> routerClientsCache.cleanUp(), routerClientMaxLiveTime, + routerClientMaxLiveTime, TimeUnit.MILLISECONDS); + } + + /** + * Create cache entry remove listener. + */ + private RemovalListener getClientRemover() { + return new RemovalListener() { + @Override + public void onRemoval( + RemovalNotification notification) { + closeRouterClient(notification.getValue()); + } + }; + } + + @VisibleForTesting + protected void closeRouterClient(RouterClient client) { + try { + client.close(); + } catch (IOException e) { + LOG.error("Error while closing RouterClient", e); + } + } + + /** + * Creates RouterClient and caches it. + */ + private CacheLoader getClientCreator() { + return new CacheLoader() { + public RouterClient load(String adminAddress) throws IOException { + InetSocketAddress routerSocket = + NetUtils.createSocketAddr(adminAddress); + Configuration config = getConfig(); + return createRouterClient(routerSocket, config); + } + }; + } + + @VisibleForTesting + protected RouterClient createRouterClient(InetSocketAddress routerSocket, + Configuration config) throws IOException { + return new RouterClient(routerSocket, config); + } + + @Override + protected void serviceStart() throws Exception { + super.serviceStart(); + } + + @Override + protected void serviceStop() throws Exception { + super.serviceStop(); + clientCacheCleanerScheduler.shutdown(); + // remove and close all admin clients + routerClientsCache.invalidateAll(); + } + + private MountTableStore getMountTableStore() throws IOException { + MountTableStore mountTblStore = + router.getStateStore().getRegisteredRecordStore(MountTableStore.class); + if (mountTblStore == null) { + throw new IOException("Mount table state store is not available."); + } + return mountTblStore; + } + + /** + * Refresh mount table cache of this router as well as all other routers. 
+ */ + public void refresh() throws StateStoreUnavailableException { + List cachedRecords = + router.getRouterStateManager().getCachedRecords(); + List refreshThreads = new ArrayList<>(); + for (RouterState routerState : cachedRecords) { + String adminAddress = routerState.getAdminAddress(); + if (adminAddress == null || adminAddress.length() == 0) { + // this router has not enabled router admin + continue; + } + // No use of calling refresh on router which is not running state + if (routerState.getStatus() != RouterServiceState.RUNNING) { + LOG.info( + "Router {} is not running. Mount table cache will not refesh."); + // remove if RouterClient is cached. + removeFromCache(adminAddress); + } else if (isLocalAdmin(adminAddress)) { + /* + * Local router's cache update does not require RPC call, so no need for + * RouterClient + */ + refreshThreads.add(getLocalRefresher(adminAddress)); + } else { + try { + RouterClient client = routerClientsCache.get(adminAddress); + refreshThreads.add(new MountTableRefresherThread( + client.getMountTableManager(), adminAddress)); + } catch (ExecutionException execExcep) { + // Can not connect, seems router is stopped now. + LOG.warn(ROUTER_CONNECT_ERROR_MSG, adminAddress, execExcep); + } + } + } + if (!refreshThreads.isEmpty()) { + invokeRefresh(refreshThreads); + } + } + + @VisibleForTesting + protected MountTableRefresherThread getLocalRefresher(String adminAddress) { + return new MountTableRefresherThread(router.getAdminServer(), adminAddress); + } + + private void removeFromCache(String adminAddress) { + routerClientsCache.invalidate(adminAddress); + } + + private void invokeRefresh(List refreshThreads) { + CountDownLatch countDownLatch = new CountDownLatch(refreshThreads.size()); + // start all the threads + for (MountTableRefresherThread refThread : refreshThreads) { + refThread.setCountDownLatch(countDownLatch); + refThread.start(); + } + try { + /* + * Wait for all the thread to complete, await method returns false if + * refresh is not finished within specified time + */ + boolean allReqCompleted = + countDownLatch.await(cacheUpdateTimeout, TimeUnit.MILLISECONDS); + if (!allReqCompleted) { + LOG.warn("Not all router admins updated their cache"); + } + } catch (InterruptedException e) { + LOG.error("Mount table cache refresher was interrupted.", e); + } + logResult(refreshThreads); + } + + private boolean isLocalAdmin(String adminAddress) { + return adminAddress.contentEquals(localAdminAdress); + } + + private void logResult(List refreshThreads) { + int succesCount = 0; + int failureCount = 0; + for (MountTableRefresherThread mountTableRefreshThread : refreshThreads) { + if (mountTableRefreshThread.isSuccess()) { + succesCount++; + } else { + failureCount++; + // remove RouterClient from cache so that new client is created + removeFromCache(mountTableRefreshThread.getAdminAddress()); + } + } + LOG.info("Mount table entries cache refresh succesCount={},failureCount={}", + succesCount, failureCount); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java new file mode 100644 index 0000000000000..c9967a20736e6 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) 
under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import java.io.IOException; +import java.util.concurrent.CountDownLatch; + +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Base class for updating mount table cache on all the router. + */ +public class MountTableRefresherThread extends Thread { + private static final Logger LOG = + LoggerFactory.getLogger(MountTableRefresherThread.class); + private boolean success; + /** Admin server on which refreshed to be invoked. */ + private String adminAddress; + private CountDownLatch countDownLatch; + private MountTableManager manager; + + public MountTableRefresherThread(MountTableManager manager, + String adminAddress) { + this.manager = manager; + this.adminAddress = adminAddress; + setName("MountTableRefresh_" + adminAddress); + setDaemon(true); + } + + /** + * Refresh mount table cache of local and remote routers. Local and remote + * routers will be refreshed differently. Lets understand what are the + * local and remote routers and refresh will be done differently on these + * routers. Suppose there are three routers R1, R2 and R3. User want to add + * new mount table entry. He will connect to only one router, not all the + * routers. Suppose He connects to R1 and calls add mount table entry through + * API or CLI. Now in this context R1 is local router, R2 and R3 are remote + * routers. Because add mount table entry is invoked on R1, R1 will update the + * cache locally it need not to make RPC call. But R1 will make RPC calls to + * update cache on R2 and R3. + */ + @Override + public void run() { + try { + RefreshMountTableEntriesResponse refreshMountTableEntries = + manager.refreshMountTableEntries( + RefreshMountTableEntriesRequest.newInstance()); + success = refreshMountTableEntries.getResult(); + } catch (IOException e) { + LOG.error("Failed to refresh mount table entries cache at router {}", + adminAddress, e); + } finally { + countDownLatch.countDown(); + } + } + + /** + * @return true if cache was refreshed successfully. 
+ */ + public boolean isSuccess() { + return success; + } + + public void setCountDownLatch(CountDownLatch countDownLatch) { + this.countDownLatch = countDownLatch; + } + + @Override + public String toString() { + return "MountTableRefreshThread [success=" + success + ", adminAddress=" + + adminAddress + "]"; + } + + public String getAdminAddress() { + return adminAddress; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java index 0070de73b48ba..5e907c8a55e24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java @@ -204,6 +204,31 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic { FEDERATION_ROUTER_PREFIX + "mount-table.max-cache-size"; /** Remove cache entries if we have more than 10k. */ public static final int FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT = 10000; + /** + * If true then cache updated immediately after mount table entry change + * otherwise it is updated periodically based configuration. + */ + public static final String MOUNT_TABLE_CACHE_UPDATE = + FEDERATION_ROUTER_PREFIX + "mount-table.cache.update"; + public static final boolean MOUNT_TABLE_CACHE_UPDATE_DEFAULT = + false; + /** + * Timeout to update mount table cache on all the routers. + */ + public static final String MOUNT_TABLE_CACHE_UPDATE_TIMEOUT = + FEDERATION_ROUTER_PREFIX + "mount-table.cache.update.timeout"; + public static final long MOUNT_TABLE_CACHE_UPDATE_TIMEOUT_DEFAULT = + TimeUnit.MINUTES.toMillis(1); + /** + * Remote router mount table cache is updated through RouterClient(RPC call). + * To improve performance, RouterClient connections are cached but it should + * not be kept in cache forever. This property defines the max time a + * connection can be cached. + */ + public static final String MOUNT_TABLE_CACHE_UPDATE_CLIENT_MAX_TIME = + FEDERATION_ROUTER_PREFIX + "mount-table.cache.update.client.max.time"; + public static final long MOUNT_TABLE_CACHE_UPDATE_CLIENT_MAX_TIME_DEFAULT = + TimeUnit.MINUTES.toMillis(5); public static final String FEDERATION_MOUNT_TABLE_CACHE_ENABLE = FEDERATION_ROUTER_PREFIX + "mount-table.cache.enable"; public static final boolean FEDERATION_MOUNT_TABLE_CACHE_ENABLE_DEFAULT = diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 3182e27bcc93d..6a7437f29b587 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -254,9 +254,50 @@ protected void serviceInit(Configuration configuration) throws Exception { addService(this.safemodeService); } + /* + * Refresh mount table cache immediately after adding, modifying or deleting + * the mount table entries. 
If this service is not enabled mount table cache + * are refreshed periodically by StateStoreCacheUpdateService + */ + if (conf.getBoolean(RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE, + RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE_DEFAULT)) { + // There is no use of starting refresh service if state store and admin + // servers are not enabled + String disabledDependentServices = getDisabledDependentServices(); + /* + * disabledDependentServices null means all dependent services are + * enabled. + */ + if (disabledDependentServices == null) { + + MountTableRefresherService refreshService = + new MountTableRefresherService(this); + addService(refreshService); + LOG.info("Service {} is enabled.", + MountTableRefresherService.class.getSimpleName()); + } else { + LOG.warn( + "Service {} not enabled: depenendent service(s) {} not enabled.", + MountTableRefresherService.class.getSimpleName(), + disabledDependentServices); + } + } + super.serviceInit(conf); } + private String getDisabledDependentServices() { + if (this.stateStore == null && this.adminServer == null) { + return StateStoreService.class.getSimpleName() + "," + + RouterAdminServer.class.getSimpleName(); + } else if (this.stateStore == null) { + return StateStoreService.class.getSimpleName(); + } else if (this.adminServer == null) { + return RouterAdminServer.class.getSimpleName(); + } + return null; + } + /** * Returns the hostname for this Router. If the hostname is not * explicitly configured in the given config, then it is determined. @@ -696,9 +737,19 @@ Collection getNamenodeHearbeatServices() { } /** - * Get the Router safe mode service + * Get the Router safe mode service. */ RouterSafemodeService getSafemodeService() { return this.safemodeService; } + + /** + * Get router admin server. + * + * @return Null if admin is not enabled. 
+ */ + public RouterAdminServer getAdminServer() { + return adminServer; + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index f34dc419eb63e..5bb7751cd1e92 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore; import org.apache.hadoop.hdfs.server.federation.store.MountTableStore; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreCache; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest; @@ -55,6 +56,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; @@ -102,6 +105,7 @@ public class RouterAdminServer extends AbstractService private static String routerOwner; private static String superGroup; private static boolean isPermissionEnabled; + private boolean iStateStoreCache; public RouterAdminServer(Configuration conf, Router router) throws IOException { @@ -154,6 +158,8 @@ public RouterAdminServer(Configuration conf, Router router) this.adminAddress = new InetSocketAddress( confRpcAddress.getHostName(), listenAddress.getPort()); router.setAdminServerAddress(this.adminAddress); + iStateStoreCache = + router.getSubclusterResolver() instanceof StateStoreCache; } /** @@ -243,7 +249,7 @@ public UpdateMountTableEntryResponse updateMountTableEntry( getMountTableStore().updateMountTableEntry(request); MountTable mountTable = request.getEntry(); - if (mountTable != null) { + if (mountTable != null && router.isQuotaEnabled()) { synchronizeQuota(mountTable); } return response; @@ -331,6 +337,26 @@ public GetSafeModeResponse getSafeMode(GetSafeModeRequest request) return GetSafeModeResponse.newInstance(isInSafeMode); } + @Override + public RefreshMountTableEntriesResponse refreshMountTableEntries( + RefreshMountTableEntriesRequest request) throws IOException { + if (iStateStoreCache) { + /* + * MountTableResolver updates MountTableStore cache also. Expecting other + * SubclusterResolver implementations to update MountTableStore cache also + * apart from updating its cache. 
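+       * In other words: when the subcluster resolver is state-store backed,
+       * the single loadCache(true) call below refreshes both the resolver
+       * cache and the MountTableStore cache; otherwise the refresh is
+       * delegated to the MountTableStore in the else branch.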
+ */ + boolean result = ((StateStoreCache) this.router.getSubclusterResolver()) + .loadCache(true); + RefreshMountTableEntriesResponse response = + RefreshMountTableEntriesResponse.newInstance(); + response.setResult(result); + return response; + } else { + return getMountTableStore().refreshMountTableEntries(request); + } + } + /** * Verify if Router set safe mode state correctly. * @param isInSafeMode Expected state to be set. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java index a7f02d33bdffc..c497d85335922 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.server.federation.store.RecordStore; import org.apache.hadoop.hdfs.server.federation.store.RouterStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreUtils; import org.apache.hadoop.hdfs.server.federation.store.protocol.RouterHeartbeatRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.RouterHeartbeatResponse; import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; @@ -91,6 +92,10 @@ synchronized void updateStateStore() { getStateStoreVersion(MembershipStore.class), getStateStoreVersion(MountTableStore.class)); record.setStateStoreVersion(stateStoreVersion); + // if admin server not started then hostPort will be empty + String hostPort = + StateStoreUtils.getHostPortString(router.getAdminServerAddress()); + record.setAdminAddress(hostPort); RouterHeartbeatRequest request = RouterHeartbeatRequest.newInstance(record); RouterHeartbeatResponse response = routerStore.routerHeartbeat(request); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MountTableStore.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MountTableStore.java index b43965997d0a0..9d4b64b7f4fd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MountTableStore.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MountTableStore.java @@ -20,8 +20,11 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; +import org.apache.hadoop.hdfs.server.federation.router.MountTableRefresherService; import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Management API for the HDFS mount table information stored in @@ -42,8 +45,29 @@ @InterfaceStability.Evolving public abstract class MountTableStore extends CachedRecordStore implements MountTableManager { + private static final Logger LOG = + LoggerFactory.getLogger(MountTableStore.class); + private MountTableRefresherService refreshService; public MountTableStore(StateStoreDriver driver) { 
super(MountTable.class, driver); } + + public void setRefreshService(MountTableRefresherService refreshService) { + this.refreshService = refreshService; + } + + /** + * Update mount table cache of this router as well as all other routers. + */ + protected void updateCacheAllRouters() { + if (refreshService != null) { + try { + refreshService.refresh(); + } catch (StateStoreUnavailableException e) { + LOG.error("Cannot refresh mount table: state store not available", e); + } + } + } + } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java index 924c96a5dc307..4b932d6d93953 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs.server.federation.store; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; import java.util.ArrayList; import java.util.List; @@ -110,4 +113,27 @@ public static List filterMultiple( } return matchingList; } + + /** + * Returns address in form of host:port, empty string if address is null. + * + * @param address address + * @return host:port + */ + public static String getHostPortString(InetSocketAddress address) { + if (null == address) { + return ""; + } + String hostName = address.getHostName(); + if (hostName.equals("0.0.0.0")) { + try { + hostName = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + LOG.error("Failed to get local host name", e); + return ""; + } + } + return hostName + ":" + address.getPort(); + } + } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java index eb117d64424c5..76c7e781ab9ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java @@ -33,6 +33,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; @@ -68,6 +70,7 @@ public AddMountTableEntryResponse addMountTableEntry( AddMountTableEntryResponse response = AddMountTableEntryResponse.newInstance(); response.setStatus(status); + updateCacheAllRouters(); return response; } @@ -86,6 +89,7 @@ public 
UpdateMountTableEntryResponse updateMountTableEntry( UpdateMountTableEntryResponse response = UpdateMountTableEntryResponse.newInstance(); response.setStatus(status); + updateCacheAllRouters(); return response; } @@ -110,6 +114,7 @@ public RemoveMountTableEntryResponse removeMountTableEntry( RemoveMountTableEntryResponse response = RemoveMountTableEntryResponse.newInstance(); response.setStatus(status); + updateCacheAllRouters(); return response; } @@ -151,4 +156,17 @@ public GetMountTableEntriesResponse getMountTableEntries( response.setTimestamp(Time.now()); return response; } + + @Override + public RefreshMountTableEntriesResponse refreshMountTableEntries( + RefreshMountTableEntriesRequest request) throws IOException { + // Because this refresh is done through admin API, it should always be force + // refresh. + boolean result = loadCache(true); + RefreshMountTableEntriesResponse response = + RefreshMountTableEntriesResponse.newInstance(); + response.setResult(result); + return response; + } + } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/RefreshMountTableEntriesRequest.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/RefreshMountTableEntriesRequest.java new file mode 100644 index 0000000000000..899afe75c08c4 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/RefreshMountTableEntriesRequest.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer; + +/** + * API request for refreshing mount table cached entries from state store. 
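+ *
+ * <p>Typical (illustrative) usage from an admin-side caller, mirroring what
+ * the dfsrouteradmin -refresh command does; the {@code mountTableManager}
+ * reference is assumed to come from a RouterClient:
+ * <pre>{@code
+ * RefreshMountTableEntriesResponse response =
+ *     mountTableManager.refreshMountTableEntries(
+ *         RefreshMountTableEntriesRequest.newInstance());
+ * boolean refreshed = response.getResult();
+ * }</pre>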
+ */ +public abstract class RefreshMountTableEntriesRequest { + + public static RefreshMountTableEntriesRequest newInstance() + throws IOException { + return StateStoreSerializer + .newRecord(RefreshMountTableEntriesRequest.class); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/RefreshMountTableEntriesResponse.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/RefreshMountTableEntriesResponse.java new file mode 100644 index 0000000000000..6c9ed77bc087f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/RefreshMountTableEntriesResponse.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer; + +/** + * API response for refreshing mount table entries cache from state store. + */ +public abstract class RefreshMountTableEntriesResponse { + + public static RefreshMountTableEntriesResponse newInstance() + throws IOException { + return StateStoreSerializer + .newRecord(RefreshMountTableEntriesResponse.class); + } + + @Public + @Unstable + public abstract boolean getResult(); + + @Public + @Unstable + public abstract void setResult(boolean result); +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RefreshMountTableEntriesRequestPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RefreshMountTableEntriesRequestPBImpl.java new file mode 100644 index 0000000000000..cec0699c18383 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RefreshMountTableEntriesRequestPBImpl.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesRequestProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesRequestProto.Builder; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesRequestProtoOrBuilder; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord; + +import com.google.protobuf.Message; + +/** + * Protobuf implementation of the state store API object + * RefreshMountTableEntriesRequest. + */ +public class RefreshMountTableEntriesRequestPBImpl + extends RefreshMountTableEntriesRequest implements PBRecord { + + private FederationProtocolPBTranslator translator = + new FederationProtocolPBTranslator<>( + RefreshMountTableEntriesRequestProto.class); + + public RefreshMountTableEntriesRequestPBImpl() { + } + + public RefreshMountTableEntriesRequestPBImpl( + RefreshMountTableEntriesRequestProto proto) { + this.translator.setProto(proto); + } + + @Override + public RefreshMountTableEntriesRequestProto getProto() { + // if builder is null build() returns null, calling getBuilder() to + // instantiate builder + this.translator.getBuilder(); + return this.translator.build(); + } + + @Override + public void setProto(Message proto) { + this.translator.setProto(proto); + } + + @Override + public void readInstance(String base64String) throws IOException { + this.translator.readInstance(base64String); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RefreshMountTableEntriesResponsePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RefreshMountTableEntriesResponsePBImpl.java new file mode 100644 index 0000000000000..5acf47906b28e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RefreshMountTableEntriesResponsePBImpl.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesResponseProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesResponseProto.Builder; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesResponseProtoOrBuilder; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; +import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord; + +import com.google.protobuf.Message; + +/** + * Protobuf implementation of the state store API object + * RefreshMountTableEntriesResponse. + */ +public class RefreshMountTableEntriesResponsePBImpl + extends RefreshMountTableEntriesResponse implements PBRecord { + + private FederationProtocolPBTranslator translator = + new FederationProtocolPBTranslator<>( + RefreshMountTableEntriesResponseProto.class); + + public RefreshMountTableEntriesResponsePBImpl() { + } + + public RefreshMountTableEntriesResponsePBImpl( + RefreshMountTableEntriesResponseProto proto) { + this.translator.setProto(proto); + } + + @Override + public RefreshMountTableEntriesResponseProto getProto() { + return this.translator.build(); + } + + @Override + public void setProto(Message proto) { + this.translator.setProto(proto); + } + + @Override + public void readInstance(String base64String) throws IOException { + this.translator.readInstance(base64String); + } + + @Override + public boolean getResult() { + return this.translator.getProtoOrBuilder().getResult(); + }; + + @Override + public void setResult(boolean result) { + this.translator.getBuilder().setResult(result); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/RouterState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/RouterState.java index c90abcc155c6e..2fe6941ba14d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/RouterState.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/RouterState.java @@ -88,6 +88,10 @@ public static RouterState newInstance(String addr, long startTime, public abstract long getDateStarted(); + public abstract void setAdminAddress(String adminAddress); + + public abstract String getAdminAddress(); + /** * Get the identifier for the Router. It uses the address. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/RouterStatePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/RouterStatePBImpl.java index 23a61f92b7d5b..d837386585f25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/RouterStatePBImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/RouterStatePBImpl.java @@ -199,4 +199,14 @@ public void setDateCreated(long time) { public long getDateCreated() { return this.translator.getProtoOrBuilder().getDateCreated(); } + + @Override + public void setAdminAddress(String adminAddress) { + this.translator.getBuilder().setAdminAddress(adminAddress); + } + + @Override + public String getAdminAddress() { + return this.translator.getProtoOrBuilder().getAdminAddress(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index bdaabe821f87a..27c42cd634d65 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -54,6 +54,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; @@ -107,7 +109,8 @@ private String getUsage(String cmd) { if (cmd == null) { String[] commands = {"-add", "-update", "-rm", "-ls", "-setQuota", "-clrQuota", - "-safemode", "-nameservice", "-getDisabledNameservices"}; + "-safemode", "-nameservice", "-getDisabledNameservices", + "-refresh"}; StringBuilder usage = new StringBuilder(); usage.append("Usage: hdfs dfsrouteradmin :\n"); for (int i = 0; i < commands.length; i++) { @@ -142,6 +145,8 @@ private String getUsage(String cmd) { return "\t[-nameservice enable | disable ]"; } else if (cmd.equals("-getDisabledNameservices")) { return "\t[-getDisabledNameservices]"; + } else if (cmd.equals("-refresh")) { + return "\t[-refresh]"; } return getUsage(null); } @@ -230,9 +235,10 @@ public int run(String[] argv) throws Exception { printUsage(cmd); return exitCode; } + String address = null; // Initialize RouterClient try { - String address = getConf().getTrimmed( + address = getConf().getTrimmed( RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT); InetSocketAddress routerSocket = NetUtils.createSocketAddr(address); @@ -302,6 +308,8 @@ public int run(String[] argv) throws Exception { manageNameservice(subcmd, nsId); } else if ("-getDisabledNameservices".equals(cmd)) { 
getDisabledNameservices(); + } else if ("-refresh".equals(cmd)) { + refresh(address); } else { throw new IllegalArgumentException("Unknown Command: " + cmd); } @@ -337,6 +345,27 @@ public int run(String[] argv) throws Exception { return exitCode; } + private void refresh(String address) throws IOException { + if (refreshRouterCache()) { + System.out.println( + "Successfully updated mount table cache on router " + address); + } + } + + /** + * Refresh mount table cache on connected router. + * + * @return true if cache refreshed successfully + * @throws IOException + */ + private boolean refreshRouterCache() throws IOException { + RefreshMountTableEntriesResponse response = + client.getMountTableManager().refreshMountTableEntries( + RefreshMountTableEntriesRequest.newInstance()); + return response.getResult(); + } + + /** * Add a mount table entry or update if it exists. * diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto index b1a62b1c345bc..17ae299bcd2ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto @@ -193,6 +193,7 @@ message RouterRecordProto { optional string version = 6; optional string compileInfo = 7; optional uint64 dateStarted = 8; + optional string adminAddress = 9; } message GetRouterRegistrationRequestProto { @@ -219,6 +220,13 @@ message RouterHeartbeatResponseProto { optional bool status = 1; } +message RefreshMountTableEntriesRequestProto { +} + +message RefreshMountTableEntriesResponseProto { + optional bool result = 1; +} + ///////////////////////////////////////////////// // Route State ///////////////////////////////////////////////// diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto index f3a2b6e8abc33..34a012acd8739 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto @@ -74,4 +74,9 @@ service RouterAdminProtocolService { * Get the list of disabled name services. */ rpc getDisabledNameservices(GetDisabledNameservicesRequestProto) returns (GetDisabledNameservicesResponseProto); + + /** + * Refresh mount entries + */ + rpc refreshMountTableEntries(RefreshMountTableEntriesRequestProto) returns(RefreshMountTableEntriesResponseProto); } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index afb3c32ba6d9d..72f6c2f110478 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -547,4 +547,38 @@ + + dfs.federation.router.mount-table.cache.update + false + Set true to enable MountTableRefreshService. This service + updates mount table cache immediately after adding, modifying or + deleting the mount table entries. If this service is not enabled + mount table cache are refreshed periodically by + StateStoreCacheUpdateService + + + + + dfs.federation.router.mount-table.cache.update.timeout + 1m + This property defines how long to wait for all the + admin servers to finish their mount table cache update. 
This setting + supports multiple time unit suffixes as described in + dfs.federation.router.safemode.extension. + + + + + dfs.federation.router.mount-table.cache.update.client.max.time + + 5m + Remote router mount table cache is updated through + RouterClient(RPC call). To improve performance, RouterClient + connections are cached but it should not be kept in cache forever. + This property defines the max time a connection can be cached. This + setting supports multiple time unit suffixes as described in + dfs.federation.router.safemode.extension. + + + \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index 72bf6af965ee4..adc43838fe5b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -230,6 +230,12 @@ Ls command will show below information for each mount table entry: Source Destinations Owner Group Mode Quota/Usage /path ns0->/path root supergroup rwxr-xr-x [NsQuota: 50/0, SsQuota: 100 B/0 B] +Mount table cache is refreshed periodically but it can also be refreshed by executing refresh command: + + [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -refresh + +The above command will refresh cache of the connected router. This command is redundant when mount table refresh service is enabled as the service will always keep the cache updated. + #### Multiple subclusters A mount point also supports mapping multiple subclusters. For example, to create a mount point that stores files in subclusters `ns1` and `ns2`. @@ -380,6 +386,9 @@ The connection to the State Store and the internal caching at the Router. | dfs.federation.router.store.connection.test | 60000 | How often to check for the connection to the State Store in milliseconds. | | dfs.federation.router.cache.ttl | 60000 | How often to refresh the State Store caches in milliseconds. | | dfs.federation.router.store.membership.expiration | 300000 | Expiration time in milliseconds for a membership record. | +| dfs.federation.router.mount-table.cache.update | false | If true, Mount table cache is updated whenever a mount table entry is added, modified or removed for all the routers. | +| dfs.federation.router.mount-table.cache.update.timeout | 1m | Max time to wait for all the routers to finish their mount table cache update. | +| dfs.federation.router.mount-table.cache.update.client.max.time | 5m | Max time a RouterClient connection can be cached. 
| ### Routing diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java index c48e6e26b9eac..5095c6b139c3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java @@ -56,6 +56,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; +import org.apache.hadoop.hdfs.server.federation.store.RouterStore; +import org.apache.hadoop.hdfs.server.federation.store.records.RouterState; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.test.GenericTestUtils; @@ -316,4 +318,29 @@ public Object answer(InvocationOnMock invocation) throws Throwable { }).when(spyHAContext).checkOperation(any(OperationCategory.class)); Whitebox.setInternalState(namesystem, "haContext", spyHAContext); } + + /** + * Wait for a number of routers to be registered in state store. + * + * @param stateManager number of routers to be registered. + * @param routerCount number of routers to be registered. + * @param tiemout max wait time in ms + */ + public static void waitRouterRegistered(RouterStore stateManager, + long routerCount, int timeout) throws Exception { + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + try { + List cachedRecords = stateManager.getCachedRecords(); + if (cachedRecords.size() == routerCount) { + return true; + } + } catch (IOException e) { + // Ignore + } + return false; + } + }, 100, timeout); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterConfigBuilder.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterConfigBuilder.java index be0de529b514d..6d9b2c08779f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterConfigBuilder.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterConfigBuilder.java @@ -38,6 +38,7 @@ public class RouterConfigBuilder { private boolean enableMetrics = false; private boolean enableQuota = false; private boolean enableSafemode = false; + private boolean enableCacheRefresh; public RouterConfigBuilder(Configuration configuration) { this.conf = configuration; @@ -104,6 +105,11 @@ public RouterConfigBuilder safemode(boolean enable) { return this; } + public RouterConfigBuilder refreshCache(boolean enable) { + this.enableCacheRefresh = enable; + return this; + } + public RouterConfigBuilder rpc() { return this.rpc(true); } @@ -140,6 +146,10 @@ public RouterConfigBuilder safemode() { return this.safemode(true); } + public RouterConfigBuilder refreshCache() { + return this.refreshCache(true); + } + public Configuration build() { conf.setBoolean(RBFConfigKeys.DFS_ROUTER_STORE_ENABLE, this.enableStateStore); @@ -158,6 +168,8 @@ public Configuration build() { this.enableQuota); conf.setBoolean(RBFConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE, this.enableSafemode); + conf.setBoolean(RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE, + 
this.enableCacheRefresh); return conf; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index d0e3e50fce4e6..445022bbce97a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -799,6 +799,28 @@ public void testNameserviceManager() throws Exception { assertTrue(err.toString().contains("No arguments allowed")); } + @Test + public void testRefreshMountTableCache() throws Exception { + String src = "/refreshMount"; + + // create mount table entry + String[] argv = new String[] {"-add", src, "refreshNS0", "/refreshDest"}; + assertEquals(0, ToolRunner.run(admin, argv)); + + // refresh the mount table entry cache + System.setOut(new PrintStream(out)); + argv = new String[] {"-refresh"}; + assertEquals(0, ToolRunner.run(admin, argv)); + assertTrue( + out.toString().startsWith("Successfully updated mount table cache")); + + // Now ls should return that mount table entry + out.reset(); + argv = new String[] {"-ls", src}; + assertEquals(0, ToolRunner.run(admin, argv)); + assertTrue(out.toString().contains(src)); + } + /** * Wait for the Router transforming to expected state. * @param expectedState Expected Router state. @@ -836,8 +858,7 @@ public void testUpdateNonExistingMountTable() throws Exception { } @Test - public void testUpdateDestinationForExistingMountTable() throws - Exception { + public void testUpdateDestinationForExistingMountTable() throws Exception { // Add a mount table firstly String nsId = "ns0"; String src = "/test-updateDestinationForExistingMountTable"; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java new file mode 100644 index 0000000000000..c90e614a5cd40 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java @@ -0,0 +1,396 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.curator.test.TestingServer; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hdfs.server.federation.FederationTestUtils; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; +import org.apache.hadoop.hdfs.server.federation.store.RouterStore; +import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.service.Service.STATE; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Time; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * This test class verifies that mount table cache is updated on all the routers + * when MountTableRefreshService is enabled and there is a change in mount table + * entries. 
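+ *
+ * <p>A minimal sketch of how the feature under test is switched on (the key
+ * names are the ones added to RBFConfigKeys; the rest of the Router setup is
+ * omitted):
+ * <pre>{@code
+ * Configuration conf = new RouterConfigBuilder().refreshCache().admin().rpc()
+ *     .heartbeat().build();
+ * // Equivalent to setting the boolean key directly:
+ * conf.setBoolean(RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE, true);
+ * // Optionally bound how long a refresh waits for all routers.
+ * conf.setTimeDuration(RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE_TIMEOUT,
+ *     1, TimeUnit.MINUTES);
+ * }</pre>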
+ */ +public class TestRouterMountTableCacheRefresh { + private static TestingServer curatorTestingServer; + private static MiniRouterDFSCluster cluster; + private static RouterContext routerContext; + private static MountTableManager mountTableManager; + + @BeforeClass + public static void setUp() throws Exception { + curatorTestingServer = new TestingServer(); + curatorTestingServer.start(); + final String connectString = curatorTestingServer.getConnectString(); + int numNameservices = 2; + cluster = new MiniRouterDFSCluster(false, numNameservices); + Configuration conf = new RouterConfigBuilder().refreshCache().admin().rpc() + .heartbeat().build(); + conf.setClass(RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS, + RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT, + FileSubclusterResolver.class); + conf.set(CommonConfigurationKeys.ZK_ADDRESS, connectString); + conf.setBoolean(RBFConfigKeys.DFS_ROUTER_STORE_ENABLE, true); + cluster.addRouterOverrides(conf); + cluster.startCluster(); + cluster.startRouters(); + cluster.waitClusterUp(); + routerContext = cluster.getRandomRouter(); + RouterStore routerStateManager = + routerContext.getRouter().getRouterStateManager(); + mountTableManager = routerContext.getAdminClient().getMountTableManager(); + // wait for one minute for all the routers to get registered + FederationTestUtils.waitRouterRegistered(routerStateManager, + numNameservices, 60000); + } + + @AfterClass + public static void destory() { + try { + curatorTestingServer.close(); + cluster.shutdown(); + } catch (IOException e) { + // do nothing + } + } + + @After + public void tearDown() throws IOException { + clearEntries(); + } + + private void clearEntries() throws IOException { + List result = getMountTableEntries(); + for (MountTable mountTable : result) { + RemoveMountTableEntryResponse removeMountTableEntry = + mountTableManager.removeMountTableEntry(RemoveMountTableEntryRequest + .newInstance(mountTable.getSourcePath())); + assertTrue(removeMountTableEntry.getStatus()); + } + } + + /** + * addMountTableEntry API should internally update the cache on all the + * routers. + */ + @Test + public void testMountTableEntriesCacheUpdatedAfterAddAPICall() + throws IOException { + + // Existing mount table size + int existingEntriesCount = getNumMountTableEntries(); + String srcPath = "/addPath"; + MountTable newEntry = MountTable.newInstance(srcPath, + Collections.singletonMap("ns0", "/addPathDest"), Time.now(), + Time.now()); + addMountTableEntry(mountTableManager, newEntry); + + // When Add entry is done, all the routers must have updated its mount table + // entry + List routers = getRouters(); + for (RouterContext rc : routers) { + List result = + getMountTableEntries(rc.getAdminClient().getMountTableManager()); + assertEquals(1 + existingEntriesCount, result.size()); + MountTable mountTableResult = result.get(0); + assertEquals(srcPath, mountTableResult.getSourcePath()); + } + } + + /** + * removeMountTableEntry API should internally update the cache on all the + * routers. 
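+   * The entry is added through a single router's admin interface and then
+   * read back through every running router's admin client to confirm the
+   * push-based refresh.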
+ */ + @Test + public void testMountTableEntriesCacheUpdatedAfterRemoveAPICall() + throws IOException { + // add + String srcPath = "/removePathSrc"; + MountTable newEntry = MountTable.newInstance(srcPath, + Collections.singletonMap("ns0", "/removePathDest"), Time.now(), + Time.now()); + addMountTableEntry(mountTableManager, newEntry); + int addCount = getNumMountTableEntries(); + assertEquals(1, addCount); + + // remove + RemoveMountTableEntryResponse removeMountTableEntry = + mountTableManager.removeMountTableEntry( + RemoveMountTableEntryRequest.newInstance(srcPath)); + assertTrue(removeMountTableEntry.getStatus()); + + int removeCount = getNumMountTableEntries(); + assertEquals(addCount - 1, removeCount); + } + + /** + * updateMountTableEntry API should internally update the cache on all the + * routers. + */ + @Test + public void testMountTableEntriesCacheUpdatedAfterUpdateAPICall() + throws IOException { + // add + String srcPath = "/updatePathSrc"; + MountTable newEntry = MountTable.newInstance(srcPath, + Collections.singletonMap("ns0", "/updatePathDest"), Time.now(), + Time.now()); + addMountTableEntry(mountTableManager, newEntry); + int addCount = getNumMountTableEntries(); + assertEquals(1, addCount); + + // update + String key = "ns1"; + String value = "/updatePathDest2"; + MountTable upateEntry = MountTable.newInstance(srcPath, + Collections.singletonMap(key, value), Time.now(), Time.now()); + UpdateMountTableEntryResponse updateMountTableEntry = + mountTableManager.updateMountTableEntry( + UpdateMountTableEntryRequest.newInstance(upateEntry)); + assertTrue(updateMountTableEntry.getStatus()); + MountTable updatedMountTable = getMountTableEntry(srcPath); + assertNotNull("Updated mount table entrty cannot be null", + updatedMountTable); + assertEquals(1, updatedMountTable.getDestinations().size()); + assertEquals(key, + updatedMountTable.getDestinations().get(0).getNameserviceId()); + assertEquals(value, updatedMountTable.getDestinations().get(0).getDest()); + } + + /** + * After caching RouterClient if router goes down, refresh should be + * successful on other available router. The router which is not running + * should be ignored. 
+ */ + @Test + public void testCachedRouterClientBehaviourAfterRouterStoped() + throws IOException { + String srcPath = "/addPathClientCache"; + MountTable newEntry = MountTable.newInstance(srcPath, + Collections.singletonMap("ns0", "/addPathClientCacheDest"), Time.now(), + Time.now()); + addMountTableEntry(mountTableManager, newEntry); + + // When Add entry is done, all the routers must have updated its mount table + // entry + List routers = getRouters(); + for (RouterContext rc : routers) { + List result = + getMountTableEntries(rc.getAdminClient().getMountTableManager()); + assertEquals(1, result.size()); + MountTable mountTableResult = result.get(0); + assertEquals(srcPath, mountTableResult.getSourcePath()); + } + + // Lets stop one router + for (RouterContext rc : routers) { + InetSocketAddress adminServerAddress = + rc.getRouter().getAdminServerAddress(); + if (!routerContext.getRouter().getAdminServerAddress() + .equals(adminServerAddress)) { + cluster.stopRouter(rc); + break; + } + } + + srcPath = "/addPathClientCache2"; + newEntry = MountTable.newInstance(srcPath, + Collections.singletonMap("ns0", "/addPathClientCacheDest2"), Time.now(), + Time.now()); + addMountTableEntry(mountTableManager, newEntry); + for (RouterContext rc : getRouters()) { + List result = + getMountTableEntries(rc.getAdminClient().getMountTableManager()); + assertEquals(2, result.size()); + } + } + + private List getRouters() { + List result = new ArrayList<>(); + for (RouterContext rc : cluster.getRouters()) { + if (rc.getRouter().getServiceState() == STATE.STARTED) { + result.add(rc); + } + } + return result; + } + + @Test + public void testRefreshMountTableEntriesAPI() throws IOException { + RefreshMountTableEntriesRequest request = + RefreshMountTableEntriesRequest.newInstance(); + RefreshMountTableEntriesResponse refreshMountTableEntriesRes = + mountTableManager.refreshMountTableEntries(request); + // refresh should be successful + assertTrue(refreshMountTableEntriesRes.getResult()); + } + + /** + * Verify cache update timeouts when any of the router takes more time than + * the configured timeout period. + */ + @Test(timeout = 10000) + public void testMountTableEntriesCacheUpdateTimeout() throws IOException { + // Resources will be closed when router is closed + @SuppressWarnings("resource") + MountTableRefresherService mountTableRefresherService = + new MountTableRefresherService(routerContext.getRouter()) { + @Override + protected MountTableRefresherThread getLocalRefresher( + String adminAddress) { + return new MountTableRefresherThread(null, adminAddress) { + @Override + public void run() { + try { + // Sleep 1 minute + Thread.sleep(60000); + } catch (InterruptedException e) { + // Do nothing + } + } + }; + } + }; + Configuration config = routerContext.getRouter().getConfig(); + config.setTimeDuration(RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE_TIMEOUT, 5, + TimeUnit.SECONDS); + mountTableRefresherService.init(config); + // One router is not responding for 1 minute, still refresh should + // finished in 5 second as cache update timeout is set 5 second. + mountTableRefresherService.refresh(); + // Test case timeout is assert for this test case. + } + + /** + * Verify Cached RouterClient connections are removed from cache and closed + * when their max live time is elapsed. 
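+   * The test counts client creations and closures by overriding the
+   * service's create and close hooks, then waits until the two counters
+   * match after the configured eviction period.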
+ */ + @Test + public void testRouterClientConnectionExpiration() throws Exception { + final AtomicInteger createCounter = new AtomicInteger(); + final AtomicInteger removeCounter = new AtomicInteger(); + // Resources will be closed when router is closed + @SuppressWarnings("resource") + MountTableRefresherService mountTableRefresherService = + new MountTableRefresherService(routerContext.getRouter()) { + @Override + protected void closeRouterClient(RouterClient client) { + super.closeRouterClient(client); + removeCounter.incrementAndGet(); + } + + @Override + protected RouterClient createRouterClient( + InetSocketAddress routerSocket, Configuration config) + throws IOException { + createCounter.incrementAndGet(); + return super.createRouterClient(routerSocket, config); + } + }; + int clientCacheTime = 2000; + Configuration config = routerContext.getRouter().getConfig(); + config.setTimeDuration( + RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE_CLIENT_MAX_TIME, clientCacheTime, + TimeUnit.MILLISECONDS); + mountTableRefresherService.init(config); + // Do refresh to created RouterClient + mountTableRefresherService.refresh(); + assertNotEquals("No RouterClient is created.", 0, createCounter.get()); + /* + * Wait for clients to expire. Lets wait triple the cache eviction period. + * After cache eviction period all created client must be removed and + * closed. + */ + GenericTestUtils.waitFor(() -> createCounter.get() == removeCounter.get(), + 100, 3 * clientCacheTime); + } + + private int getNumMountTableEntries() throws IOException { + List records = getMountTableEntries(); + int oldEntriesCount = records.size(); + return oldEntriesCount; + } + + private MountTable getMountTableEntry(String srcPath) throws IOException { + List mountTableEntries = getMountTableEntries(); + for (MountTable mountTable : mountTableEntries) { + String sourcePath = mountTable.getSourcePath(); + if (srcPath.equals(sourcePath)) { + return mountTable; + } + } + return null; + } + + private void addMountTableEntry(MountTableManager mountTableMgr, + MountTable newEntry) throws IOException { + AddMountTableEntryRequest addRequest = + AddMountTableEntryRequest.newInstance(newEntry); + AddMountTableEntryResponse addResponse = + mountTableMgr.addMountTableEntry(addRequest); + assertTrue(addResponse.getStatus()); + } + + private List getMountTableEntries() throws IOException { + return getMountTableEntries(mountTableManager); + } + + private List getMountTableEntries( + MountTableManager mountTableManagerParam) throws IOException { + GetMountTableEntriesRequest request = + GetMountTableEntriesRequest.newInstance("/"); + return mountTableManagerParam.getMountTableEntries(request).getEntries(); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md index eba81afe3bf7c..a967ee4342bfb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md @@ -437,6 +437,7 @@ Usage: [-safemode enter | leave | get] [-nameservice disable | enable ] [-getDisabledNameservices] + [-refresh] | COMMAND\_OPTION | Description | |:---- |:---- | @@ -449,6 +450,7 @@ Usage: | `-safemode` `enter` `leave` `get` | Manually set the Router entering or leaving safe mode. The option *get* will be used for verifying if the Router is in safe mode state. | | `-nameservice` `disable` `enable` *nameservice* | Disable/enable a name service from the federation. 
If disabled, requests will not go to that name service. | | `-getDisabledNameservices` | Get the name services that are disabled in the federation. | +| `-refresh` | Update mount table cache of the connected router. | The commands for managing Router-based federation. See [Mount table management](../hadoop-hdfs-rbf/HDFSRouterFederation.html#Mount_table_management) for more info. From 1dc01e59af9a504e473d696be3d40e056db450ab Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Wed, 2 Jan 2019 10:38:33 -0800 Subject: [PATCH 0281/1308] HDFS-14167. RBF: Add stale nodes to federation metrics. Contributed by Inigo Goiri. --- .../federation/metrics/FederationMBean.java | 6 ++++++ .../federation/metrics/FederationMetrics.java | 6 ++++++ .../metrics/NamenodeBeanMetrics.java | 7 ++++++- .../resolver/MembershipNamenodeResolver.java | 1 + .../resolver/NamenodeStatusReport.java | 18 +++++++++++++++--- .../router/NamenodeHeartbeatService.java | 1 + .../store/records/MembershipStats.java | 4 ++++ .../records/impl/pb/MembershipStatsPBImpl.java | 10 ++++++++++ .../src/main/proto/FederationProtocol.proto | 1 + .../metrics/TestFederationMetrics.java | 7 +++++++ .../store/records/TestMembershipState.java | 3 +++ 11 files changed, 60 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java index 79fb3e4326e8e..b37f5efb94a81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java @@ -106,6 +106,12 @@ public interface FederationMBean { */ int getNumDeadNodes(); + /** + * Get the number of stale datanodes. + * @return Number of stale datanodes. + */ + int getNumStaleNodes(); + /** * Get the number of decommissioning datanodes. * @return Number of decommissioning datanodes. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java index 6a0a46e89e83c..b3fe6cc9e20f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java @@ -413,6 +413,12 @@ public int getNumDeadNodes() { return getNameserviceAggregatedInt(MembershipStats::getNumOfDeadDatanodes); } + @Override + public int getNumStaleNodes() { + return getNameserviceAggregatedInt( + MembershipStats::getNumOfStaleDatanodes); + } + @Override public int getNumDecommissioningNodes() { return getNameserviceAggregatedInt( diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java index a05fdc144989d..1b97b1aa9c843 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java @@ -631,7 +631,12 @@ public int getNumDeadDataNodes() { @Override public int getNumStaleDataNodes() { - return -1; + try { + return getFederationMetrics().getNumStaleNodes(); + } catch (IOException e) { + LOG.debug("Failed to get number of stale nodes", e.getMessage()); + } + return 0; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java index 2707304f6024d..178db1b3dde55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java @@ -280,6 +280,7 @@ public boolean registerNamenode(NamenodeStatusReport report) report.getNumDecommissioningDatanodes()); stats.setNumOfActiveDatanodes(report.getNumLiveDatanodes()); stats.setNumOfDeadDatanodes(report.getNumDeadDatanodes()); + stats.setNumOfStaleDatanodes(report.getNumStaleDatanodes()); stats.setNumOfDecomActiveDatanodes(report.getNumDecomLiveDatanodes()); stats.setNumOfDecomDeadDatanodes(report.getNumDecomDeadDatanodes()); record.setStats(stats); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java index b121e24a0e4e2..5b603facb0dcc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java @@ -42,6 +42,7 @@ public class NamenodeStatusReport { /** Datanodes stats. 
*/ private int liveDatanodes = -1; private int deadDatanodes = -1; + private int staleDatanodes = -1; /** Decommissioning datanodes. */ private int decomDatanodes = -1; /** Live decommissioned datanodes. */ @@ -223,14 +224,16 @@ public boolean getSafemode() { * * @param numLive Number of live nodes. * @param numDead Number of dead nodes. + * @param numStale Number of stale nodes. * @param numDecom Number of decommissioning nodes. * @param numLiveDecom Number of decommissioned live nodes. * @param numDeadDecom Number of decommissioned dead nodes. */ - public void setDatanodeInfo(int numLive, int numDead, int numDecom, - int numLiveDecom, int numDeadDecom) { + public void setDatanodeInfo(int numLive, int numDead, int numStale, + int numDecom, int numLiveDecom, int numDeadDecom) { this.liveDatanodes = numLive; this.deadDatanodes = numDead; + this.staleDatanodes = numStale; this.decomDatanodes = numDecom; this.liveDecomDatanodes = numLiveDecom; this.deadDecomDatanodes = numDeadDecom; @@ -247,7 +250,7 @@ public int getNumLiveDatanodes() { } /** - * Get the number of dead blocks. + * Get the number of dead nodes. * * @return The number of dead nodes. */ @@ -255,6 +258,15 @@ public int getNumDeadDatanodes() { return this.deadDatanodes; } + /** + * Get the number of stale nodes. + * + * @return The number of stale nodes. + */ + public int getNumStaleDatanodes() { + return this.staleDatanodes; + } + /** * Get the number of decommissionining nodes. * diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java index 871ebaf9eef50..475e90d7981e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java @@ -338,6 +338,7 @@ private void updateJMXParameters( report.setDatanodeInfo( jsonObject.getInt("NumLiveDataNodes"), jsonObject.getInt("NumDeadDataNodes"), + jsonObject.getInt("NumStaleDataNodes"), jsonObject.getInt("NumDecommissioningDataNodes"), jsonObject.getInt("NumDecomLiveDataNodes"), jsonObject.getInt("NumDecomDeadDataNodes")); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java index 654140cbd529f..d452cd2c4022f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java @@ -81,6 +81,10 @@ public static MembershipStats newInstance() throws IOException { public abstract int getNumOfDeadDatanodes(); + public abstract void setNumOfStaleDatanodes(int nodes); + + public abstract int getNumOfStaleDatanodes(); + public abstract void setNumOfDecommissioningDatanodes(int nodes); public abstract int getNumOfDecommissioningDatanodes(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java index 3347bc677f71b..50ecbf3d48a67 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java @@ -168,6 +168,16 @@ public int getNumOfDeadDatanodes() { return this.translator.getProtoOrBuilder().getNumOfDeadDatanodes(); } + @Override + public void setNumOfStaleDatanodes(int nodes) { + this.translator.getBuilder().setNumOfStaleDatanodes(nodes); + } + + @Override + public int getNumOfStaleDatanodes() { + return this.translator.getProtoOrBuilder().getNumOfStaleDatanodes(); + } + @Override public void setNumOfDecommissioningDatanodes(int nodes) { this.translator.getBuilder().setNumOfDecommissioningDatanodes(nodes); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto index 17ae299bcd2ca..1e5e37b3e2283 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto @@ -45,6 +45,7 @@ message NamenodeMembershipStatsRecordProto { optional uint32 numOfDecommissioningDatanodes = 22; optional uint32 numOfDecomActiveDatanodes = 23; optional uint32 numOfDecomDeadDatanodes = 24; + optional uint32 numOfStaleDatanodes = 25; } message NamenodeMembershipRecordProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java index 94799f35d92b6..5d984e8645a30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java @@ -137,6 +137,8 @@ public void testNamenodeStatsDataSource() throws IOException, JSONException { stats.getNumOfActiveDatanodes()); assertEquals(json.getLong("numOfDeadDatanodes"), stats.getNumOfDeadDatanodes()); + assertEquals(json.getLong("numOfStaleDatanodes"), + stats.getNumOfStaleDatanodes()); assertEquals(json.getLong("numOfDecommissioningDatanodes"), stats.getNumOfDecommissioningDatanodes()); assertEquals(json.getLong("numOfDecomActiveDatanodes"), @@ -187,6 +189,8 @@ public void testNameserviceStatsDataSource() json.getLong("numOfActiveDatanodes")); assertEquals(stats.getNumOfDeadDatanodes(), json.getLong("numOfDeadDatanodes")); + assertEquals(stats.getNumOfStaleDatanodes(), + json.getLong("numOfStaleDatanodes")); assertEquals(stats.getNumOfDecommissioningDatanodes(), json.getLong("numOfDecommissioningDatanodes")); assertEquals(stats.getNumOfDecomActiveDatanodes(), @@ -260,6 +264,7 @@ private void validateClusterStatsBean(FederationMBean bean) long numBlocks = 0; long numLive = 0; long numDead = 0; + long numStale = 0; long numDecom = 0; long numDecomLive = 0; long numDecomDead = 0; @@ -269,6 +274,7 @@ private void validateClusterStatsBean(FederationMBean bean) numBlocks += stats.getNumOfBlocks(); numLive += stats.getNumOfActiveDatanodes(); numDead += stats.getNumOfDeadDatanodes(); + 
numStale += stats.getNumOfStaleDatanodes(); numDecom += stats.getNumOfDecommissioningDatanodes(); numDecomLive += stats.getNumOfDecomActiveDatanodes(); numDecomDead += stats.getNumOfDecomDeadDatanodes(); @@ -277,6 +283,7 @@ private void validateClusterStatsBean(FederationMBean bean) assertEquals(numBlocks, bean.getNumBlocks()); assertEquals(numLive, bean.getNumLiveNodes()); assertEquals(numDead, bean.getNumDeadNodes()); + assertEquals(numStale, bean.getNumStaleNodes()); assertEquals(numDecom, bean.getNumDecommissioningNodes()); assertEquals(numDecomLive, bean.getNumDecomLiveNodes()); assertEquals(numDecomDead, bean.getNumDecomDeadNodes()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java index d922414b8a44b..1aac632784f2d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java @@ -47,6 +47,7 @@ public class TestMembershipState { private static final long NUM_BLOCKS = 300; private static final long NUM_FILES = 400; private static final int NUM_DEAD = 500; + private static final int NUM_STALE = 550; private static final int NUM_ACTIVE = 600; private static final int NUM_DECOM = 700; private static final int NUM_DECOM_ACTIVE = 800; @@ -73,6 +74,7 @@ private MembershipState createRecord() throws IOException { stats.setNumOfFiles(NUM_FILES); stats.setNumOfActiveDatanodes(NUM_ACTIVE); stats.setNumOfDeadDatanodes(NUM_DEAD); + stats.setNumOfStaleDatanodes(NUM_STALE); stats.setNumOfDecommissioningDatanodes(NUM_DECOM); stats.setNumOfDecomActiveDatanodes(NUM_DECOM_ACTIVE); stats.setNumOfDecomDeadDatanodes(NUM_DECOM_DEAD); @@ -101,6 +103,7 @@ private void validateRecord(MembershipState record) throws IOException { assertEquals(NUM_FILES, stats.getNumOfFiles()); assertEquals(NUM_ACTIVE, stats.getNumOfActiveDatanodes()); assertEquals(NUM_DEAD, stats.getNumOfDeadDatanodes()); + assertEquals(NUM_STALE, stats.getNumOfStaleDatanodes()); assertEquals(NUM_DECOM, stats.getNumOfDecommissioningDatanodes()); assertEquals(NUM_DECOM_ACTIVE, stats.getNumOfDecomActiveDatanodes()); assertEquals(NUM_DECOM_DEAD, stats.getNumOfDecomDeadDatanodes()); From f3cbf0eb9ace860cb2da77b09042e2056ef9255f Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Wed, 2 Jan 2019 10:49:00 -0800 Subject: [PATCH 0282/1308] HDFS-14161. RBF: Throw StandbyException instead of IOException so that client can retry when can not get connection. Contributed by Fei Hui. 
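This patch adds a ConnectionNullException and has RouterRpcClient translate it into a StandbyException, so the DFSClient failover policy retries another Router instead of failing the call outright. A minimal sketch of that translation follows, assuming the ConnectionNullException type introduced below; fetchConnection() is a hypothetical stand-in for RouterRpcClient#getConnection, not a real method.

    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.federation.router.ConnectionNullException;
    import org.apache.hadoop.ipc.StandbyException;

    /** Sketch only: mirrors the exception translation added to RouterRpcClient. */
    final class RetryFriendlyInvokerSketch {

      /** Pretend to invoke an RPC; rethrow connection failures as StandbyException. */
      Object invoke(String nsId, String rpcAddress) throws IOException {
        try {
          return fetchConnection(nsId, rpcAddress);
        } catch (ConnectionNullException ioe) {
          // StandbyException is retriable on the client side, so the caller's
          // failover policy tries the next Router rather than giving up.
          StandbyException se = new StandbyException(ioe.getMessage());
          se.initCause(ioe);
          throw se;
        }
      }

      /** Hypothetical helper that fails the same way getConnection() can. */
      private Object fetchConnection(String nsId, String rpcAddress)
          throws ConnectionNullException {
        throw new ConnectionNullException(
            "Cannot get a connection to " + rpcAddress);
      }
    }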
--- .../router/ConnectionNullException.java | 33 +++++++++++++++ .../federation/router/RouterRpcClient.java | 20 ++++++++-- .../federation/FederationTestUtils.java | 31 ++++++++++++++ .../TestRouterClientRejectOverload.java | 40 +++++++++++++++++++ 4 files changed, 120 insertions(+), 4 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionNullException.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionNullException.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionNullException.java new file mode 100644 index 0000000000000..53de6021589ed --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionNullException.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import java.io.IOException; + + +/** + * Exception when can not get a non-null connection. 
+ */ +public class ConnectionNullException extends IOException { + + private static final long serialVersionUID = 1L; + + public ConnectionNullException(String msg) { + super(msg); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index a21e9802c7c25..c4d3a20178381 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -270,7 +270,8 @@ private ConnectionContext getConnection(UserGroupInformation ugi, String nsId, } if (connection == null) { - throw new IOException("Cannot get a connection to " + rpcAddress); + throw new ConnectionNullException("Cannot get a connection to " + + rpcAddress); } return connection; } @@ -363,9 +364,9 @@ private Object invokeMethod( Map ioes = new LinkedHashMap<>(); for (FederationNamenodeContext namenode : namenodes) { ConnectionContext connection = null; + String nsId = namenode.getNameserviceId(); + String rpcAddress = namenode.getRpcAddress(); try { - String nsId = namenode.getNameserviceId(); - String rpcAddress = namenode.getRpcAddress(); connection = this.getConnection(ugi, nsId, rpcAddress, protocol); ProxyAndInfo client = connection.getClient(); final Object proxy = client.getProxy(); @@ -394,6 +395,16 @@ private Object invokeMethod( } // RemoteException returned by NN throw (RemoteException) ioe; + } else if (ioe instanceof ConnectionNullException) { + if (this.rpcMonitor != null) { + this.rpcMonitor.proxyOpFailureCommunicate(); + } + LOG.error("Get connection for {} {} error: {}", nsId, rpcAddress, + ioe.getMessage()); + // Throw StandbyException so that client can retry + StandbyException se = new StandbyException(ioe.getMessage()); + se.initCause(ioe); + throw se; } else { // Other communication error, this is a failure // Communication retries are handled by the retry policy @@ -425,7 +436,8 @@ private Object invokeMethod( String addr = namenode.getRpcAddress(); IOException ioe = entry.getValue(); if (ioe instanceof StandbyException) { - LOG.error("{} {} at {} is in Standby", nsId, nnId, addr); + LOG.error("{} {} at {} is in Standby: {}", nsId, nnId, addr, + ioe.getMessage()); } else { LOG.error("{} {} at {} error: \"{}\"", nsId, nnId, addr, ioe.getMessage()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java index 5095c6b139c3e..d92edac35b6df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java @@ -52,6 +52,9 @@ import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; +import org.apache.hadoop.hdfs.server.federation.router.ConnectionManager; +import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient; +import 
org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; @@ -60,6 +63,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.RouterState; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.Whitebox; import org.mockito.invocation.InvocationOnMock; @@ -343,4 +347,31 @@ public Boolean get() { } }, 100, timeout); } + + /** + * Simulate that a RouterRpcServer, the ConnectionManager of its + * RouterRpcClient throws IOException when call getConnection. So the + * RouterRpcClient will get a null Connection. + * @param server RouterRpcServer + * @throws IOException + */ + public static void simulateThrowExceptionRouterRpcServer( + final RouterRpcServer server) throws IOException { + RouterRpcClient rpcClient = server.getRPCClient(); + ConnectionManager connectionManager = + new ConnectionManager(server.getConfig()); + ConnectionManager spyConnectionManager = spy(connectionManager); + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + LOG.info("Simulating connectionManager throw IOException {}", + invocation.getMock()); + throw new IOException("Simulate connectionManager throw IOException"); + } + }).when(spyConnectionManager).getConnection( + any(UserGroupInformation.class), any(String.class), any(Class.class)); + + Whitebox.setInternalState(rpcClient, "connectionManager", + spyConnectionManager); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java index 3c51e13182c92..066415935191c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.federation.router; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simulateSlowNamenode; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simulateThrowExceptionRouterRpcServer; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -240,4 +241,43 @@ public void run() { num <= expOverloadMax); } } + + @Test + public void testConnectionNullException() throws Exception { + setupCluster(false); + + // Choose 1st router + RouterContext routerContext = cluster.getRouters().get(0); + Router router = routerContext.getRouter(); + // This router will throw ConnectionNullException + simulateThrowExceptionRouterRpcServer(router.getRpcServer()); + + // Set dfs.client.failover.random.order false, to pick 1st router at first + Configuration conf = cluster.getRouterClientConf(); + conf.setBoolean("dfs.client.failover.random.order", false); + // Client to access Router Cluster + DFSClient routerClient = + new DFSClient(new 
URI("hdfs://fed"), conf); + + // Get router0 metrics + FederationRPCMetrics rpcMetrics0 = cluster.getRouters().get(0) + .getRouter().getRpcServer().getRPCMetrics(); + // Get router1 metrics + FederationRPCMetrics rpcMetrics1 = cluster.getRouters().get(1) + .getRouter().getRpcServer().getRPCMetrics(); + + // Original failures + long originalRouter0Failures = rpcMetrics0.getProxyOpFailureCommunicate(); + long originalRouter1Failures = rpcMetrics1.getProxyOpFailureCommunicate(); + + // RPC call must be successful + routerClient.getFileInfo("/"); + + // Router 0 failures will increase + assertEquals(originalRouter0Failures + 1, + rpcMetrics0.getProxyOpFailureCommunicate()); + // Router 1 failures will not change + assertEquals(originalRouter1Failures, + rpcMetrics1.getProxyOpFailureCommunicate()); + } } From 4244653e430374d77ce52e4791f5b4817092003a Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Wed, 9 Jan 2019 17:18:43 +0800 Subject: [PATCH 0283/1308] HDFS-14150. RBF: Quotas of the sub-cluster should be removed when removing the mount point. Contributed by Takanobu Asanuma. --- .../federation/router/RouterAdminServer.java | 23 +++++---- .../src/main/resources/hdfs-rbf-default.xml | 4 +- .../src/site/markdown/HDFSRouterFederation.md | 4 +- .../federation/router/TestRouterQuota.java | 48 ++++++++++++++++++- 4 files changed, 67 insertions(+), 12 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index 5bb7751cd1e92..18c19e087e889 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -250,23 +250,25 @@ public UpdateMountTableEntryResponse updateMountTableEntry( MountTable mountTable = request.getEntry(); if (mountTable != null && router.isQuotaEnabled()) { - synchronizeQuota(mountTable); + synchronizeQuota(mountTable.getSourcePath(), + mountTable.getQuota().getQuota(), + mountTable.getQuota().getSpaceQuota()); } return response; } /** * Synchronize the quota value across mount table and subclusters. - * @param mountTable Quota set in given mount table. + * @param path Source path in given mount table. + * @param nsQuota Name quota definition in given mount table. + * @param ssQuota Space quota definition in given mount table. 
* @throws IOException */ - private void synchronizeQuota(MountTable mountTable) throws IOException { - String path = mountTable.getSourcePath(); - long nsQuota = mountTable.getQuota().getQuota(); - long ssQuota = mountTable.getQuota().getSpaceQuota(); - - if (nsQuota != HdfsConstants.QUOTA_DONT_SET - || ssQuota != HdfsConstants.QUOTA_DONT_SET) { + private void synchronizeQuota(String path, long nsQuota, long ssQuota) + throws IOException { + if (router.isQuotaEnabled() && + (nsQuota != HdfsConstants.QUOTA_DONT_SET + || ssQuota != HdfsConstants.QUOTA_DONT_SET)) { HdfsFileStatus ret = this.router.getRpcServer().getFileInfo(path); if (ret != null) { this.router.getRpcServer().getQuotaModule().setQuota(path, nsQuota, @@ -278,6 +280,9 @@ private void synchronizeQuota(MountTable mountTable) throws IOException { @Override public RemoveMountTableEntryResponse removeMountTableEntry( RemoveMountTableEntryRequest request) throws IOException { + // clear sub-cluster's quota definition + synchronizeQuota(request.getSrcPath(), HdfsConstants.QUOTA_RESET, + HdfsConstants.QUOTA_RESET); return getMountTableStore().removeMountTableEntry(request); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index 72f6c2f110478..20ae778ef1d46 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -447,7 +447,9 @@ dfs.federation.router.quota.enable false - Set to true to enable quota system in Router. + Set to true to enable quota system in Router. When it's enabled, setting + or clearing sub-cluster's quota directly is not recommended since Router + Admin server will override sub-cluster's quota with global quota. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index adc43838fe5b4..959cd637dd933 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -143,6 +143,8 @@ For performance reasons, the Router caches the quota usage and updates it period will be used for quota-verification during each WRITE RPC call invoked in RouterRPCSever. See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html) for the quota detail. +Note: When global quota is enabled, setting or clearing sub-cluster's quota directly is not recommended since Router Admin server will override sub-cluster's quota with global quota. + ### State Store The (logically centralized, but physically distributed) State Store maintains: @@ -421,7 +423,7 @@ Global quota supported in federation. | Property | Default | Description| |:---- |:---- |:---- | -| dfs.federation.router.quota.enable | `false` | If `true`, the quota system enabled in the Router. | +| dfs.federation.router.quota.enable | `false` | If `true`, the quota system enabled in the Router. In that case, setting or clearing sub-cluster's quota directly is not recommended since Router Admin server will override sub-cluster's quota with global quota.| | dfs.federation.router.quota-cache.update.interval | 60s | How often the Router updates quota cache. This setting supports multiple time unit suffixes. If no suffix is specified then milliseconds is assumed. 
| Metrics diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java index 6a29446f80258..656b401ec2482 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java @@ -605,7 +605,7 @@ public void testQuotaRefreshAfterQuotaExceed() throws Exception { @Test public void testQuotaRefreshWhenDestinationNotPresent() throws Exception { long nsQuota = 5; - long ssQuota = 3*BLOCK_SIZE; + long ssQuota = 3 * BLOCK_SIZE; final FileSystem nnFs = nnContext1.getFileSystem(); // Add three mount tables: @@ -709,4 +709,50 @@ public void testQuotaRefreshWhenDestinationNotPresent() throws Exception { assertEquals(updatedSpace, cacheQuota2.getSpaceConsumed()); assertEquals(updatedSpace, mountQuota2.getSpaceConsumed()); } + + @Test + public void testClearQuotaDefAfterRemovingMountTable() throws Exception { + long nsQuota = 5; + long ssQuota = 3 * BLOCK_SIZE; + final FileSystem nnFs = nnContext1.getFileSystem(); + + // Add one mount tables: + // /setdir --> ns0---testdir15 + // Create destination directory + nnFs.mkdirs(new Path("/testdir15")); + + MountTable mountTable = MountTable.newInstance("/setdir", + Collections.singletonMap("ns0", "/testdir15")); + mountTable.setQuota(new RouterQuotaUsage.Builder().quota(nsQuota) + .spaceQuota(ssQuota).build()); + addMountTable(mountTable); + + // Update router quota + RouterQuotaUpdateService updateService = + routerContext.getRouter().getQuotaCacheUpdateService(); + updateService.periodicInvoke(); + + RouterQuotaManager quotaManager = + routerContext.getRouter().getQuotaManager(); + ClientProtocol client = nnContext1.getClient().getNamenode(); + QuotaUsage routerQuota = quotaManager.getQuotaUsage("/setdir"); + QuotaUsage subClusterQuota = client.getQuotaUsage("/testdir15"); + + // Verify current quota definitions + assertEquals(nsQuota, routerQuota.getQuota()); + assertEquals(ssQuota, routerQuota.getSpaceQuota()); + assertEquals(nsQuota, subClusterQuota.getQuota()); + assertEquals(ssQuota, subClusterQuota.getSpaceQuota()); + + // Remove mount table + removeMountTable("/setdir"); + updateService.periodicInvoke(); + routerQuota = quotaManager.getQuotaUsage("/setdir"); + subClusterQuota = client.getQuotaUsage("/testdir15"); + + // Verify quota definitions are cleared after removing the mount table + assertNull(routerQuota); + assertEquals(HdfsConstants.QUOTA_RESET, subClusterQuota.getQuota()); + assertEquals(HdfsConstants.QUOTA_RESET, subClusterQuota.getSpaceQuota()); + } } \ No newline at end of file From b8bcbd0ed2d9552ca4e5a4b429b8852a5932407c Mon Sep 17 00:00:00 2001 From: Surendra Singh Lilhore Date: Thu, 10 Jan 2019 16:18:23 +0530 Subject: [PATCH 0284/1308] HDFS-14191. RBF: Remove hard coded router status from FederationMetrics. Contributed by Ranith Sardar. 
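FederationMetrics#getRouterStatus() now reports the live RouterServiceState instead of a hard coded "RUNNING", NamenodeBeanMetrics#getSafemode() now consults that state, and the federationhealth.js JMX query typo (qrt -> qry) is fixed. Below is a hedged sketch of the status round trip exercised by the new TestRouterAdminCLI case, assuming the Router and RouterAdmin wiring shown in that test; statusAfterEnteringSafemode() is an illustrative helper, not part of the patch.

    import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics;
    import org.apache.hadoop.hdfs.server.federation.router.Router;
    import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
    import org.apache.hadoop.util.ToolRunner;

    /** Sketch only: toggles safemode via the admin CLI and reads the state back. */
    final class RouterStatusSketch {

      /** Enter safemode, then return what the metrics bean now reports. */
      static String statusAfterEnteringSafemode(RouterAdmin admin, Router router)
          throws Exception {
        ToolRunner.run(admin, new String[] {"-safemode", "enter"});
        FederationMetrics metrics = router.getMetrics();
        // Before this patch the bean always returned "RUNNING"; now it reflects
        // the actual router state, e.g. "SAFEMODE" here.
        return metrics.getRouterStatus();
      }
    }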
--- .../federation/metrics/FederationMetrics.java | 2 +- .../metrics/NamenodeBeanMetrics.java | 25 +++++++++++++- .../hdfs/server/federation/router/Router.java | 7 ++++ .../main/webapps/router/federationhealth.js | 2 +- .../federation/router/TestRouterAdminCLI.java | 33 ++++++++++++++++++- 5 files changed, 65 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java index b3fe6cc9e20f3..c66910cf97dc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java @@ -582,7 +582,7 @@ public String getBlockPoolId() { @Override public String getRouterStatus() { - return "RUNNING"; + return this.router.getRouterState().toString(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java index 1b97b1aa9c843..b08d9608c01cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.router.Router; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; +import org.apache.hadoop.hdfs.server.federation.router.RouterServiceState; import org.apache.hadoop.hdfs.server.federation.router.SubClusterTimeoutException; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; @@ -232,7 +233,29 @@ public long getProvidedCapacity() { @Override public String getSafemode() { - // We assume that the global federated view is never in safe mode + try { + if (!getRouter().isRouterState(RouterServiceState.SAFEMODE)) { + return "Safe mode is ON. " + this.getSafeModeTip(); + } + } catch (IOException e) { + return "Failed to get safemode status. Please check router" + + "log for more detail."; + } + return ""; + } + + private String getSafeModeTip() throws IOException { + Router rt = getRouter(); + String cmd = "Use \"hdfs dfsrouteradmin -safemode leave\" " + + "to turn safe mode off."; + if (rt.isRouterState(RouterServiceState.INITIALIZING) + || rt.isRouterState(RouterServiceState.UNINITIALIZED)) { + return "Router is in" + rt.getRouterState() + + "mode, the router will immediately return to " + + "normal mode after some time. " + cmd; + } else if (rt.isRouterState(RouterServiceState.SAFEMODE)) { + return "It was turned on manually. 
" + cmd; + } return ""; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 6a7437f29b587..0257162d1391b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -585,6 +585,13 @@ public RouterServiceState getRouterState() { return this.state; } + /** + * Compare router state. + */ + public boolean isRouterState(RouterServiceState routerState) { + return routerState.equals(this.state); + } + ///////////////////////////////////////////////////////// // Submodule getters ///////////////////////////////////////////////////////// diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js index bb8e05707e4d7..5da7b079ffe01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js @@ -35,7 +35,7 @@ var BEANS = [ {"name": "federation", "url": "/jmx?qry=Hadoop:service=Router,name=FederationState"}, {"name": "routerstat", "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"}, - {"name": "router", "url": "/jmx?qrt=Hadoop:service=NameNode,name=NameNodeInfo"}, + {"name": "router", "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"}, {"name": "mem", "url": "/jmx?qry=java.lang:type=Memory"} ]; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index 445022bbce97a..ab733dde8dffc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster; +import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; @@ -66,6 +67,7 @@ public class TestRouterAdminCLI { private static RouterAdmin admin; private static RouterClient client; + private static Router router; private static final String TEST_USER = "test-user"; @@ -80,6 +82,7 @@ public static void globalSetUp() throws Exception { // Build and start a router with State Store + admin + RPC Configuration conf = new RouterConfigBuilder() .stateStore() + .metrics() .admin() .rpc() .safemode() @@ -90,7 +93,7 @@ public static void globalSetUp() throws Exception { cluster.startRouters(); routerContext = cluster.getRandomRouter(); - Router router = routerContext.getRouter(); + router = routerContext.getRouter(); stateStore = router.getStateStore(); Configuration routerConf = 
new Configuration(); @@ -720,6 +723,34 @@ public void testManageSafeMode() throws Exception { err.reset(); } + @Test + public void testSafeModeStatus() throws Exception { + // ensure the Router become RUNNING state + waitState(RouterServiceState.RUNNING); + assertFalse(routerContext.getRouter().getSafemodeService().isInSafeMode()); + assertEquals(0, + ToolRunner.run(admin, new String[] {"-safemode", "enter" })); + + FederationMetrics metrics = router.getMetrics(); + String jsonString = metrics.getRouterStatus(); + + // verify state using FederationMetrics + assertEquals(RouterServiceState.SAFEMODE.toString(), jsonString); + assertTrue(routerContext.getRouter().getSafemodeService().isInSafeMode()); + + System.setOut(new PrintStream(out)); + assertEquals(0, + ToolRunner.run(admin, new String[] {"-safemode", "leave" })); + jsonString = metrics.getRouterStatus(); + // verify state + assertEquals(RouterServiceState.RUNNING.toString(), jsonString); + assertFalse(routerContext.getRouter().getSafemodeService().isInSafeMode()); + + out.reset(); + assertEquals(0, ToolRunner.run(admin, new String[] {"-safemode", "get" })); + assertTrue(out.toString().contains("false")); + } + @Test public void testCreateInvalidEntry() throws Exception { String[] argv = new String[] { From f4e2bfce585d762eaf26096613d135203f080eb3 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Fri, 11 Jan 2019 10:11:18 -0800 Subject: [PATCH 0285/1308] HDFS-13856. RBF: RouterAdmin should support dfsrouteradmin -refreshRouterArgs command. Contributed by yanghuafeng. --- .../federation/router/RouterAdminServer.java | 26 +- .../hdfs/tools/federation/RouterAdmin.java | 72 +++++ .../src/site/markdown/HDFSRouterFederation.md | 6 + .../router/TestRouterAdminGenericRefresh.java | 252 ++++++++++++++++++ .../src/site/markdown/HDFSCommands.md | 2 + 5 files changed, 357 insertions(+), 1 deletion(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminGenericRefresh.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index 18c19e087e889..027dd111441cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -23,12 +23,14 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.util.Collection; import java.util.Set; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HDFSPolicyProvider; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService; @@ -64,9 +66,15 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.ipc.GenericRefreshProtocol; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import 
org.apache.hadoop.ipc.RPC.Server; +import org.apache.hadoop.ipc.RefreshRegistry; +import org.apache.hadoop.ipc.RefreshResponse; +import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos; +import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB; +import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolServerSideTranslatorPB; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; @@ -81,7 +89,8 @@ * router. It is created, started, and stopped by {@link Router}. */ public class RouterAdminServer extends AbstractService - implements MountTableManager, RouterStateManager, NameserviceManager { + implements MountTableManager, RouterStateManager, NameserviceManager, + GenericRefreshProtocol { private static final Logger LOG = LoggerFactory.getLogger(RouterAdminServer.class); @@ -160,6 +169,15 @@ public RouterAdminServer(Configuration conf, Router router) router.setAdminServerAddress(this.adminAddress); iStateStoreCache = router.getSubclusterResolver() instanceof StateStoreCache; + + GenericRefreshProtocolServerSideTranslatorPB genericRefreshXlator = + new GenericRefreshProtocolServerSideTranslatorPB(this); + BlockingService genericRefreshService = + GenericRefreshProtocolProtos.GenericRefreshProtocolService. + newReflectiveBlockingService(genericRefreshXlator); + + DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class, + genericRefreshService, adminServer); } /** @@ -487,4 +505,10 @@ public static String getSuperUser() { public static String getSuperGroup(){ return superGroup; } + + @Override // GenericRefreshProtocol + public Collection refresh(String identifier, String[] args) { + // Let the registry handle as needed + return RefreshRegistry.defaultRegistry().dispatch(identifier, args); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 27c42cd634d65..37aad88565a0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -19,6 +19,8 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collection; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -26,8 +28,10 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; @@ -61,9 +65,14 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.RefreshResponse; import 
org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolClientSideTranslatorPB; +import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -147,6 +156,8 @@ private String getUsage(String cmd) { return "\t[-getDisabledNameservices]"; } else if (cmd.equals("-refresh")) { return "\t[-refresh]"; + } else if (cmd.equals("-refreshRouterArgs")) { + return "\t[-refreshRouterArgs [arg1..argn]]"; } return getUsage(null); } @@ -213,6 +224,10 @@ private boolean validateMin(String[] argv) { if (argv.length < 3) { return false; } + } else if ("-refreshRouterArgs".equals(cmd)) { + if (argv.length < 2) { + return false; + } } return true; } @@ -310,6 +325,8 @@ public int run(String[] argv) throws Exception { getDisabledNameservices(); } else if ("-refresh".equals(cmd)) { refresh(address); + } else if ("-refreshRouterArgs".equals(cmd)) { + exitCode = genericRefresh(argv, i); } else { throw new IllegalArgumentException("Unknown Command: " + cmd); } @@ -923,6 +940,61 @@ private void getDisabledNameservices() throws IOException { } } + public int genericRefresh(String[] argv, int i) throws IOException { + String hostport = argv[i++]; + String identifier = argv[i++]; + String[] args = Arrays.copyOfRange(argv, i, argv.length); + + // Get the current configuration + Configuration conf = getConf(); + + // for security authorization + // server principal for this call + // should be NN's one. + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, + conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "")); + + // Create the client + Class xface = GenericRefreshProtocolPB.class; + InetSocketAddress address = NetUtils.createSocketAddr(hostport); + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + + RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class); + GenericRefreshProtocolPB proxy = (GenericRefreshProtocolPB)RPC.getProxy( + xface, RPC.getProtocolVersion(xface), address, ugi, conf, + NetUtils.getDefaultSocketFactory(conf), 0); + + Collection responses = null; + try (GenericRefreshProtocolClientSideTranslatorPB xlator = + new GenericRefreshProtocolClientSideTranslatorPB(proxy)) { + // Refresh + responses = xlator.refresh(identifier, args); + + int returnCode = 0; + + // Print refresh responses + System.out.println("Refresh Responses:\n"); + for (RefreshResponse response : responses) { + System.out.println(response.toString()); + + if (returnCode == 0 && response.getReturnCode() != 0) { + // This is the first non-zero return code, so we should return this + returnCode = response.getReturnCode(); + } else if (returnCode != 0 && response.getReturnCode() != 0) { + // Then now we have multiple non-zero return codes, + // so we merge them into -1 + returnCode = -1; + } + } + return returnCode; + } finally { + if (responses == null) { + System.out.println("Failed to get response.\n"); + return -1; + } + } + } + /** * Normalize a path for that filesystem. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index 959cd637dd933..bcf8fa9c31cc9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -274,6 +274,12 @@ For example, one can disable `ns1`, list it and enable it again: This is useful when decommissioning subclusters or when one subcluster is missbehaving (e.g., low performance or unavailability). +### Router server generically refresh + +To trigger a runtime-refresh of the resource specified by \ on \. For example, to enable white list checking, we just need to send a refresh command other than restart the router server. + + [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -refreshRouterArgs [arg1..argn] + Client configuration -------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminGenericRefresh.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminGenericRefresh.java new file mode 100644 index 0000000000000..fd68116ad25c2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminGenericRefresh.java @@ -0,0 +1,252 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.federation.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.tools.federation.RouterAdmin; +import org.apache.hadoop.ipc.RefreshHandler; +import org.apache.hadoop.ipc.RefreshRegistry; +import org.apache.hadoop.ipc.RefreshResponse; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.mockito.Mockito; + +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * Before all tests, a router is spun up. + * Before each test, mock refresh handlers are created and registered. + * After each test, the mock handlers are unregistered. + * After all tests, the router is spun down. 
+ */ +public class TestRouterAdminGenericRefresh { + private static Router router; + private static RouterAdmin admin; + + private static RefreshHandler firstHandler; + private static RefreshHandler secondHandler; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + + // Build and start a router with admin + RPC + router = new Router(); + Configuration config = new RouterConfigBuilder() + .admin() + .rpc() + .build(); + router.init(config); + router.start(); + admin = new RouterAdmin(config); + } + + @AfterClass + public static void tearDownBeforeClass() throws IOException { + if (router != null) { + router.stop(); + router.close(); + } + } + + @Before + public void setUp() throws Exception { + // Register Handlers, first one just sends an ok response + firstHandler = Mockito.mock(RefreshHandler.class); + Mockito.when(firstHandler.handleRefresh(Mockito.anyString(), + Mockito.any(String[].class))).thenReturn( + RefreshResponse.successResponse()); + RefreshRegistry.defaultRegistry().register("firstHandler", firstHandler); + + // Second handler has conditional response for testing args + secondHandler = Mockito.mock(RefreshHandler.class); + Mockito.when(secondHandler.handleRefresh( + "secondHandler", new String[]{"one", "two"})).thenReturn( + new RefreshResponse(3, "three")); + Mockito.when(secondHandler.handleRefresh( + "secondHandler", new String[]{"one"})).thenReturn( + new RefreshResponse(2, "two")); + RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler); + } + + @After + public void tearDown() throws Exception { + RefreshRegistry.defaultRegistry().unregisterAll("firstHandler"); + RefreshRegistry.defaultRegistry().unregisterAll("secondHandler"); + } + + @Test + public void testInvalidCommand() throws Exception { + String[] args = new String[]{"-refreshRouterArgs", "nn"}; + int exitCode = admin.run(args); + assertEquals("RouterAdmin should fail due to bad args", -1, exitCode); + } + + @Test + public void testInvalidIdentifier() throws Exception { + String[] argv = new String[]{"-refreshRouterArgs", "localhost:" + + router.getAdminServerAddress().getPort(), "unregisteredIdentity"}; + int exitCode = admin.run(argv); + assertEquals("RouterAdmin should fail due to no handler registered", + -1, exitCode); + } + + @Test + public void testValidIdentifier() throws Exception { + String[] args = new String[]{"-refreshRouterArgs", "localhost:" + + router.getAdminServerAddress().getPort(), "firstHandler"}; + int exitCode = admin.run(args); + assertEquals("RouterAdmin should succeed", 0, exitCode); + + Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{}); + // Second handler was never called + Mockito.verify(secondHandler, Mockito.never()) + .handleRefresh(Mockito.anyString(), Mockito.any(String[].class)); + } + + @Test + public void testVariableArgs() throws Exception { + String[] args = new String[]{"-refreshRouterArgs", "localhost:" + + router.getAdminServerAddress().getPort(), "secondHandler", "one"}; + int exitCode = admin.run(args); + assertEquals("RouterAdmin should return 2", 2, exitCode); + + exitCode = admin.run(new String[]{"-refreshRouterArgs", "localhost:" + + router.getAdminServerAddress().getPort(), + "secondHandler", "one", "two"}); + assertEquals("RouterAdmin should now return 3", 3, exitCode); + + Mockito.verify(secondHandler).handleRefresh( + "secondHandler", new String[]{"one"}); + Mockito.verify(secondHandler).handleRefresh( + "secondHandler", new String[]{"one", "two"}); + } + + @Test + public void 
testUnregistration() throws Exception { + RefreshRegistry.defaultRegistry().unregisterAll("firstHandler"); + + // And now this should fail + String[] args = new String[]{"-refreshRouterArgs", "localhost:" + + router.getAdminServerAddress().getPort(), "firstHandler"}; + int exitCode = admin.run(args); + assertEquals("RouterAdmin should return -1", -1, exitCode); + } + + @Test + public void testUnregistrationReturnValue() { + RefreshHandler mockHandler = Mockito.mock(RefreshHandler.class); + RefreshRegistry.defaultRegistry().register("test", mockHandler); + boolean ret = RefreshRegistry.defaultRegistry(). + unregister("test", mockHandler); + assertTrue(ret); + } + + @Test + public void testMultipleRegistration() throws Exception { + RefreshRegistry.defaultRegistry().register("sharedId", firstHandler); + RefreshRegistry.defaultRegistry().register("sharedId", secondHandler); + + // this should trigger both + String[] args = new String[]{"-refreshRouterArgs", "localhost:" + + router.getAdminServerAddress().getPort(), "sharedId", "one"}; + int exitCode = admin.run(args); + + // -1 because one of the responses is unregistered + assertEquals(-1, exitCode); + + // verify we called both + Mockito.verify(firstHandler).handleRefresh( + "sharedId", new String[]{"one"}); + Mockito.verify(secondHandler).handleRefresh( + "sharedId", new String[]{"one"}); + + RefreshRegistry.defaultRegistry().unregisterAll("sharedId"); + } + + @Test + public void testMultipleReturnCodeMerging() throws Exception { + // Two handlers which return two non-zero values + RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class); + Mockito.when(handlerOne.handleRefresh(Mockito.anyString(), + Mockito.any(String[].class))).thenReturn( + new RefreshResponse(23, "Twenty Three")); + + RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class); + Mockito.when(handlerTwo.handleRefresh(Mockito.anyString(), + Mockito.any(String[].class))).thenReturn( + new RefreshResponse(10, "Ten")); + + // Then registered to the same ID + RefreshRegistry.defaultRegistry().register("shared", handlerOne); + RefreshRegistry.defaultRegistry().register("shared", handlerTwo); + + // We refresh both + String[] args = new String[]{"-refreshRouterArgs", "localhost:" + + router.getAdminServerAddress().getPort(), "shared"}; + int exitCode = admin.run(args); + + // We get -1 because of our logic for melding non-zero return codes + assertEquals(-1, exitCode); + + // Verify we called both + Mockito.verify(handlerOne).handleRefresh("shared", new String[]{}); + Mockito.verify(handlerTwo).handleRefresh("shared", new String[]{}); + + RefreshRegistry.defaultRegistry().unregisterAll("shared"); + } + + @Test + public void testExceptionResultsInNormalError() throws Exception { + // In this test, we ensure that all handlers are called + // even if we throw an exception in one + RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class); + Mockito.when(exceptionalHandler.handleRefresh(Mockito.anyString(), + Mockito.any(String[].class))).thenThrow( + new RuntimeException("Exceptional Handler Throws Exception")); + + RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class); + Mockito.when(otherExceptionalHandler.handleRefresh(Mockito.anyString(), + Mockito.any(String[].class))).thenThrow( + new RuntimeException("More Exceptions")); + + RefreshRegistry.defaultRegistry().register("exceptional", + exceptionalHandler); + RefreshRegistry.defaultRegistry().register("exceptional", + otherExceptionalHandler); + + String[] args = new 
String[]{"-refreshRouterArgs", "localhost:" + + router.getAdminServerAddress().getPort(), "exceptional"}; + int exitCode = admin.run(args); + assertEquals(-1, exitCode); // Exceptions result in a -1 + + Mockito.verify(exceptionalHandler).handleRefresh( + "exceptional", new String[]{}); + Mockito.verify(otherExceptionalHandler).handleRefresh( + "exceptional", new String[]{}); + + RefreshRegistry.defaultRegistry().unregisterAll("exceptional"); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md index a967ee4342bfb..421e3881db926 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md @@ -438,6 +438,7 @@ Usage: [-nameservice disable | enable ] [-getDisabledNameservices] [-refresh] + [-refreshRouterArgs [arg1..argn]] | COMMAND\_OPTION | Description | |:---- |:---- | @@ -451,6 +452,7 @@ Usage: | `-nameservice` `disable` `enable` *nameservice* | Disable/enable a name service from the federation. If disabled, requests will not go to that name service. | | `-getDisabledNameservices` | Get the name services that are disabled in the federation. | | `-refresh` | Update mount table cache of the connected router. | +| `refreshRouterArgs` \ \ [arg1..argn] | To trigger a runtime-refresh of the resource specified by \ on \. For example, to enable white list checking, we just need to send a refresh command other than restart the router server. | The commands for managing Router-based federation. See [Mount table management](../hadoop-hdfs-rbf/HDFSRouterFederation.html#Mount_table_management) for more info. From 221f24cbdc7e6477ec11bb9d98c9298d6fccf3ad Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Tue, 15 Jan 2019 14:21:33 +0800 Subject: [PATCH 0286/1308] HDFS-14206. RBF: Cleanup quota modules. Contributed by Inigo Goiri. --- .../hdfs/server/federation/router/Quota.java | 6 ++-- .../router/RouterClientProtocol.java | 22 ++++++------ .../federation/router/RouterQuotaManager.java | 2 +- .../router/RouterQuotaUpdateService.java | 6 ++-- .../federation/router/RouterQuotaUsage.java | 35 +++++++++++-------- 5 files changed, 38 insertions(+), 33 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java index 5d0309fa5711b..cfb538f8f6ada 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java @@ -163,7 +163,7 @@ private QuotaUsage aggregateQuota(Map results) { long ssCount = 0; long nsQuota = HdfsConstants.QUOTA_RESET; long ssQuota = HdfsConstants.QUOTA_RESET; - boolean hasQuotaUnSet = false; + boolean hasQuotaUnset = false; for (Map.Entry entry : results.entrySet()) { RemoteLocation loc = entry.getKey(); @@ -172,7 +172,7 @@ private QuotaUsage aggregateQuota(Map results) { // If quota is not set in real FileSystem, the usage // value will return -1. 
if (usage.getQuota() == -1 && usage.getSpaceQuota() == -1) { - hasQuotaUnSet = true; + hasQuotaUnset = true; } nsQuota = usage.getQuota(); ssQuota = usage.getSpaceQuota(); @@ -189,7 +189,7 @@ private QuotaUsage aggregateQuota(Map results) { QuotaUsage.Builder builder = new QuotaUsage.Builder() .fileAndDirectoryCount(nsCount).spaceConsumed(ssCount); - if (hasQuotaUnSet) { + if (hasQuotaUnset) { builder.quota(HdfsConstants.QUOTA_RESET) .spaceQuota(HdfsConstants.QUOTA_RESET); } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index c52b7655657bb..13f8f3774976e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.updateMountPointStatus; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CryptoProtocolVersion; -import org.apache.hadoop.fs.BatchedRemoteIterator; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; @@ -1147,7 +1147,7 @@ public void removeCacheDirective(long id) throws IOException { } @Override - public BatchedRemoteIterator.BatchedEntries listCacheDirectives( + public BatchedEntries listCacheDirectives( long prevId, CacheDirectiveInfo filter) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ, false); return null; @@ -1169,7 +1169,7 @@ public void removeCachePool(String cachePoolName) throws IOException { } @Override - public BatchedRemoteIterator.BatchedEntries listCachePools(String prevKey) + public BatchedEntries listCachePools(String prevKey) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ, false); return null; @@ -1280,7 +1280,7 @@ public EncryptionZone getEZForPath(String src) throws IOException { } @Override - public BatchedRemoteIterator.BatchedEntries listEncryptionZones(long prevId) + public BatchedEntries listEncryptionZones(long prevId) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ, false); return null; @@ -1293,7 +1293,7 @@ public void reencryptEncryptionZone(String zone, HdfsConstants.ReencryptAction a } @Override - public BatchedRemoteIterator.BatchedEntries listReencryptionStatus( + public BatchedEntries listReencryptionStatus( long prevId) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ, false); return null; @@ -1529,15 +1529,17 @@ public ReplicatedBlockStats getReplicatedBlockStats() throws IOException { @Deprecated @Override - public BatchedRemoteIterator.BatchedEntries listOpenFiles(long prevId) + public BatchedEntries listOpenFiles(long prevId) throws IOException { - return listOpenFiles(prevId, EnumSet.of(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES), + return listOpenFiles(prevId, + EnumSet.of(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES), OpenFilesIterator.FILTER_PATH_DEFAULT); } @Override - public BatchedRemoteIterator.BatchedEntries listOpenFiles(long prevId, - EnumSet openFilesTypes, String path) throws IOException { + public BatchedEntries 
listOpenFiles(long prevId, + EnumSet openFilesTypes, String path) + throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ, false); return null; } @@ -1669,7 +1671,7 @@ private HdfsFileStatus getFileInfoAll(final List locations, // Get the file info from everybody Map results = rpcClient.invokeConcurrent(locations, method, HdfsFileStatus.class); - int children=0; + int children = 0; // We return the first file HdfsFileStatus dirStatus = null; for (RemoteLocation loc : locations) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java index fa2a6e43faa8d..e818f5accd817 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java @@ -88,7 +88,7 @@ public RouterQuotaUsage getQuotaUsage(String path) { } /** - * Get children paths (can including itself) under specified federation path. + * Get children paths (can include itself) under specified federation path. * @param parentPath Federated path. * @return Set of children paths. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java index 9bfd705efbefe..dd21e1a7e6b16 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java @@ -186,10 +186,8 @@ private List getMountTableEntries() throws IOException { */ private List getQuotaSetMountTables() throws IOException { List mountTables = getMountTableEntries(); - Set stalePaths = new HashSet<>(); - for (String path : this.quotaManager.getAll()) { - stalePaths.add(path); - } + Set allPaths = this.quotaManager.getAll(); + Set stalePaths = new HashSet<>(allPaths); List neededMountTables = new LinkedList<>(); for (MountTable entry : mountTables) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java index de9119aed47a7..7fd845af762a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java @@ -75,9 +75,10 @@ public Builder spaceQuota(long spaceQuota) { * @throws NSQuotaExceededException If the quota is exceeded. 
*/ public void verifyNamespaceQuota() throws NSQuotaExceededException { - if (Quota.isViolated(getQuota(), getFileAndDirectoryCount())) { - throw new NSQuotaExceededException(getQuota(), - getFileAndDirectoryCount()); + long quota = getQuota(); + long fileAndDirectoryCount = getFileAndDirectoryCount(); + if (Quota.isViolated(quota, fileAndDirectoryCount)) { + throw new NSQuotaExceededException(quota, fileAndDirectoryCount); } } @@ -87,25 +88,29 @@ public void verifyNamespaceQuota() throws NSQuotaExceededException { * @throws DSQuotaExceededException If the quota is exceeded. */ public void verifyStoragespaceQuota() throws DSQuotaExceededException { - if (Quota.isViolated(getSpaceQuota(), getSpaceConsumed())) { - throw new DSQuotaExceededException(getSpaceQuota(), getSpaceConsumed()); + long spaceQuota = getSpaceQuota(); + long spaceConsumed = getSpaceConsumed(); + if (Quota.isViolated(spaceQuota, spaceConsumed)) { + throw new DSQuotaExceededException(spaceQuota, spaceConsumed); } } @Override public String toString() { - String nsQuota = String.valueOf(getQuota()); - String nsCount = String.valueOf(getFileAndDirectoryCount()); - if (getQuota() == HdfsConstants.QUOTA_RESET) { - nsQuota = "-"; - nsCount = "-"; + String nsQuota = "-"; + String nsCount = "-"; + long quota = getQuota(); + if (quota != HdfsConstants.QUOTA_RESET) { + nsQuota = String.valueOf(quota); + nsCount = String.valueOf(getFileAndDirectoryCount()); } - String ssQuota = StringUtils.byteDesc(getSpaceQuota()); - String ssCount = StringUtils.byteDesc(getSpaceConsumed()); - if (getSpaceQuota() == HdfsConstants.QUOTA_RESET) { - ssQuota = "-"; - ssCount = "-"; + String ssQuota = "-"; + String ssCount = "-"; + long spaceQuota = getSpaceQuota(); + if (spaceQuota != HdfsConstants.QUOTA_RESET) { + ssQuota = StringUtils.byteDesc(spaceQuota); + ssCount = StringUtils.byteDesc(getSpaceConsumed()); } StringBuilder str = new StringBuilder(); From f40e10b349ea3f67ad2a15911af5a12bcee38294 Mon Sep 17 00:00:00 2001 From: Surendra Singh Lilhore Date: Tue, 15 Jan 2019 16:40:39 +0530 Subject: [PATCH 0287/1308] HDFS-14129. RBF: Create new policy provider for router. Contributed by Ranith Sardar. --- .../hadoop-common/src/main/conf/hadoop-policy.xml | 10 ++++++++++ .../org/apache/hadoop/fs/CommonConfigurationKeys.java | 2 ++ .../org/apache/hadoop/hdfs/protocol/HdfsConstants.java | 5 +++++ .../hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java | 6 +++--- .../server/federation/router/RouterAdminServer.java | 10 ++++------ .../hdfs/server/federation/router/RouterRpcServer.java | 4 ++-- .../hadoop/fs/contract/router/RouterHDFSContract.java | 4 ++++ 7 files changed, 30 insertions(+), 11 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml index bd7c11124f5b3..e1640f97546ac 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml @@ -109,6 +109,16 @@ active and stand-by states of namenode. + + security.router.admin.protocol.acl + * + ACL for RouterAdmin Protocol. The ACL is a comma-separated + list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. 
+ + + security.zkfc.protocol.acl * diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java index 384e5d1e5f33c..2e6b132d747d1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java @@ -218,6 +218,8 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { SECURITY_CLIENT_PROTOCOL_ACL = "security.client.protocol.acl"; public static final String SECURITY_CLIENT_DATANODE_PROTOCOL_ACL = "security.client.datanode.protocol.acl"; + public static final String SECURITY_ROUTER_ADMIN_PROTOCOL_ACL = + "security.router.admin.protocol.acl"; public static final String SECURITY_DATANODE_PROTOCOL_ACL = "security.datanode.protocol.acl"; public static final String diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index 6de186a1bc702..c449a2e0d3528 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -92,6 +92,11 @@ public final class HdfsConstants { */ public static final String CLIENT_NAMENODE_PROTOCOL_NAME = "org.apache.hadoop.hdfs.protocol.ClientProtocol"; + /** + * Router admin Protocol Names. + */ + public static final String ROUTER_ADMIN_PROTOCOL_NAME = + "org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocol"; // Timeouts for communicating with DataNode for streaming writes/reads public static final int READ_TIMEOUT = 60 * 1000; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java index 96fa794183c3e..d308616ba63e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java @@ -19,10 +19,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; +import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.ipc.ProtocolInfo; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.token.TokenInfo; @@ -35,9 +35,9 @@ @InterfaceAudience.Private @InterfaceStability.Stable @KerberosInfo( - serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY) + serverPrincipal = RBFConfigKeys.DFS_ROUTER_KERBEROS_PRINCIPAL_KEY) @TokenInfo(DelegationTokenSelector.class) -@ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME, +@ProtocolInfo(protocolName = HdfsConstants.ROUTER_ADMIN_PROTOCOL_NAME, protocolVersion = 1) public interface RouterAdminProtocolPB 
extends RouterAdminProtocolService.BlockingInterface { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index 027dd111441cd..e2d944c4d6ee8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -29,16 +29,16 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService; +import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocol; import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB; import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.RouterPolicyProvider; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo; -import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore; import org.apache.hadoop.hdfs.server.federation.store.MountTableStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreCache; @@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.ipc.GenericRefreshProtocol; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; @@ -89,8 +88,7 @@ * router. It is created, started, and stopped by {@link Router}. */ public class RouterAdminServer extends AbstractService - implements MountTableManager, RouterStateManager, NameserviceManager, - GenericRefreshProtocol { + implements RouterAdminProtocol { private static final Logger LOG = LoggerFactory.getLogger(RouterAdminServer.class); @@ -159,7 +157,7 @@ public RouterAdminServer(Configuration conf, Router router) // Set service-level authorization security policy if (conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false)) { - this.adminServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); + this.adminServer.refreshServiceAcl(conf, new RouterPolicyProvider()); } // The RPC-server port can be ephemeral... 
ensure we have the correct info diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index ad5980b8d36b3..0d4f94c5a20db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -62,7 +62,6 @@ import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.hdfs.AddBlockFlag; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.inotify.EventBatchList; import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; @@ -103,6 +102,7 @@ import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB; import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.RouterPolicyProvider; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; @@ -275,7 +275,7 @@ public RouterRpcServer(Configuration configuration, Router router, this.serviceAuthEnabled = conf.getBoolean( HADOOP_SECURITY_AUTHORIZATION, false); if (this.serviceAuthEnabled) { - rpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); + rpcServer.refreshServiceAcl(conf, new RouterPolicyProvider()); } // We don't want the server to log the full stack trace for some exceptions diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java index 510cb95ee19f8..46339a388b884 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java @@ -90,6 +90,10 @@ public static MiniDFSCluster getCluster() { return cluster.getCluster(); } + public static MiniRouterDFSCluster getRouterCluster() { + return cluster; + } + public static FileSystem getFileSystem() throws IOException { //assumes cluster is not null Assert.assertNotNull("cluster not created", cluster); From 7b61cbf6729a2c5e29b68d5ee62d9729f8a6556f Mon Sep 17 00:00:00 2001 From: Surendra Singh Lilhore Date: Wed, 16 Jan 2019 11:42:17 +0530 Subject: [PATCH 0288/1308] HDFS-14129. addendum to HDFS-14129. Contributed by Ranith Sardar. 
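A minimal sketch of how the new provider can be exercised, relying only on the two classes added in this patch plus Hadoop's PolicyProvider API; the driver class name and the printout are illustrative and not part of the change:

    import java.util.Arrays;

    import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocol;
    import org.apache.hadoop.hdfs.protocolPB.RouterPolicyProvider;
    import org.apache.hadoop.security.authorize.PolicyProvider;

    // Hypothetical driver, for illustration only.
    public class RouterPolicyProviderCheck {
      public static void main(String[] args) {
        PolicyProvider provider = new RouterPolicyProvider();
        // The provider lists the regular HDFS services plus one entry for the
        // router admin protocol, keyed by security.router.admin.protocol.acl.
        boolean covered = Arrays.stream(provider.getServices())
            .anyMatch(s -> s.getProtocol() == RouterAdminProtocol.class);
        System.out.println("RouterAdminProtocol covered: " + covered);
      }
    }

The parameterized test added below performs the same kind of coverage check against the real RPC server classes.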
--- .../hdfs/protocolPB/RouterAdminProtocol.java | 34 ++++++ .../hdfs/protocolPB/RouterPolicyProvider.java | 52 +++++++++ .../router/TestRouterPolicyProvider.java | 108 ++++++++++++++++++ 3 files changed, 194 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocol.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterPolicyProvider.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocol.java new file mode 100644 index 0000000000000..d885989efa2e3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocol.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocolPB; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; +import org.apache.hadoop.hdfs.server.federation.router.NameserviceManager; +import org.apache.hadoop.hdfs.server.federation.router.RouterStateManager; +import org.apache.hadoop.ipc.GenericRefreshProtocol; + +/** + * Protocol used by routeradmin to communicate with statestore. + */ +@InterfaceAudience.Private +@InterfaceStability.Stable +public interface RouterAdminProtocol extends MountTableManager, + RouterStateManager, NameserviceManager, GenericRefreshProtocol { +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterPolicyProvider.java new file mode 100644 index 0000000000000..af391ff1dfa38 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterPolicyProvider.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocolPB; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hdfs.HDFSPolicyProvider; +import org.apache.hadoop.security.authorize.Service; + +/** + * {@link HDFSPolicyProvider} for RBF protocols. + */ +@InterfaceAudience.Private +public class RouterPolicyProvider extends HDFSPolicyProvider { + + private static final Service[] RBF_SERVICES = new Service[] { + new Service(CommonConfigurationKeys.SECURITY_ROUTER_ADMIN_PROTOCOL_ACL, + RouterAdminProtocol.class) }; + + private final Service[] services; + + public RouterPolicyProvider() { + List list = new ArrayList<>(); + list.addAll(Arrays.asList(super.getServices())); + list.addAll(Arrays.asList(RBF_SERVICES)); + services = list.toArray(new Service[list.size()]); + } + + @Override + public Service[] getServices() { + return Arrays.copyOf(services, services.length); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java new file mode 100644 index 0000000000000..36a00e507633e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer; + +import static org.junit.Assert.*; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.commons.lang3.ClassUtils; +import org.apache.hadoop.hdfs.protocolPB.RouterPolicyProvider; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.security.authorize.Service; + +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Sets; + +/** + * Test suite covering RouterPolicyProvider. We expect that it contains a + * security policy definition for every RPC protocol used in HDFS. The test + * suite works by scanning an RPC server's class to find the protocol interfaces + * it implements, and then comparing that to the protocol interfaces covered in + * RouterPolicyProvider. This is a parameterized test repeated for multiple HDFS + * RPC server classes. + */ +@RunWith(Parameterized.class) +public class TestRouterPolicyProvider { + private static final Logger LOG = LoggerFactory.getLogger( + TestRouterPolicyProvider.class); + + private static Set> policyProviderProtocols; + + @Rule + public TestName testName = new TestName(); + + private final Class rpcServerClass; + + @BeforeClass + public static void initialize() { + Service[] services = new RouterPolicyProvider().getServices(); + policyProviderProtocols = new HashSet<>(services.length); + for (Service service : services) { + policyProviderProtocols.add(service.getProtocol()); + } + } + + public TestRouterPolicyProvider(Class rpcServerClass) { + this.rpcServerClass = rpcServerClass; + } + + @Parameters(name = "protocolsForServer-{0}") + public static List[]> data() { + return Arrays.asList(new Class[][] {{RouterRpcServer.class}, + {NameNodeRpcServer.class}, {DataNode.class}, + {RouterAdminServer.class}}); + } + + @Test + public void testPolicyProviderForServer() { + List ifaces = ClassUtils.getAllInterfaces(rpcServerClass); + Set> serverProtocols = new HashSet<>(ifaces.size()); + for (Object obj : ifaces) { + Class iface = (Class) obj; + if (iface.getSimpleName().endsWith("Protocol")) { + serverProtocols.add(iface); + } + } + LOG.info("Running test {} for RPC server {}. Found server protocols {} " + + "and policy provider protocols {}.", testName.getMethodName(), + rpcServerClass.getName(), serverProtocols, policyProviderProtocols); + assertFalse("Expected to find at least one protocol in server.", + serverProtocols.isEmpty()); + final Set> differenceSet = Sets.difference(serverProtocols, + policyProviderProtocols); + assertTrue(String.format( + "Following protocols for server %s are not defined in " + "%s: %s", + rpcServerClass.getName(), RouterPolicyProvider.class.getName(), Arrays + .toString(differenceSet.toArray())), differenceSet.isEmpty()); + } +} \ No newline at end of file From c012b09fb678db11a10d0a8e1f64ee1a7229a417 Mon Sep 17 00:00:00 2001 From: Vinayakumar B Date: Wed, 16 Jan 2019 18:06:17 +0530 Subject: [PATCH 0289/1308] HDFS-14193. RBF: Inconsistency with the Default Namespace. Contributed by Ayush Saxena. 
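After this change the resolver consults only the router-specific keys. A fragment showing the intended configuration, assuming Configuration, RBFConfigKeys and MountTableResolver are imported; "ns0" is just an example value:

    Configuration conf = new Configuration();
    conf.setBoolean(RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE, true);
    conf.set(RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE, "ns0");
    MountTableResolver resolver = new MountTableResolver(conf);
    // Paths without a mount entry resolve against "ns0". If the enable flag
    // is false, or the key above is left empty, the default namespace stays
    // disabled instead of silently falling back to dfs.nameservices.
    System.out.println(resolver.getDefaultNamespace());  // prints "ns0"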
--- .../resolver/MountTableResolver.java | 27 ++++------------ .../TestInitializeMountTableResolver.java | 32 ++++++------------- 2 files changed, 16 insertions(+), 43 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java index 9e69840af9c73..da585515c35f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hdfs.server.federation.resolver; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMESERVICE_ID; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT; @@ -50,8 +48,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; import org.apache.hadoop.hdfs.server.federation.router.Router; import org.apache.hadoop.hdfs.server.federation.store.MountTableStore; @@ -163,33 +159,22 @@ private void registerCacheExternal() { * @param conf Configuration for this resolver. */ private void initDefaultNameService(Configuration conf) { - this.defaultNameService = conf.get( - DFS_ROUTER_DEFAULT_NAMESERVICE, - DFSUtil.getNamenodeNameServiceId(conf)); - this.defaultNSEnable = conf.getBoolean( DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE, DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT); - if (defaultNameService == null) { - LOG.warn( - "{} and {} is not set. Fallback to {} as the default name service.", - DFS_ROUTER_DEFAULT_NAMESERVICE, DFS_NAMESERVICE_ID, DFS_NAMESERVICES); - Collection nsIds = DFSUtilClient.getNameServiceIds(conf); - if (nsIds.isEmpty()) { - this.defaultNameService = ""; - } else { - this.defaultNameService = nsIds.iterator().next(); - } + if (!this.defaultNSEnable) { + LOG.warn("Default name service is disabled."); + return; } + this.defaultNameService = conf.get(DFS_ROUTER_DEFAULT_NAMESERVICE, ""); if (this.defaultNameService.equals("")) { this.defaultNSEnable = false; LOG.warn("Default name service is not set."); } else { - String enable = this.defaultNSEnable ? 
"enabled" : "disabled"; - LOG.info("Default name service: {}, {} to read or write", - this.defaultNameService, enable); + LOG.info("Default name service: {}, enabled to read or write", + this.defaultNameService); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java index 5db7531c4dddb..8a22ade0743ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java @@ -23,7 +23,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; /** * Test {@link MountTableResolver} initialization. @@ -43,40 +45,26 @@ public void testDefaultNameserviceWithEmptyString() { conf.set(DFS_ROUTER_DEFAULT_NAMESERVICE, ""); MountTableResolver mountTable = new MountTableResolver(conf); assertEquals("", mountTable.getDefaultNamespace()); + assertFalse("Default NS should be disabled if default NS is set empty", + mountTable.isDefaultNSEnable()); } @Test public void testRouterDefaultNameservice() { Configuration conf = new Configuration(); - conf.set(DFS_ROUTER_DEFAULT_NAMESERVICE, "router_ns"); // this is priority - conf.set(DFS_NAMESERVICE_ID, "ns_id"); - conf.set(DFS_NAMESERVICES, "nss"); + conf.set(DFS_ROUTER_DEFAULT_NAMESERVICE, "router_ns"); MountTableResolver mountTable = new MountTableResolver(conf); assertEquals("router_ns", mountTable.getDefaultNamespace()); } + // Default NS should be empty if configured false. @Test - public void testNameserviceID() { + public void testRouterDefaultNameserviceDisabled() { Configuration conf = new Configuration(); - conf.set(DFS_NAMESERVICE_ID, "ns_id"); // this is priority + conf.setBoolean(DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE, false); + conf.set(DFS_NAMESERVICE_ID, "ns_id"); conf.set(DFS_NAMESERVICES, "nss"); MountTableResolver mountTable = new MountTableResolver(conf); - assertEquals("ns_id", mountTable.getDefaultNamespace()); - } - - @Test - public void testSingleNameservices() { - Configuration conf = new Configuration(); - conf.set(DFS_NAMESERVICES, "ns1"); - MountTableResolver mountTable = new MountTableResolver(conf); - assertEquals("ns1", mountTable.getDefaultNamespace()); - } - - @Test - public void testMultipleNameservices() { - Configuration conf = new Configuration(); - conf.set(DFS_NAMESERVICES, "ns1,ns2"); - MountTableResolver mountTable = new MountTableResolver(conf); - assertEquals("ns1", mountTable.getDefaultNamespace()); + assertEquals("", mountTable.getDefaultNamespace()); } } \ No newline at end of file From 235406d9047af2039090ad48fc708368046df008 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Sat, 19 Jan 2019 15:23:15 -0800 Subject: [PATCH 0290/1308] HDFS-14156. RBF: rollEdit() command fails with Router. Contributed by Shubham Dewan. 
--- .../router/RouterClientProtocol.java | 2 +- .../federation/router/RouterRpcClient.java | 4 +- .../federation/router/TestRouterRpc.java | 27 +++ .../router/TestRouterRpcSingleNS.java | 211 ++++++++++++++++++ 4 files changed, 241 insertions(+), 3 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcSingleNS.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 13f8f3774976e..9a29c03975557 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -875,7 +875,7 @@ public boolean saveNamespace(long timeWindow, long txGap) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.UNCHECKED); RemoteMethod method = new RemoteMethod("saveNamespace", - new Class[] {Long.class, Long.class}, timeWindow, txGap); + new Class[] {long.class, long.class}, timeWindow, txGap); final Set nss = namenodeResolver.getNamespaces(); Map ret = rpcClient.invokeConcurrent(nss, method, true, false, boolean.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index c4d3a20178381..0b153339428e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -1045,7 +1045,7 @@ public Map invokeConcurrent( Class proto = method.getProtocol(); Object[] paramList = method.getParams(location); Object result = invokeMethod(ugi, namenodes, proto, m, paramList); - return Collections.singletonMap(location, clazz.cast(result)); + return Collections.singletonMap(location, (R) result); } List orderedLocations = new LinkedList<>(); @@ -1103,7 +1103,7 @@ public Object call() throws Exception { try { Future future = futures.get(i); Object result = future.get(); - results.put(location, clazz.cast(result)); + results.put(location, (R) result); } catch (CancellationException ce) { T loc = orderedLocations.get(i); String msg = "Invocation to \"" + loc + "\" for \"" diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index 8632203b06d01..760d755cb8f81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -111,6 +111,8 @@ /** * The the RPC interface of the {@link Router} implemented by * {@link RouterRpcServer}. + * Tests covering the functionality of RouterRPCServer with + * multi nameServices. 
*/ public class TestRouterRpc { @@ -1255,6 +1257,31 @@ public void testErasureCoding() throws IOException { assertEquals(statsNamenode.toString(), statsRouter.toString()); } + @Test + public void testGetCurrentTXIDandRollEdits() throws IOException { + Long rollEdits = routerProtocol.rollEdits(); + Long currentTXID = routerProtocol.getCurrentEditLogTxid(); + + assertEquals(rollEdits, currentTXID); + } + + @Test + public void testSaveNamespace() throws IOException { + cluster.getCluster().getFileSystem(0) + .setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); + cluster.getCluster().getFileSystem(1) + .setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); + + Boolean saveNamespace = routerProtocol.saveNamespace(0, 0); + + assertTrue(saveNamespace); + + cluster.getCluster().getFileSystem(0) + .setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE); + cluster.getCluster().getFileSystem(1) + .setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE); + } + @Test public void testNamenodeMetrics() throws Exception { final NamenodeBeanMetrics metrics = diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcSingleNS.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcSingleNS.java new file mode 100644 index 0000000000000..ae0afa42e4bf9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcSingleNS.java @@ -0,0 +1,211 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

 + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdfs.NameNodeProxies; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; + +/** + * The the RPC interface of the {@link Router} implemented by + * {@link RouterRpcServer}. + * Tests covering the functionality of RouterRPCServer with + * single nameService. + */ +public class TestRouterRpcSingleNS { + + /** + * Federated HDFS cluster. + */ + private static MiniRouterDFSCluster cluster; + + /** + * Random Router for this federated cluster. + */ + private MiniRouterDFSCluster.RouterContext router; + + /** + * Random nameservice in the federated cluster. + */ + private String ns; + /** + * First namenode in the nameservice. + */ + private MiniRouterDFSCluster.NamenodeContext namenode; + + /** + * Client interface to the Router. + */ + private ClientProtocol routerProtocol; + /** + * Client interface to the Namenode. + */ + private ClientProtocol nnProtocol; + + /** + * NameNodeProtocol interface to the Router. + */ + private NamenodeProtocol routerNamenodeProtocol; + /** + * NameNodeProtocol interface to the Namenode. + */ + private NamenodeProtocol nnNamenodeProtocol; + + /** + * Filesystem interface to the Router. + */ + private FileSystem routerFS; + /** + * Filesystem interface to the Namenode. + */ + private FileSystem nnFS; + + /** + * File in the Router. + */ + private String routerFile; + /** + * File in the Namenode. 
+ */ + private String nnFile; + + @BeforeClass + public static void globalSetUp() throws Exception { + cluster = new MiniRouterDFSCluster(false, 1); + cluster.setNumDatanodesPerNameservice(2); + + // Start NNs and DNs and wait until ready + cluster.startCluster(); + + // Start routers with only an RPC service + Configuration routerConf = new RouterConfigBuilder().metrics().rpc() + .build(); + // We decrease the DN cache times to make the test faster + routerConf.setTimeDuration(RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, + TimeUnit.SECONDS); + cluster.addRouterOverrides(routerConf); + cluster.startRouters(); + + // Register and verify all NNs with all routers + cluster.registerNamenodes(); + cluster.waitNamenodeRegistration(); + } + + @AfterClass + public static void tearDown() { + cluster.shutdown(); + } + + @Before + public void testSetup() throws Exception { + + // Create mock locations + cluster.installMockLocations(); + + // Delete all files via the NNs and verify + cluster.deleteAllFiles(); + + // Create test fixtures on NN + cluster.createTestDirectoriesNamenode(); + + // Wait to ensure NN has fully created its test directories + Thread.sleep(100); + + // Random router for this test + MiniRouterDFSCluster.RouterContext rndRouter = cluster.getRandomRouter(); + this.setRouter(rndRouter); + + // Pick a namenode for this test + String ns0 = cluster.getNameservices().get(0); + this.setNs(ns0); + this.setNamenode(cluster.getNamenode(ns0, null)); + + // Create a test file on the NN + Random rnd = new Random(); + String randomFile = "testfile-" + rnd.nextInt(); + this.nnFile = cluster.getNamenodeTestDirectoryForNS(ns) + "/" + randomFile; + this.routerFile = cluster.getFederatedTestDirectoryForNS(ns) + "/" + + randomFile; + + createFile(nnFS, nnFile, 32); + verifyFileExists(nnFS, nnFile); + } + + protected void setRouter(MiniRouterDFSCluster.RouterContext r) + throws IOException, URISyntaxException { + this.router = r; + this.routerProtocol = r.getClient().getNamenode(); + this.routerFS = r.getFileSystem(); + this.routerNamenodeProtocol = NameNodeProxies.createProxy(router.getConf(), + router.getFileSystem().getUri(), NamenodeProtocol.class).getProxy(); + } + + protected void setNs(String nameservice) { + this.ns = nameservice; + } + + protected void setNamenode(MiniRouterDFSCluster.NamenodeContext nn) + throws IOException, URISyntaxException { + this.namenode = nn; + this.nnProtocol = nn.getClient().getNamenode(); + this.nnFS = nn.getFileSystem(); + + // Namenode from the default namespace + String ns0 = cluster.getNameservices().get(0); + MiniRouterDFSCluster.NamenodeContext nn0 = cluster.getNamenode(ns0, null); + this.nnNamenodeProtocol = NameNodeProxies.createProxy(nn0.getConf(), + nn0.getFileSystem().getUri(), NamenodeProtocol.class).getProxy(); + } + + @Test + public void testGetCurrentTXIDandRollEdits() throws IOException { + Long rollEdits = routerProtocol.rollEdits(); + Long currentTXID = routerProtocol.getCurrentEditLogTxid(); + + assertEquals(rollEdits, currentTXID); + } + + @Test + public void testSaveNamespace() throws IOException { + cluster.getCluster().getFileSystem() + .setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); + Boolean saveNamespace = routerProtocol.saveNamespace(0, 0); + + assertTrue(saveNamespace); + } +} \ No newline at end of file From 020f83f51fa79a8049bbaf224022d8214375296e Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Wed, 23 Jan 2019 22:59:43 +0800 Subject: [PATCH 0291/1308] HDFS-14209. 
RBF: setQuota() through router is working for only the mount Points under the Source column in MountTable. Contributed by Shubham Dewan. --- .../hdfs/server/federation/router/Quota.java | 7 +++- .../federation/router/TestRouterQuota.java | 32 ++++++++++++++++++- 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java index cfb538f8f6ada..a6f5baba39316 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java @@ -216,6 +216,11 @@ private List getQuotaRemoteLocations(String path) locations.addAll(rpcServer.getLocationsForPath(childPath, true, false)); } } - return locations; + if (locations.size() >= 1) { + return locations; + } else { + locations.addAll(rpcServer.getLocationsForPath(path, true, false)); + return locations; + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java index 656b401ec2482..034023c47aaf6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java @@ -755,4 +755,34 @@ public void testClearQuotaDefAfterRemovingMountTable() throws Exception { assertEquals(HdfsConstants.QUOTA_RESET, subClusterQuota.getQuota()); assertEquals(HdfsConstants.QUOTA_RESET, subClusterQuota.getSpaceQuota()); } -} \ No newline at end of file + + @Test + public void testSetQuotaNotMountTable() throws Exception { + long nsQuota = 5; + long ssQuota = 100; + final FileSystem nnFs1 = nnContext1.getFileSystem(); + + // setQuota should run for any directory + MountTable mountTable1 = MountTable.newInstance("/setquotanmt", + Collections.singletonMap("ns0", "/testdir16")); + + addMountTable(mountTable1); + + // Add a directory not present in mount table. + nnFs1.mkdirs(new Path("/testdir16/testdir17")); + + routerContext.getRouter().getRpcServer().setQuota("/setquotanmt/testdir17", + nsQuota, ssQuota, null); + + RouterQuotaUpdateService updateService = routerContext.getRouter() + .getQuotaCacheUpdateService(); + // ensure setQuota RPC call was invoked + updateService.periodicInvoke(); + + ClientProtocol client1 = nnContext1.getClient().getNamenode(); + final QuotaUsage quota1 = client1.getQuotaUsage("/testdir16/testdir17"); + + assertEquals(nsQuota, quota1.getQuota()); + assertEquals(ssQuota, quota1.getSpaceQuota()); + } +} From 8b9b58b58ac91599ada33e6501995d06d8758a3f Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Fri, 25 Jan 2019 11:28:48 +0530 Subject: [PATCH 0292/1308] HDFS-14223. RBF: Add configuration documents for using multiple sub-clusters. Contributed by Takanobu Asanuma. 
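For example, the setting being documented here, in programmatic form; the key and class names are the ones in the docs, and in practice the value goes in hdfs-rbf-site.xml rather than code:

    Configuration conf = new Configuration();
    conf.set("dfs.federation.router.file.resolver.client.class",
        "org.apache.hadoop.hdfs.server.federation.resolver."
            + "MultipleDestinationMountTableResolver");
    // The default MountTableResolver maps each mount point to a single
    // subcluster; the resolver above is what enables mount points with
    // multiple destinations.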
--- .../hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml | 3 ++- .../hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index 20ae778ef1d46..afe3ad155b827 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -275,7 +275,8 @@ dfs.federation.router.file.resolver.client.class org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver - Class to resolve files to subclusters. + Class to resolve files to subclusters. To enable multiple subclusters for a mount point, + set to org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index bcf8fa9c31cc9..2ae0c2bed0cb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -404,7 +404,7 @@ Forwarding client requests to the right subcluster. | Property | Default | Description| |:---- |:---- |:---- | -| dfs.federation.router.file.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver` | Class to resolve files to subclusters. | +| dfs.federation.router.file.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver` | Class to resolve files to subclusters. To enable multiple subclusters for a mount point, set to org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver. | | dfs.federation.router.namenode.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver` | Class to resolve the namenode for a subcluster. | ### Namenode monitoring From acdf911c014e6820866f3451c7ae09163119337c Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Mon, 28 Jan 2019 09:03:32 +0530 Subject: [PATCH 0293/1308] HDFS-14224. RBF: NPE in getContentSummary() for getEcPolicy() in case of multiple destinations. Contributed by Ayush Saxena. 
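A sketch of the call that used to trip the NPE when a path fans out to several subclusters; it mirrors the new test below, getRouterFileSystem() is that test's helper and RS-6-3-1024k is simply the policy used there:

    DistributedFileSystem routerFs =
        (DistributedFileSystem) getRouterFileSystem();
    routerFs.setErasureCodingPolicy(new Path("/"), "RS-6-3-1024k");
    // The merged summary now carries the EC policy taken from the first
    // subcluster response instead of leaving it unset.
    String ecPolicy = routerFs.getContentSummary(new Path("/"))
        .getErasureCodingPolicy();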
--- .../federation/router/RouterClientProtocol.java | 7 +++++++ .../router/TestRouterRpcMultiDestination.java | 16 ++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 9a29c03975557..2a8714601138c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -1635,6 +1635,7 @@ private ContentSummary aggregateContentSummary( long quota = 0; long spaceConsumed = 0; long spaceQuota = 0; + String ecPolicy = ""; for (ContentSummary summary : summaries) { length += summary.getLength(); @@ -1643,6 +1644,11 @@ private ContentSummary aggregateContentSummary( quota += summary.getQuota(); spaceConsumed += summary.getSpaceConsumed(); spaceQuota += summary.getSpaceQuota(); + // We return from the first response as we assume that the EC policy + // of each sub-cluster is same. + if (ecPolicy.isEmpty()) { + ecPolicy = summary.getErasureCodingPolicy(); + } } ContentSummary ret = new ContentSummary.Builder() @@ -1652,6 +1658,7 @@ private ContentSummary aggregateContentSummary( .quota(quota) .spaceConsumed(spaceConsumed) .spaceQuota(spaceQuota) + .erasureCodingPolicy(ecPolicy) .build(); return ret; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java index 31017480b4811..3d941bbf4b907 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -229,6 +230,21 @@ public void testProxyRenameFiles() throws IOException, InterruptedException { testRename2(getRouterContext(), filename1, renamedFile, false); } + @Test + public void testGetContentSummaryEc() throws Exception { + DistributedFileSystem routerDFS = + (DistributedFileSystem) getRouterFileSystem(); + Path dir = new Path("/"); + String expectedECPolicy = "RS-6-3-1024k"; + try { + routerDFS.setErasureCodingPolicy(dir, expectedECPolicy); + assertEquals(expectedECPolicy, + routerDFS.getContentSummary(dir).getErasureCodingPolicy()); + } finally { + routerDFS.unsetErasureCodingPolicy(dir); + } + } + @Test public void testSubclusterDown() throws Exception { final int totalFiles = 6; From 9eed3a49dfaba20cb1415d0712d60d17f613e19a Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 28 Jan 2019 10:04:24 -0800 Subject: [PATCH 0294/1308] HDFS-14215. RBF: Remove dependency on availability of default namespace. Contributed by Ayush Saxena. 
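The pattern after this change, taken from the getServerDefaults() hunk below for orientation; the new helper tries the default namespace first and otherwise falls back to the first available one:

    // Before: String ns = subclusterResolver.getDefaultNamespace();
    //         return (FsServerDefaults) rpcClient.invokeSingle(ns, method);
    RemoteMethod method = new RemoteMethod("getServerDefaults");
    return rpcServer.invokeAtAvailableNs(method, FsServerDefaults.class);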
--- .../router/RouterClientProtocol.java | 3 +- .../router/RouterNamenodeProtocol.java | 20 +--- .../federation/router/RouterRpcServer.java | 23 ++++ .../router/RouterStoragePolicy.java | 7 +- .../hdfs/server/federation/MockResolver.java | 12 ++ .../federation/router/TestRouterRpc.java | 109 ++++++++++++++++-- 6 files changed, 139 insertions(+), 35 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 2a8714601138c..6652cb26d43f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -201,8 +201,7 @@ public FsServerDefaults getServerDefaults() throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); RemoteMethod method = new RemoteMethod("getServerDefaults"); - String ns = subclusterResolver.getDefaultNamespace(); - return (FsServerDefaults) rpcClient.invokeSingle(ns, method); + return rpcServer.invokeAtAvailableNs(method, FsServerDefaults.class); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java index bf0db6e7253df..c6b020977d56b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; -import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; @@ -45,14 +44,11 @@ public class RouterNamenodeProtocol implements NamenodeProtocol { private final RouterRpcServer rpcServer; /** RPC clients to connect to the Namenodes. */ private final RouterRpcClient rpcClient; - /** Interface to map global name space to HDFS subcluster name spaces. 
*/ - private final FileSubclusterResolver subclusterResolver; public RouterNamenodeProtocol(RouterRpcServer server) { this.rpcServer = server; this.rpcClient = this.rpcServer.getRPCClient(); - this.subclusterResolver = this.rpcServer.getSubclusterResolver(); } @Override @@ -94,33 +90,27 @@ public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size, public ExportedBlockKeys getBlockKeys() throws IOException { rpcServer.checkOperation(OperationCategory.READ); - // We return the information from the default name space - String defaultNsId = subclusterResolver.getDefaultNamespace(); RemoteMethod method = new RemoteMethod(NamenodeProtocol.class, "getBlockKeys"); - return rpcClient.invokeSingle(defaultNsId, method, ExportedBlockKeys.class); + return rpcServer.invokeAtAvailableNs(method, ExportedBlockKeys.class); } @Override public long getTransactionID() throws IOException { rpcServer.checkOperation(OperationCategory.READ); - // We return the information from the default name space - String defaultNsId = subclusterResolver.getDefaultNamespace(); RemoteMethod method = new RemoteMethod(NamenodeProtocol.class, "getTransactionID"); - return rpcClient.invokeSingle(defaultNsId, method, long.class); + return rpcServer.invokeAtAvailableNs(method, long.class); } @Override public long getMostRecentCheckpointTxId() throws IOException { rpcServer.checkOperation(OperationCategory.READ); - // We return the information from the default name space - String defaultNsId = subclusterResolver.getDefaultNamespace(); RemoteMethod method = new RemoteMethod(NamenodeProtocol.class, "getMostRecentCheckpointTxId"); - return rpcClient.invokeSingle(defaultNsId, method, long.class); + return rpcServer.invokeAtAvailableNs(method, long.class); } @Override @@ -133,11 +123,9 @@ public CheckpointSignature rollEditLog() throws IOException { public NamespaceInfo versionRequest() throws IOException { rpcServer.checkOperation(OperationCategory.READ); - // We return the information from the default name space - String defaultNsId = subclusterResolver.getDefaultNamespace(); RemoteMethod method = new RemoteMethod(NamenodeProtocol.class, "versionRequest"); - return rpcClient.invokeSingle(defaultNsId, method, NamespaceInfo.class); + return rpcServer.invokeAtAvailableNs(method, NamespaceInfo.class); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 0d4f94c5a20db..be6a9b03c9ee3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -479,6 +479,29 @@ static String getMethodName() { return methodName; } + /** + * Invokes the method at default namespace, if default namespace is not + * available then at the first available namespace. + * @param expected return type. + * @param method the remote method. + * @return the response received after invoking method. + * @throws IOException + */ + T invokeAtAvailableNs(RemoteMethod method, Class clazz) + throws IOException { + String nsId = subclusterResolver.getDefaultNamespace(); + if (!nsId.isEmpty()) { + return rpcClient.invokeSingle(nsId, method, clazz); + } + // If default Ns is not present return result from first namespace. 
+ Set nss = namenodeResolver.getNamespaces(); + if (nss.isEmpty()) { + throw new IOException("No namespace availaible."); + } + nsId = nss.iterator().next().getNameserviceId(); + return rpcClient.invokeSingle(nsId, method, clazz); + } + @Override // ClientProtocol public Token getDelegationToken(Text renewer) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java index 7145940cca4d4..8a55b9a6fd427 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.federation.router; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; -import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -36,13 +35,10 @@ public class RouterStoragePolicy { private final RouterRpcServer rpcServer; /** RPC clients to connect to the Namenodes. */ private final RouterRpcClient rpcClient; - /** Interface to map global name space to HDFS subcluster name spaces. */ - private final FileSubclusterResolver subclusterResolver; public RouterStoragePolicy(RouterRpcServer server) { this.rpcServer = server; this.rpcClient = this.rpcServer.getRPCClient(); - this.subclusterResolver = this.rpcServer.getSubclusterResolver(); } public void setStoragePolicy(String src, String policyName) @@ -61,8 +57,7 @@ public BlockStoragePolicy[] getStoragePolicies() throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); RemoteMethod method = new RemoteMethod("getStoragePolicies"); - String ns = subclusterResolver.getDefaultNamespace(); - return (BlockStoragePolicy[]) rpcClient.invokeSingle(ns, method); + return rpcServer.invokeAtAvailableNs(method, BlockStoragePolicy[].class); } public void unsetStoragePolicy(String src) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java index 9bff00732ee91..cdeab46938b37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java @@ -57,6 +57,7 @@ public class MockResolver private Map> locations = new HashMap<>(); private Set namespaces = new HashSet<>(); private String defaultNamespace = null; + private boolean disableDefaultNamespace = false; public MockResolver() { this.cleanRegistrations(); @@ -322,8 +323,19 @@ public List getMountPoints(String path) throws IOException { public void setRouterId(String router) { } + /** + * Mocks the availability of default namespace. + * @param b if true default namespace is unset. 
+ */ + public void setDisableNamespace(boolean b) { + this.disableDefaultNamespace = b; + } + @Override public String getDefaultNamespace() { + if (disableDefaultNamespace) { + return ""; + } return defaultNamespace; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index 760d755cb8f81..2d26e1142e72c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -56,6 +56,7 @@ import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; @@ -828,6 +829,40 @@ public void testProxyGetAndUnsetStoragePolicy() throws Exception { assertEquals(nnPolicy.getId(), policy.getId()); } + @Test + public void testListStoragePolicies() throws IOException, URISyntaxException { + MockResolver resolver = + (MockResolver) router.getRouter().getSubclusterResolver(); + try { + // Check with default namespace specified. + BlockStoragePolicy[] policies = namenode.getClient().getStoragePolicies(); + assertArrayEquals(policies, routerProtocol.getStoragePolicies()); + // Check with default namespace unspecified. + resolver.setDisableNamespace(true); + assertArrayEquals(policies, routerProtocol.getStoragePolicies()); + } finally { + resolver.setDisableNamespace(false); + } + } + + @Test + public void testGetServerDefaults() throws IOException, URISyntaxException { + MockResolver resolver = + (MockResolver) router.getRouter().getSubclusterResolver(); + try { + // Check with default namespace specified. + FsServerDefaults defaults = namenode.getClient().getServerDefaults(); + assertEquals(defaults.getBlockSize(), + routerProtocol.getServerDefaults().getBlockSize()); + // Check with default namespace unspecified. + resolver.setDisableNamespace(true); + assertEquals(defaults.getBlockSize(), + routerProtocol.getServerDefaults().getBlockSize()); + } finally { + resolver.setDisableNamespace(false); + } + } + @Test public void testProxyGetPreferedBlockSize() throws Exception { @@ -1012,8 +1047,23 @@ public void testProxyGetFileInfoAcessException() throws IOException { @Test public void testProxyVersionRequest() throws Exception { - NamespaceInfo rVersion = routerNamenodeProtocol.versionRequest(); - NamespaceInfo nnVersion = nnNamenodeProtocol.versionRequest(); + MockResolver resolver = + (MockResolver) router.getRouter().getSubclusterResolver(); + try { + // Check with default namespace specified. + NamespaceInfo rVersion = routerNamenodeProtocol.versionRequest(); + NamespaceInfo nnVersion = nnNamenodeProtocol.versionRequest(); + compareVersion(rVersion, nnVersion); + // Check with default namespace unspecified. 
+ resolver.setDisableNamespace(true); + rVersion = routerNamenodeProtocol.versionRequest(); + compareVersion(rVersion, nnVersion); + } finally { + resolver.setDisableNamespace(false); + } + } + + private void compareVersion(NamespaceInfo rVersion, NamespaceInfo nnVersion) { assertEquals(nnVersion.getBlockPoolID(), rVersion.getBlockPoolID()); assertEquals(nnVersion.getNamespaceID(), rVersion.getNamespaceID()); assertEquals(nnVersion.getClusterID(), rVersion.getClusterID()); @@ -1023,8 +1073,24 @@ public void testProxyVersionRequest() throws Exception { @Test public void testProxyGetBlockKeys() throws Exception { - ExportedBlockKeys rKeys = routerNamenodeProtocol.getBlockKeys(); - ExportedBlockKeys nnKeys = nnNamenodeProtocol.getBlockKeys(); + MockResolver resolver = + (MockResolver) router.getRouter().getSubclusterResolver(); + try { + // Check with default namespace specified. + ExportedBlockKeys rKeys = routerNamenodeProtocol.getBlockKeys(); + ExportedBlockKeys nnKeys = nnNamenodeProtocol.getBlockKeys(); + compareBlockKeys(rKeys, nnKeys); + // Check with default namespace unspecified. + resolver.setDisableNamespace(true); + rKeys = routerNamenodeProtocol.getBlockKeys(); + compareBlockKeys(rKeys, nnKeys); + } finally { + resolver.setDisableNamespace(false); + } + } + + private void compareBlockKeys(ExportedBlockKeys rKeys, + ExportedBlockKeys nnKeys) { assertEquals(nnKeys.getCurrentKey(), rKeys.getCurrentKey()); assertEquals(nnKeys.getKeyUpdateInterval(), rKeys.getKeyUpdateInterval()); assertEquals(nnKeys.getTokenLifetime(), rKeys.getTokenLifetime()); @@ -1054,17 +1120,38 @@ public void testProxyGetBlocks() throws Exception { @Test public void testProxyGetTransactionID() throws IOException { - long routerTransactionID = routerNamenodeProtocol.getTransactionID(); - long nnTransactionID = nnNamenodeProtocol.getTransactionID(); - assertEquals(nnTransactionID, routerTransactionID); + MockResolver resolver = + (MockResolver) router.getRouter().getSubclusterResolver(); + try { + // Check with default namespace specified. + long routerTransactionID = routerNamenodeProtocol.getTransactionID(); + long nnTransactionID = nnNamenodeProtocol.getTransactionID(); + assertEquals(nnTransactionID, routerTransactionID); + // Check with default namespace unspecified. + resolver.setDisableNamespace(true); + routerTransactionID = routerNamenodeProtocol.getTransactionID(); + assertEquals(nnTransactionID, routerTransactionID); + } finally { + resolver.setDisableNamespace(false); + } } @Test public void testProxyGetMostRecentCheckpointTxId() throws IOException { - long routerCheckPointId = - routerNamenodeProtocol.getMostRecentCheckpointTxId(); - long nnCheckPointId = nnNamenodeProtocol.getMostRecentCheckpointTxId(); - assertEquals(nnCheckPointId, routerCheckPointId); + MockResolver resolver = + (MockResolver) router.getRouter().getSubclusterResolver(); + try { + // Check with default namespace specified. + long routerCheckPointId = + routerNamenodeProtocol.getMostRecentCheckpointTxId(); + long nnCheckPointId = nnNamenodeProtocol.getMostRecentCheckpointTxId(); + assertEquals(nnCheckPointId, routerCheckPointId); + // Check with default namespace unspecified. + resolver.setDisableNamespace(true); + routerCheckPointId = routerNamenodeProtocol.getMostRecentCheckpointTxId(); + } finally { + resolver.setDisableNamespace(false); + } } @Test From 559cb11551d7045015aa60afe7e7998fe0b2ef13 Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Tue, 5 Feb 2019 06:06:05 +0900 Subject: [PATCH 0295/1308] HDFS-13404. 
RBF: TestRouterWebHDFSContractAppend.testRenameFileBeingAppended fails. --- .../hadoop/fs/contract/AbstractContractAppendTest.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java index d61b635449892..02a899613c644 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java @@ -133,6 +133,12 @@ public void testRenameFileBeingAppended() throws Throwable { assertPathExists("original file does not exist", target); byte[] dataset = dataset(256, 'a', 'z'); FSDataOutputStream outputStream = getFileSystem().append(target); + if (isSupported(CREATE_VISIBILITY_DELAYED)) { + // Some filesystems like WebHDFS doesn't assure sequential consistency. + // In such a case, delay is needed. Given that we can not check the lease + // because here is closed in client side package, simply add a sleep. + Thread.sleep(10); + } outputStream.write(dataset); Path renamed = new Path(testPath, "renamed"); rename(target, renamed); From 9c4e55685be4eac5318fdc7fa2e857a57c02a7cb Mon Sep 17 00:00:00 2001 From: Surendra Singh Lilhore Date: Tue, 5 Feb 2019 10:03:04 +0530 Subject: [PATCH 0296/1308] HDFS-14225. RBF : MiniRouterDFSCluster should configure the failover proxy provider for namespace. Contributed by Ranith Sardar. --- .../hadoop/hdfs/server/federation/MiniRouterDFSCluster.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java index 2df883cff6594..f0bf271832446 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java @@ -78,6 +78,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf; import org.apache.hadoop.hdfs.MiniDFSNNTopology.NSConf; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo; @@ -87,6 +88,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RouterClient; import org.apache.hadoop.hdfs.server.namenode.FSImage; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.net.NetUtils; @@ -489,6 +491,9 @@ public Configuration generateNamenodeConfiguration(String nsId) { "0.0.0.0"); conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY + "." + suffix, "127.0.0.1:" + context.httpsPort); + conf.set( + HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." 
+ ns, + ConfiguredFailoverProxyProvider.class.getName()); // If the service port is enabled by default, we need to set them up boolean servicePortEnabled = false; From 912b90f91e11a183e47d9271dec089e25f159dee Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Tue, 5 Feb 2019 10:40:28 -0800 Subject: [PATCH 0297/1308] HDFS-14252. RBF : Exceptions are exposing the actual sub cluster path. Contributed by Ayush Saxena. --- .../federation/router/RouterRpcClient.java | 13 ++++-- .../router/TestRouterMountTable.java | 41 ++++++++++++------- 2 files changed, 36 insertions(+), 18 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index 0b153339428e4..f5985ee81adb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -1042,10 +1042,15 @@ public Map invokeConcurrent( String ns = location.getNameserviceId(); final List namenodes = getNamenodesForNameservice(ns); - Class proto = method.getProtocol(); - Object[] paramList = method.getParams(location); - Object result = invokeMethod(ugi, namenodes, proto, m, paramList); - return Collections.singletonMap(location, (R) result); + try { + Class proto = method.getProtocol(); + Object[] paramList = method.getParams(location); + Object result = invokeMethod(ugi, namenodes, proto, m, paramList); + return Collections.singletonMap(location, (R) result); + } catch (IOException ioe) { + // Localize the exception + throw processException(ioe, location); + } } List orderedLocations = new LinkedList<>(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java index 9538d7117060e..4f6f702d9a1fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.io.FileNotFoundException; import java.io.IOException; import java.util.Collections; import java.util.HashMap; @@ -43,12 +44,14 @@ import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import 
org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Time; import org.junit.After; import org.junit.AfterClass; @@ -69,6 +72,7 @@ public class TestRouterMountTable { private static long startTime; private static FileSystem nnFs0; private static FileSystem nnFs1; + private static FileSystem routerFs; @BeforeClass public static void globalSetUp() throws Exception { @@ -92,6 +96,7 @@ public static void globalSetUp() throws Exception { nnFs0 = nnContext0.getFileSystem(); nnFs1 = nnContext1.getFileSystem(); routerContext = cluster.getRandomRouter(); + routerFs = routerContext.getFileSystem(); Router router = routerContext.getRouter(); routerProtocol = routerContext.getClient().getNamenode(); mountTable = (MountTableResolver) router.getSubclusterResolver(); @@ -136,7 +141,6 @@ public void testReadOnly() throws Exception { assertTrue(addMountTable(regularEntry)); // Create a folder which should show in all locations - final FileSystem routerFs = routerContext.getFileSystem(); assertTrue(routerFs.mkdirs(new Path("/regular/newdir"))); FileStatus dirStatusNn = @@ -261,7 +265,7 @@ public void testMountTablePermissionsNoDest() throws IOException { addEntry.setOwnerName("owner1"); addEntry.setMode(FsPermission.createImmutable((short) 0775)); assertTrue(addMountTable(addEntry)); - FileStatus[] list = routerContext.getFileSystem().listStatus(new Path("/")); + FileStatus[] list = routerFs.listStatus(new Path("/")); assertEquals("group1", list[0].getGroup()); assertEquals("owner1", list[0].getOwner()); assertEquals((short) 0775, list[0].getPermission().toShort()); @@ -282,8 +286,7 @@ public void testMountTablePermissionsWithDest() throws IOException { nnFs0.setOwner(new Path("/tmp/testdir"), "Aowner", "Agroup"); nnFs0.setPermission(new Path("/tmp/testdir"), FsPermission.createImmutable((short) 775)); - FileStatus[] list = - routerContext.getFileSystem().listStatus(new Path("/")); + FileStatus[] list = routerFs.listStatus(new Path("/")); assertEquals("Agroup", list[0].getGroup()); assertEquals("Aowner", list[0].getOwner()); assertEquals((short) 775, list[0].getPermission().toShort()); @@ -313,8 +316,7 @@ public void testMountTablePermissionsMultiDest() throws IOException { nnFs1.setOwner(new Path("/tmp/testdir01"), "Aowner", "Agroup"); nnFs1.setPermission(new Path("/tmp/testdir01"), FsPermission.createImmutable((short) 775)); - FileStatus[] list = - routerContext.getFileSystem().listStatus(new Path("/")); + FileStatus[] list = routerFs.listStatus(new Path("/")); assertEquals("Agroup", list[0].getGroup()); assertEquals("Aowner", list[0].getOwner()); assertEquals((short) 775, list[0].getPermission().toShort()); @@ -347,8 +349,7 @@ public void testMountTablePermissionsMultiDestDifferentPerm() nnFs1.setOwner(new Path("/tmp/testdir01"), "Aowner01", "Agroup01"); nnFs1.setPermission(new Path("/tmp/testdir01"), FsPermission.createImmutable((short) 755)); - FileStatus[] list = - routerContext.getFileSystem().listStatus(new Path("/")); + FileStatus[] list = routerFs.listStatus(new Path("/")); assertTrue("Agroup".equals(list[0].getGroup()) || "Agroup01".equals(list[0].getGroup())); assertTrue("Aowner".equals(list[0].getOwner()) @@ -374,8 +375,7 @@ public void testMountPointResolved() throws IOException { addEntry.setOwnerName("owner1"); assertTrue(addMountTable(addEntry)); HdfsFileStatus finfo = routerProtocol.getFileInfo("/testdir"); - FileStatus[] finfo1 = - routerContext.getFileSystem().listStatus(new Path("/")); + FileStatus[] finfo1 = routerFs.listStatus(new Path("/")); assertEquals("owner1", 
finfo.getOwner()); assertEquals("owner1", finfo1[0].getOwner()); assertEquals("group1", finfo.getGroup()); @@ -395,8 +395,7 @@ public void testMountPointChildren() throws IOException { nnFs0.mkdirs(new Path("/tmp/testdir")); nnFs0.mkdirs(new Path("/tmp/testdir/1")); nnFs0.mkdirs(new Path("/tmp/testdir/2")); - FileStatus[] finfo1 = - routerContext.getFileSystem().listStatus(new Path("/")); + FileStatus[] finfo1 = routerFs.listStatus(new Path("/")); assertEquals(2, ((HdfsFileStatus) finfo1[0]).getChildrenNum()); } finally { nnFs0.delete(new Path("/tmp"), true); @@ -421,12 +420,26 @@ public void testMountPointChildrenMultiDest() throws IOException { nnFs1.mkdirs(new Path("/tmp/testdir01")); nnFs0.mkdirs(new Path("/tmp/testdir/1")); nnFs1.mkdirs(new Path("/tmp/testdir01/1")); - FileStatus[] finfo1 = - routerContext.getFileSystem().listStatus(new Path("/")); + FileStatus[] finfo1 = routerFs.listStatus(new Path("/")); assertEquals(2, ((HdfsFileStatus) finfo1[0]).getChildrenNum()); } finally { nnFs0.delete(new Path("/tmp"), true); nnFs0.delete(new Path("/tmp"), true); } } + + /** + * Validates the path in the exception. The path should be with respect to the + * mount not with respect to the sub cluster. + */ + @Test + public void testPathInException() throws Exception { + MountTable addEntry = MountTable.newInstance("/mount", + Collections.singletonMap("ns0", "/tmp/testdir")); + addEntry.setDestOrder(DestinationOrder.HASH_ALL); + assertTrue(addMountTable(addEntry)); + LambdaTestUtils.intercept(FileNotFoundException.class, + "Directory/File does not exist /mount/file", + () -> routerFs.setOwner(new Path("/mount/file"), "user", "group")); + } } \ No newline at end of file From 7e63e37dc5cbe330082a6a42598ffb76e0770fc1 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Tue, 12 Feb 2019 10:44:02 -0800 Subject: [PATCH 0298/1308] HDFS-14230. RBF: Throw RetriableException instead of IOException when no namenodes available. Contributed by Fei Hui. 
--- .../metrics/FederationRPCMBean.java | 2 + .../metrics/FederationRPCMetrics.java | 11 +++ .../FederationRPCPerformanceMonitor.java | 5 ++ .../router/NoNamenodesAvailableException.java | 33 +++++++ .../federation/router/RouterRpcClient.java | 16 +++- .../federation/router/RouterRpcMonitor.java | 5 ++ .../federation/FederationTestUtils.java | 38 ++++++++ .../TestRouterClientRejectOverload.java | 86 +++++++++++++++++-- .../router/TestRouterRPCClientRetries.java | 2 +- 9 files changed, 188 insertions(+), 10 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NoNamenodesAvailableException.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java index 973c3983f55ac..76b3ca64044d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java @@ -46,6 +46,8 @@ public interface FederationRPCMBean { long getProxyOpRetries(); + long getProxyOpNoNamenodes(); + long getRouterFailureStateStoreOps(); long getRouterFailureReadOnlyOps(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java index cce4b86ce1f28..8e57c6b380922 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java @@ -60,6 +60,8 @@ public class FederationRPCMetrics implements FederationRPCMBean { private MutableCounterLong proxyOpNotImplemented; @Metric("Number of operation retries") private MutableCounterLong proxyOpRetries; + @Metric("Number of operations to hit no namenodes available") + private MutableCounterLong proxyOpNoNamenodes; @Metric("Failed requests due to State Store unavailable") private MutableCounterLong routerFailureStateStore; @@ -138,6 +140,15 @@ public long getProxyOpRetries() { return proxyOpRetries.value(); } + public void incrProxyOpNoNamenodes() { + proxyOpNoNamenodes.incr(); + } + + @Override + public long getProxyOpNoNamenodes() { + return proxyOpNoNamenodes.value(); + } + public void incrRouterFailureStateStore() { routerFailureStateStore.incr(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java index 15725d14ce916..cbd63de5d2e6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java @@ -170,6 +170,11 @@ public void proxyOpRetries() { metrics.incrProxyOpRetries(); } + @Override + public void proxyOpNoNamenodes() { + metrics.incrProxyOpNoNamenodes(); 
+ } + @Override public void routerFailureStateStore() { metrics.incrRouterFailureStateStore(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NoNamenodesAvailableException.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NoNamenodesAvailableException.java new file mode 100644 index 0000000000000..7eabf00356f66 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NoNamenodesAvailableException.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import java.io.IOException; + + +/** + * Exception when no namenodes are available. + */ +public class NoNamenodesAvailableException extends IOException { + + private static final long serialVersionUID = 1L; + + public NoNamenodesAvailableException(String nsId, IOException ioe) { + super("No namenodes available under nameservice " + nsId, ioe); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index f5985ee81adb0..d21bde3d67912 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -61,6 +61,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; @@ -302,8 +303,8 @@ private static IOException toIOException(Exception e) { * @param retryCount Number of retries. * @param nsId Nameservice ID. * @return Retry decision. - * @throws IOException Original exception if the retry policy generates one - * or IOException for no available namenodes. + * @throws NoNamenodesAvailableException Exception that the retry policy + * generates for no available namenodes. 
*/ private RetryDecision shouldRetry(final IOException ioe, final int retryCount, final String nsId) throws IOException { @@ -313,8 +314,7 @@ private RetryDecision shouldRetry(final IOException ioe, final int retryCount, if (retryCount == 0) { return RetryDecision.RETRY; } else { - throw new IOException("No namenode available under nameservice " + nsId, - ioe); + throw new NoNamenodesAvailableException(nsId, ioe); } } @@ -405,6 +405,14 @@ private Object invokeMethod( StandbyException se = new StandbyException(ioe.getMessage()); se.initCause(ioe); throw se; + } else if (ioe instanceof NoNamenodesAvailableException) { + if (this.rpcMonitor != null) { + this.rpcMonitor.proxyOpNoNamenodes(); + } + LOG.error("Can not get available namenode for {} {} error: {}", + nsId, rpcAddress, ioe.getMessage()); + // Throw RetriableException so that client can retry + throw new RetriableException(ioe); } else { // Other communication error, this is a failure // Communication retries are handled by the retry policy diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java index 7af71af079252..5a2adb9e54e41 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java @@ -92,6 +92,11 @@ void init( */ void proxyOpRetries(); + /** + * Failed to proxy an operation because of no namenodes available. + */ + void proxyOpNoNamenodes(); + /** * If the Router cannot contact the State Store in an operation. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java index d92edac35b6df..54342240f1532 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java @@ -48,6 +48,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; @@ -374,4 +375,41 @@ public Object answer(InvocationOnMock invocation) throws Throwable { Whitebox.setInternalState(rpcClient, "connectionManager", spyConnectionManager); } + + /** + * Switch namenodes of all hdfs name services to standby. 
+ * @param cluster a federated HDFS cluster + */ + public static void transitionClusterNSToStandby( + StateStoreDFSCluster cluster) { + // Name services of the cluster + List nameServiceList = cluster.getNameservices(); + + // Change namenodes of each name service to standby + for (String nameService : nameServiceList) { + List nnList = cluster.getNamenodes(nameService); + for(NamenodeContext namenodeContext : nnList) { + cluster.switchToStandby(nameService, namenodeContext.getNamenodeId()); + } + } + } + + /** + * Switch the index namenode of all hdfs name services to active. + * @param cluster a federated HDFS cluster + * @param index the index of namenodes + */ + public static void transitionClusterNSToActive( + StateStoreDFSCluster cluster, int index) { + // Name services of the cluster + List nameServiceList = cluster.getNameservices(); + + // Change the index namenode of each name service to active + for (String nameService : nameServiceList) { + List listNamenodeContext = + cluster.getNamenodes(nameService); + cluster.switchToActive(nameService, + listNamenodeContext.get(index).getNamenodeId()); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java index 066415935191c..14bd7b0a400d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java @@ -19,6 +19,8 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simulateSlowNamenode; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simulateThrowExceptionRouterRpcServer; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.transitionClusterNSToStandby; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.transitionClusterNSToActive; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -27,6 +29,7 @@ import java.io.IOException; import java.net.URI; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -46,7 +49,9 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.StandbyException; import org.junit.After; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,14 +76,19 @@ public void cleanup() { } } - private void setupCluster(boolean overloadControl) throws Exception { + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + private void setupCluster(boolean overloadControl, boolean ha) + throws Exception { // Build and start a federated cluster - cluster = new StateStoreDFSCluster(false, 2); + cluster = new StateStoreDFSCluster(ha, 2); Configuration routerConf = new RouterConfigBuilder() .stateStore() .metrics() .admin() .rpc() + .heartbeat() .build(); // Reduce the number of RPC clients threads to overload the Router easy @@ -98,7 +108,7 @@ private void setupCluster(boolean overloadControl) throws Exception { 
@Test public void testWithoutOverloadControl() throws Exception { - setupCluster(false); + setupCluster(false, false); // Nobody should get overloaded testOverloaded(0); @@ -121,7 +131,7 @@ public void testWithoutOverloadControl() throws Exception { @Test public void testOverloadControl() throws Exception { - setupCluster(true); + setupCluster(true, false); List routers = cluster.getRouters(); FederationRPCMetrics rpcMetrics0 = @@ -244,7 +254,7 @@ public void run() { @Test public void testConnectionNullException() throws Exception { - setupCluster(false); + setupCluster(false, false); // Choose 1st router RouterContext routerContext = cluster.getRouters().get(0); @@ -280,4 +290,70 @@ public void testConnectionNullException() throws Exception { assertEquals(originalRouter1Failures, rpcMetrics1.getProxyOpFailureCommunicate()); } + + /** + * When failover occurs, no namenodes are available within a short time. + * Client will success after some retries. + */ + @Test + public void testNoNamenodesAvailable() throws Exception{ + setupCluster(false, true); + + transitionClusterNSToStandby(cluster); + + Configuration conf = cluster.getRouterClientConf(); + // Set dfs.client.failover.random.order false, to pick 1st router at first + conf.setBoolean("dfs.client.failover.random.order", false); + + // Retries is 3 (see FailoverOnNetworkExceptionRetry#shouldRetry, will fail + // when reties > max.attempts), so total access is 4. + conf.setInt("dfs.client.retry.max.attempts", 2); + DFSClient routerClient = new DFSClient(new URI("hdfs://fed"), conf); + + // Get router0 metrics + FederationRPCMetrics rpcMetrics0 = cluster.getRouters().get(0) + .getRouter().getRpcServer().getRPCMetrics(); + // Get router1 metrics + FederationRPCMetrics rpcMetrics1 = cluster.getRouters().get(1) + .getRouter().getRpcServer().getRPCMetrics(); + + // Original failures + long originalRouter0Failures = rpcMetrics0.getProxyOpNoNamenodes(); + long originalRouter1Failures = rpcMetrics1.getProxyOpNoNamenodes(); + + // GetFileInfo will throw Exception + String exceptionMessage = "org.apache.hadoop.hdfs.server.federation." 
+ + "router.NoNamenodesAvailableException: No namenodes available " + + "under nameservice ns0"; + exceptionRule.expect(RemoteException.class); + exceptionRule.expectMessage(exceptionMessage); + routerClient.getFileInfo("/"); + + // Router 0 failures will increase + assertEquals(originalRouter0Failures + 4, + rpcMetrics0.getProxyOpNoNamenodes()); + // Router 1 failures do not change + assertEquals(originalRouter1Failures, + rpcMetrics1.getProxyOpNoNamenodes()); + + // Make name services available + transitionClusterNSToActive(cluster, 0); + for (RouterContext routerContext : cluster.getRouters()) { + // Manually trigger the heartbeat + Collection heartbeatServices = routerContext + .getRouter().getNamenodeHearbeatServices(); + for (NamenodeHeartbeatService service : heartbeatServices) { + service.periodicInvoke(); + } + // Update service cache + routerContext.getRouter().getStateStore().refreshCaches(true); + } + + originalRouter0Failures = rpcMetrics0.getProxyOpNoNamenodes(); + + // RPC call must be successful + routerClient.getFileInfo("/"); + // Router 0 failures do not change + assertEquals(originalRouter0Failures, rpcMetrics0.getProxyOpNoNamenodes()); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java index f84e9a03ee88b..8772e2fe22e56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java @@ -133,7 +133,7 @@ public void testRetryWhenAllNameServiceDown() throws Exception { } catch (RemoteException e) { String ns0 = cluster.getNameservices().get(0); assertExceptionContains( - "No namenode available under nameservice " + ns0, e); + "No namenodes available under nameservice " + ns0, e); } // Verify the retry times, it should only retry one time. From 75f8b6ccfa6160e695ce8f7ad13c6e3624e9e7aa Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Thu, 14 Feb 2019 08:16:45 +0530 Subject: [PATCH 0299/1308] HDFS-13358. RBF: Support for Delegation Token (RPC). Contributed by CR Hota. 
--- .../federation/router/RBFConfigKeys.java | 9 + .../router/RouterClientProtocol.java | 16 +- .../federation/router/RouterRpcServer.java | 21 +- .../security/RouterSecurityManager.java | 239 ++++++++++++++++++ .../router/security/package-info.java | 28 ++ .../ZKDelegationTokenSecretManagerImpl.java | 56 ++++ .../router/security/token/package-info.java | 29 +++ .../src/main/resources/hdfs-rbf-default.xml | 11 +- .../fs/contract/router/SecurityConfUtil.java | 4 + ...TestRouterHDFSContractDelegationToken.java | 101 ++++++++ .../MockDelegationTokenSecretManager.java | 52 ++++ .../security/TestRouterSecurityManager.java | 93 +++++++ 12 files changed, 652 insertions(+), 7 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/package-info.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/package-info.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/MockDelegationTokenSecretManager.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java index 5e907c8a55e24..657b6cfc12365 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java @@ -28,6 +28,8 @@ import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver; import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl; import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; +import org.apache.hadoop.hdfs.server.federation.router.security.token.ZKDelegationTokenSecretManagerImpl; import java.util.concurrent.TimeUnit; @@ -294,4 +296,11 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic { public static final String DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY = FEDERATION_ROUTER_PREFIX + "kerberos.internal.spnego.principal"; + + // HDFS Router secret manager for delegation token + public static final String DFS_ROUTER_DELEGATION_TOKEN_DRIVER_CLASS = + FEDERATION_ROUTER_PREFIX + "secret.manager.class"; + public static final Class + DFS_ROUTER_DELEGATION_TOKEN_DRIVER_CLASS_DEFAULT = + ZKDelegationTokenSecretManagerImpl.class; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 6652cb26d43f4..abab51111c16a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -77,6 +77,7 @@ import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.federation.router.security.RouterSecurityManager; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; @@ -124,6 +125,8 @@ public class RouterClientProtocol implements ClientProtocol { private final ErasureCoding erasureCoding; /** StoragePolicy calls. **/ private final RouterStoragePolicy storagePolicy; + /** Router security manager to handle token operations. */ + private RouterSecurityManager securityManager = null; RouterClientProtocol(Configuration conf, RouterRpcServer rpcServer) { this.rpcServer = rpcServer; @@ -148,13 +151,14 @@ public class RouterClientProtocol implements ClientProtocol { DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT); this.erasureCoding = new ErasureCoding(rpcServer); this.storagePolicy = new RouterStoragePolicy(rpcServer); + this.securityManager = rpcServer.getRouterSecurityManager(); } @Override public Token getDelegationToken(Text renewer) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); - return null; + rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); + return this.securityManager.getDelegationToken(renewer); } /** @@ -173,14 +177,16 @@ public Token getDelegationToken(Text renewer) @Override public long renewDelegationToken(Token token) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); - return 0; + rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); + return this.securityManager.renewDelegationToken(token); } @Override public void cancelDelegationToken(Token token) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); + this.securityManager.cancelDelegationToken(token); + return; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index be6a9b03c9ee3..a312d4b3a6b0c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -114,6 +114,7 @@ import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.hdfs.server.federation.router.security.RouterSecurityManager; import 
org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; @@ -197,6 +198,8 @@ public class RouterRpcServer extends AbstractService private final RouterNamenodeProtocol nnProto; /** ClientProtocol calls. */ private final RouterClientProtocol clientProto; + /** Router security manager to handle token operations. */ + private RouterSecurityManager securityManager = null; /** * Construct a router RPC server. @@ -256,6 +259,9 @@ public RouterRpcServer(Configuration configuration, Router router, LOG.info("RPC server binding to {} with {} handlers for Router {}", confRpcAddress, handlerCount, this.router.getRouterId()); + // Create security manager + this.securityManager = new RouterSecurityManager(this.conf); + this.rpcServer = new RPC.Builder(this.conf) .setProtocol(ClientNamenodeProtocolPB.class) .setInstance(clientNNPbService) @@ -265,6 +271,7 @@ public RouterRpcServer(Configuration configuration, Router router, .setnumReaders(readerCount) .setQueueSizePerHandler(handlerQueueSize) .setVerbose(false) + .setSecretManager(this.securityManager.getSecretManager()) .build(); // Add all the RPC protocols that the Router implements @@ -344,9 +351,21 @@ protected void serviceStop() throws Exception { if (rpcMonitor != null) { this.rpcMonitor.close(); } + if (securityManager != null) { + this.securityManager.stop(); + } super.serviceStop(); } + /** + * Get the RPC security manager. + * + * @return RPC security manager. + */ + public RouterSecurityManager getRouterSecurityManager() { + return this.securityManager; + } + /** * Get the RPC client to the Namenode. * @@ -1457,7 +1476,7 @@ private boolean isPathReadOnly(final String path) { * @return Remote user group information. * @throws IOException If we cannot get the user information. */ - static UserGroupInformation getRemoteUser() throws IOException { + public static UserGroupInformation getRemoteUser() throws IOException { UserGroupInformation ugi = Server.getRemoteUser(); return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java new file mode 100644 index 0000000000000..0f0089aab005a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java @@ -0,0 +1,239 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.federation.router.security; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; +import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.lang.reflect.Constructor; + +/** + * Manager to hold underlying delegation token secret manager implementations. + */ +public class RouterSecurityManager { + + private static final Logger LOG = + LoggerFactory.getLogger(RouterSecurityManager.class); + + private AbstractDelegationTokenSecretManager + dtSecretManager = null; + + public RouterSecurityManager(Configuration conf) { + this.dtSecretManager = newSecretManager(conf); + } + + @VisibleForTesting + public RouterSecurityManager(AbstractDelegationTokenSecretManager + dtSecretManager) { + this.dtSecretManager = dtSecretManager; + } + + /** + * Creates an instance of a SecretManager from the configuration. + * + * @param conf Configuration that defines the secret manager class. + * @return New secret manager. + */ + public static AbstractDelegationTokenSecretManager + newSecretManager(Configuration conf) { + Class clazz = + conf.getClass( + RBFConfigKeys.DFS_ROUTER_DELEGATION_TOKEN_DRIVER_CLASS, + RBFConfigKeys.DFS_ROUTER_DELEGATION_TOKEN_DRIVER_CLASS_DEFAULT, + AbstractDelegationTokenSecretManager.class); + AbstractDelegationTokenSecretManager secretManager; + try { + Constructor constructor = clazz.getConstructor(Configuration.class); + secretManager = (AbstractDelegationTokenSecretManager) + constructor.newInstance(conf); + LOG.info("Delegation token secret manager object instantiated"); + } catch (ReflectiveOperationException e) { + LOG.error("Could not instantiate: {}", clazz.getSimpleName(), e); + return null; + } catch (RuntimeException e) { + LOG.error("RuntimeException to instantiate: {}", + clazz.getSimpleName(), e); + return null; + } + return secretManager; + } + + public AbstractDelegationTokenSecretManager + getSecretManager() { + return this.dtSecretManager; + } + + public void stop() { + LOG.info("Stopping security manager"); + if(this.dtSecretManager != null) { + this.dtSecretManager.stopThreads(); + } + } + + private static UserGroupInformation getRemoteUser() throws IOException { + return RouterRpcServer.getRemoteUser(); + } + /** + * Returns authentication method used to establish the connection. + * @return AuthenticationMethod used to establish connection. 
+ * @throws IOException + */ + private UserGroupInformation.AuthenticationMethod + getConnectionAuthenticationMethod() throws IOException { + UserGroupInformation ugi = getRemoteUser(); + UserGroupInformation.AuthenticationMethod authMethod + = ugi.getAuthenticationMethod(); + if (authMethod == UserGroupInformation.AuthenticationMethod.PROXY) { + authMethod = ugi.getRealUser().getAuthenticationMethod(); + } + return authMethod; + } + + /** + * + * @return true if delegation token operation is allowed + */ + private boolean isAllowedDelegationTokenOp() throws IOException { + AuthenticationMethod authMethod = getConnectionAuthenticationMethod(); + if (UserGroupInformation.isSecurityEnabled() + && (authMethod != AuthenticationMethod.KERBEROS) + && (authMethod != AuthenticationMethod.KERBEROS_SSL) + && (authMethod != AuthenticationMethod.CERTIFICATE)) { + return false; + } + return true; + } + + /** + * @param renewer Renewer information + * @return delegation token + * @throws IOException on error + */ + public Token getDelegationToken(Text renewer) + throws IOException { + LOG.debug("Generate delegation token with renewer " + renewer); + final String operationName = "getDelegationToken"; + boolean success = false; + String tokenId = ""; + Token token; + try { + if (!isAllowedDelegationTokenOp()) { + throw new IOException( + "Delegation Token can be issued only " + + "with kerberos or web authentication"); + } + if (dtSecretManager == null || !dtSecretManager.isRunning()) { + LOG.warn("trying to get DT with no secret manager running"); + return null; + } + UserGroupInformation ugi = getRemoteUser(); + String user = ugi.getUserName(); + Text owner = new Text(user); + Text realUser = null; + if (ugi.getRealUser() != null) { + realUser = new Text(ugi.getRealUser().getUserName()); + } + DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, + renewer, realUser); + token = new Token( + dtId, dtSecretManager); + tokenId = dtId.toStringStable(); + success = true; + } finally { + logAuditEvent(success, operationName, tokenId); + } + return token; + } + + public long renewDelegationToken(Token token) + throws SecretManager.InvalidToken, IOException { + LOG.debug("Renew delegation token"); + final String operationName = "renewDelegationToken"; + boolean success = false; + String tokenId = ""; + long expiryTime; + try { + if (!isAllowedDelegationTokenOp()) { + throw new IOException( + "Delegation Token can be renewed only " + + "with kerberos or web authentication"); + } + String renewer = getRemoteUser().getShortUserName(); + expiryTime = dtSecretManager.renewToken(token, renewer); + final DelegationTokenIdentifier id = DFSUtil.decodeDelegationToken(token); + tokenId = id.toStringStable(); + success = true; + } catch (AccessControlException ace) { + final DelegationTokenIdentifier id = DFSUtil.decodeDelegationToken(token); + tokenId = id.toStringStable(); + throw ace; + } finally { + logAuditEvent(success, operationName, tokenId); + } + return expiryTime; + } + + public void cancelDelegationToken(Token token) + throws IOException { + LOG.debug("Cancel delegation token"); + final String operationName = "cancelDelegationToken"; + boolean success = false; + String tokenId = ""; + try { + String canceller = getRemoteUser().getUserName(); + LOG.info("Cancel request by " + canceller); + DelegationTokenIdentifier id = + dtSecretManager.cancelToken(token, canceller); + tokenId = id.toStringStable(); + success = true; + } catch (AccessControlException ace) { + final DelegationTokenIdentifier id = 
DFSUtil.decodeDelegationToken(token); + tokenId = id.toStringStable(); + throw ace; + } finally { + logAuditEvent(success, operationName, tokenId); + } + } + + /** + * Log status of delegation token related operation. + * Extend in future to use audit logger instead of local logging. + */ + void logAuditEvent(boolean succeeded, String cmd, String tokenId) + throws IOException { + LOG.debug( + "Operation:" + cmd + + " Status:" + succeeded + + " TokenId:" + tokenId); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/package-info.java new file mode 100644 index 0000000000000..9dd12ec7513b5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/package-info.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Includes router security manager and token store implementations. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving + +package org.apache.hadoop.hdfs.server.federation.router.security; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java new file mode 100644 index 0000000000000..3da63f80bc2c0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.federation.router.security.token; + +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; +import org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; + +import java.io.IOException; + +/** + * Zookeeper based router delegation token store implementation. + */ +public class ZKDelegationTokenSecretManagerImpl extends + ZKDelegationTokenSecretManager { + + private static final Logger LOG = + LoggerFactory.getLogger(ZKDelegationTokenSecretManagerImpl.class); + + private Configuration conf = null; + + public ZKDelegationTokenSecretManagerImpl(Configuration conf) { + super(conf); + this.conf = conf; + try { + super.startThreads(); + } catch (IOException e) { + LOG.error("Error starting threads for zkDelegationTokens "); + } + LOG.info("Zookeeper delegation token secret manager instantiated"); + } + + @Override + public DelegationTokenIdentifier createIdentifier() { + return new DelegationTokenIdentifier(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/package-info.java new file mode 100644 index 0000000000000..a51e4552955a5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/package-info.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Includes implementations of token secret managers. + * Implementations should extend {@link AbstractDelegationTokenSecretManager}. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving + +package org.apache.hadoop.hdfs.server.federation.router.security.token; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index afe3ad155b827..1034c87ff8f20 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -584,4 +584,13 @@ - \ No newline at end of file + + dfs.federation.router.secret.manager.class + org.apache.hadoop.hdfs.server.federation.router.security.token.ZKDelegationTokenSecretManagerImpl + + Class to implement state store to delegation tokens. + Default implementation uses zookeeper as the backend to store delegation tokens. + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java index 100313e151038..d6ee3c7d71fb5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java @@ -31,6 +31,7 @@ import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DELEGATION_TOKEN_DRIVER_CLASS; import static org.junit.Assert.assertTrue; import java.io.File; @@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver; import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl; +import org.apache.hadoop.hdfs.server.federation.security.MockDelegationTokenSecretManager; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.SecurityUtil; @@ -144,6 +146,8 @@ public static Configuration initSecurity() throws Exception { // We need to specify the host to prevent 0.0.0.0 as the host address conf.set(DFS_ROUTER_RPC_BIND_HOST_KEY, "localhost"); + conf.set(DFS_ROUTER_DELEGATION_TOKEN_DRIVER_CLASS, + MockDelegationTokenSecretManager.class.getName()); return conf; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java new file mode 100644 index 0000000000000..e4c03e462e09b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.contract.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.fs.contract.AbstractFSContractTestBase; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.security.token.Token; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.io.IOException; +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; + +/** + * Test to verify router contracts for delegation token operations. + */ +public class TestRouterHDFSContractDelegationToken + extends AbstractFSContractTestBase { + + @BeforeClass + public static void createCluster() throws Exception { + RouterHDFSContract.createCluster(initSecurity()); + } + + @AfterClass + public static void teardownCluster() throws IOException { + RouterHDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RouterHDFSContract(conf); + } + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + @Test + public void testRouterDelegationToken() throws Exception { + // Generate delegation token + Token token = + (Token) getFileSystem() + .getDelegationToken("router"); + assertNotNull(token); + // Verify properties of the token + assertEquals("HDFS_DELEGATION_TOKEN", token.getKind().toString()); + DelegationTokenIdentifier identifier = token.decodeIdentifier(); + assertNotNull(identifier); + String owner = identifier.getOwner().toString(); + // Windows will not reverse name lookup "127.0.0.1" to "localhost". + String host = Path.WINDOWS ? 
"127.0.0.1" : "localhost"; + String expectedOwner = "router/"+ host + "@EXAMPLE.COM"; + assertEquals(expectedOwner, owner); + assertEquals("router", identifier.getRenewer().toString()); + int masterKeyId = identifier.getMasterKeyId(); + assertTrue(masterKeyId > 0); + int sequenceNumber = identifier.getSequenceNumber(); + assertTrue(sequenceNumber > 0); + long existingMaxTime = token.decodeIdentifier().getMaxDate(); + assertTrue(identifier.getMaxDate() >= identifier.getIssueDate()); + + // Renew delegation token + token.renew(initSecurity()); + assertNotNull(token); + assertTrue(token.decodeIdentifier().getMaxDate() >= existingMaxTime); + // Renewal should retain old master key id and sequence number + identifier = token.decodeIdentifier(); + assertEquals(identifier.getMasterKeyId(), masterKeyId); + assertEquals(identifier.getSequenceNumber(), sequenceNumber); + + // Cancel delegation token + token.cancel(initSecurity()); + + // Renew a cancelled token + exceptionRule.expect(SecretManager.InvalidToken.class); + token.renew(initSecurity()); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/MockDelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/MockDelegationTokenSecretManager.java new file mode 100644 index 0000000000000..8f89f0abad5c3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/MockDelegationTokenSecretManager.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.federation.security; + +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; +import org.apache.hadoop.conf.Configuration; +import java.io.IOException; + +/** + * Mock functionality of AbstractDelegationTokenSecretManager. 
+ * for testing + */ +public class MockDelegationTokenSecretManager + extends AbstractDelegationTokenSecretManager { + + public MockDelegationTokenSecretManager( + long delegationKeyUpdateInterval, + long delegationTokenMaxLifetime, + long delegationTokenRenewInterval, + long delegationTokenRemoverScanInterval) { + super(delegationKeyUpdateInterval, delegationTokenMaxLifetime, + delegationTokenRenewInterval, delegationTokenRemoverScanInterval); + } + + public MockDelegationTokenSecretManager(Configuration conf) + throws IOException { + super(100000, 100000, 100000, 100000); + this.startThreads(); + } + + @Override + public DelegationTokenIdentifier createIdentifier() { + return new DelegationTokenIdentifier(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java new file mode 100644 index 0000000000000..fe6e0eea91c31 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.federation.security; + +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.federation.router.security.RouterSecurityManager; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; +import org.junit.rules.ExpectedException; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNotNull; + +import java.io.IOException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Test functionality of {@link RouterSecurityManager}, which manages + * delegation tokens for router. 
+ */ +public class TestRouterSecurityManager { + + private static final Logger LOG = + LoggerFactory.getLogger(TestRouterSecurityManager.class); + + private static RouterSecurityManager securityManager = null; + + @BeforeClass + public static void createMockSecretManager() throws IOException { + AbstractDelegationTokenSecretManager + mockDelegationTokenSecretManager = + new MockDelegationTokenSecretManager(100, 100, 100, 100); + mockDelegationTokenSecretManager.startThreads(); + securityManager = + new RouterSecurityManager(mockDelegationTokenSecretManager); + } + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + @Test + public void testDelegationTokens() throws IOException { + String[] groupsForTesting = new String[1]; + groupsForTesting[0] = "router_group"; + UserGroupInformation.setLoginUser(UserGroupInformation + .createUserForTesting("router", groupsForTesting)); + + // Get a delegation token + Token token = + securityManager.getDelegationToken(new Text("some_renewer")); + assertNotNull(token); + + // Renew the delegation token + UserGroupInformation.setLoginUser(UserGroupInformation + .createUserForTesting("some_renewer", groupsForTesting)); + long updatedExpirationTime = securityManager.renewDelegationToken(token); + assertTrue(updatedExpirationTime >= token.decodeIdentifier().getMaxDate()); + + // Cancel the delegation token + securityManager.cancelDelegationToken(token); + + String exceptionCause = "Renewal request for unknown token"; + exceptionRule.expect(SecretManager.InvalidToken.class); + exceptionRule.expectMessage(exceptionCause); + + // This throws an exception as token has been cancelled. + securityManager.renewDelegationToken(token); + } +} From e2a3c4494ba27a7b82117dac275b9d115aee7f95 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Fri, 15 Feb 2019 09:25:09 -0800 Subject: [PATCH 0300/1308] HDFS-14226. RBF: Setting attributes should set on all subclusters' directories. Contributed by Ayush Saxena. 
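The substance of this change: attribute mutations routed through the Router
(setOwner, setPermission, setXAttr, setReplication, storage-policy and
erasure-coding-policy calls) must reach every destination when the mount
entry fans a path out to several subclusters, instead of stopping at the
first resolved location. A minimal sketch of that dispatch rule follows;
MountEntry, the String destinations and the Consumer-based operation are
illustrative stand-ins only — the actual code in the diff below goes through
RouterRpcClient.invokeConcurrent()/invokeSequential():

    import java.util.List;
    import java.util.function.Consumer;

    final class AttributeDispatchSketch {
      /** Illustrative stand-in for a resolved mount table entry. */
      interface MountEntry {
        /** True for HASH_ALL, RANDOM and SPACE orders, or for the mount point itself. */
        boolean spansAllSubclusters();
      }

      /** Apply an attribute change either everywhere or only on the first location. */
      static void apply(MountEntry entry, List<String> destinations,
                        Consumer<String> op) {
        if (entry != null && entry.spansAllSubclusters()) {
          // analogous to invokeConcurrent(): every subcluster sees the change
          for (String dest : destinations) {
            op.accept(dest);
          }
        } else {
          // analogous to invokeSequential(): the first resolved location wins
          op.accept(destinations.get(0));
        }
      }
    }

The TestRouterRPCMultipleDestinationMountTableResolver test added at the end
of this patch exercises this rule once per DestinationOrder.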
--- .../federation/router/ErasureCoding.java | 12 +- .../router/RouterClientProtocol.java | 55 +-- .../federation/router/RouterRpcServer.java | 46 +- .../router/RouterStoragePolicy.java | 12 +- ...MultipleDestinationMountTableResolver.java | 394 ++++++++++++++++++ 5 files changed, 482 insertions(+), 37 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java index 480b232ca422b..f4584b1afafb5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java @@ -157,7 +157,11 @@ public void setErasureCodingPolicy(String src, String ecPolicyName) RemoteMethod remoteMethod = new RemoteMethod("setErasureCodingPolicy", new Class[] {String.class, String.class}, new RemoteParam(), ecPolicyName); - rpcClient.invokeSequential(locations, remoteMethod, null, null); + if (rpcServer.isInvokeConcurrent(src)) { + rpcClient.invokeConcurrent(locations, remoteMethod); + } else { + rpcClient.invokeSequential(locations, remoteMethod); + } } public void unsetErasureCodingPolicy(String src) throws IOException { @@ -167,7 +171,11 @@ public void unsetErasureCodingPolicy(String src) throws IOException { rpcServer.getLocationsForPath(src, true); RemoteMethod remoteMethod = new RemoteMethod("unsetErasureCodingPolicy", new Class[] {String.class}, new RemoteParam()); - rpcClient.invokeSequential(locations, remoteMethod, null, null); + if (rpcServer.isInvokeConcurrent(src)) { + rpcClient.invokeConcurrent(locations, remoteMethod); + } else { + rpcClient.invokeSequential(locations, remoteMethod); + } } public ECBlockGroupStats getECBlockGroupStats() throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index abab51111c16a..757e096960696 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -219,7 +219,7 @@ public HdfsFileStatus create(String src, FsPermission masked, throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.WRITE); - if (createParent && isPathAll(src)) { + if (createParent && rpcServer.isPathAll(src)) { int index = src.lastIndexOf(Path.SEPARATOR); String parent = src.substring(0, index); LOG.debug("Creating {} requires creating parent {}", src, parent); @@ -279,9 +279,13 @@ public boolean setReplication(String src, short replication) RemoteMethod method = new RemoteMethod("setReplication", new Class[] {String.class, short.class}, new RemoteParam(), replication); - Object result = rpcClient.invokeSequential( - locations, method, Boolean.class, Boolean.TRUE); - return (boolean) result; + if (rpcServer.isInvokeConcurrent(src)) { + return !rpcClient.invokeConcurrent(locations, method, Boolean.class) + 
.containsValue(false); + } else { + return rpcClient.invokeSequential(locations, method, Boolean.class, + Boolean.TRUE); + } } @Override @@ -305,7 +309,7 @@ public void setPermission(String src, FsPermission permissions) RemoteMethod method = new RemoteMethod("setPermission", new Class[] {String.class, FsPermission.class}, new RemoteParam(), permissions); - if (isPathAll(src)) { + if (rpcServer.isInvokeConcurrent(src)) { rpcClient.invokeConcurrent(locations, method); } else { rpcClient.invokeSequential(locations, method); @@ -322,7 +326,7 @@ public void setOwner(String src, String username, String groupname) RemoteMethod method = new RemoteMethod("setOwner", new Class[] {String.class, String.class, String.class}, new RemoteParam(), username, groupname); - if (isPathAll(src)) { + if (rpcServer.isInvokeConcurrent(src)) { rpcClient.invokeConcurrent(locations, method); } else { rpcClient.invokeSequential(locations, method); @@ -555,7 +559,7 @@ public boolean delete(String src, boolean recursive) throws IOException { RemoteMethod method = new RemoteMethod("delete", new Class[] {String.class, boolean.class}, new RemoteParam(), recursive); - if (isPathAll(src)) { + if (rpcServer.isPathAll(src)) { return rpcClient.invokeAll(locations, method); } else { return rpcClient.invokeSequential(locations, method, @@ -575,7 +579,7 @@ public boolean mkdirs(String src, FsPermission masked, boolean createParent) new RemoteParam(), masked, createParent); // Create in all locations - if (isPathAll(src)) { + if (rpcServer.isPathAll(src)) { return rpcClient.invokeAll(locations, method); } @@ -713,7 +717,7 @@ public HdfsFileStatus getFileInfo(String src) throws IOException { HdfsFileStatus ret = null; // If it's a directory, we check in all locations - if (isPathAll(src)) { + if (rpcServer.isPathAll(src)) { ret = getFileInfoAll(locations, method); } else { // Check for file information sequentially @@ -1315,7 +1319,11 @@ public void setXAttr(String src, XAttr xAttr, EnumSet flag) RemoteMethod method = new RemoteMethod("setXAttr", new Class[] {String.class, XAttr.class, EnumSet.class}, new RemoteParam(), xAttr, flag); - rpcClient.invokeSequential(locations, method); + if (rpcServer.isInvokeConcurrent(src)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } } @SuppressWarnings("unchecked") @@ -1356,7 +1364,11 @@ public void removeXAttr(String src, XAttr xAttr) throws IOException { rpcServer.getLocationsForPath(src, true); RemoteMethod method = new RemoteMethod("removeXAttr", new Class[] {String.class, XAttr.class}, new RemoteParam(), xAttr); - rpcClient.invokeSequential(locations, method); + if (rpcServer.isInvokeConcurrent(src)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } } @Override @@ -1718,27 +1730,6 @@ private static FsPermission getParentPermission(final FsPermission mask) { return ret; } - /** - * Check if a path should be in all subclusters. - * - * @param path Path to check. - * @return If a path should be in all subclusters. - */ - private boolean isPathAll(final String path) { - if (subclusterResolver instanceof MountTableResolver) { - try { - MountTableResolver mountTable = (MountTableResolver)subclusterResolver; - MountTable entry = mountTable.getMountPoint(path); - if (entry != null) { - return entry.isAll(); - } - } catch (IOException e) { - LOG.error("Cannot get mount point", e); - } - } - return false; - } - /** * Create a new file status for a mount point. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index a312d4b3a6b0c..e4ea58b507100 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -1541,4 +1541,48 @@ public RouterClientProtocol getClientProtocolModule() { public FederationRPCMetrics getRPCMetrics() { return this.rpcMonitor.getRPCMetrics(); } -} + + /** + * Check if a path should be in all subclusters. + * + * @param path Path to check. + * @return If a path should be in all subclusters. + */ + boolean isPathAll(final String path) { + if (subclusterResolver instanceof MountTableResolver) { + try { + MountTableResolver mountTable = (MountTableResolver) subclusterResolver; + MountTable entry = mountTable.getMountPoint(path); + if (entry != null) { + return entry.isAll(); + } + } catch (IOException e) { + LOG.error("Cannot get mount point", e); + } + } + return false; + } + + /** + * Check if call needs to be invoked to all the locations. The call is + * supposed to be invoked in all the locations in case the order of the mount + * entry is amongst HASH_ALL, RANDOM or SPACE or if the source is itself a + * mount entry. + * @param path The path on which the operation need to be invoked. + * @return true if the call is supposed to invoked on all locations. + * @throws IOException + */ + boolean isInvokeConcurrent(final String path) throws IOException { + if (subclusterResolver instanceof MountTableResolver) { + MountTableResolver mountTableResolver = + (MountTableResolver) subclusterResolver; + List mountPoints = mountTableResolver.getMountPoints(path); + // If this is a mount point, we need to invoke everywhere. 
+ if (mountPoints != null) { + return true; + } + return isPathAll(path); + } + return false; + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java index 8a55b9a6fd427..a4538b0e6bdc0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java @@ -50,7 +50,11 @@ public void setStoragePolicy(String src, String policyName) new Class[] {String.class, String.class}, new RemoteParam(), policyName); - rpcClient.invokeSequential(locations, method, null, null); + if (rpcServer.isInvokeConcurrent(src)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } } public BlockStoragePolicy[] getStoragePolicies() throws IOException { @@ -67,7 +71,11 @@ public void unsetStoragePolicy(String src) throws IOException { RemoteMethod method = new RemoteMethod("unsetStoragePolicy", new Class[] {String.class}, new RemoteParam()); - rpcClient.invokeSequential(locations, method); + if (rpcServer.isInvokeConcurrent(src)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } } public BlockStoragePolicy getStoragePolicy(String path) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java new file mode 100644 index 0000000000000..8c1515140ae6c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java @@ -0,0 +1,394 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; +import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Tests router rpc with multiple destination mount table resolver. + */ +public class TestRouterRPCMultipleDestinationMountTableResolver { + private static StateStoreDFSCluster cluster; + private static RouterContext routerContext; + private static MountTableResolver resolver; + private static DistributedFileSystem nnFs0; + private static DistributedFileSystem nnFs1; + private static DistributedFileSystem routerFs; + private static RouterRpcServer rpcServer; + + @BeforeClass + public static void setUp() throws Exception { + + // Build and start a federated cluster + cluster = new StateStoreDFSCluster(false, 2, + MultipleDestinationMountTableResolver.class); + Configuration routerConf = + new RouterConfigBuilder().stateStore().admin().quota().rpc().build(); + + Configuration hdfsConf = new Configuration(false); + + cluster.addRouterOverrides(routerConf); + cluster.addNamenodeOverrides(hdfsConf); + cluster.startCluster(); + cluster.startRouters(); + cluster.waitClusterUp(); + + routerContext = cluster.getRandomRouter(); + resolver = + (MountTableResolver) routerContext.getRouter().getSubclusterResolver(); + nnFs0 = (DistributedFileSystem) cluster + .getNamenode(cluster.getNameservices().get(0), null).getFileSystem(); + nnFs1 = (DistributedFileSystem) cluster + .getNamenode(cluster.getNameservices().get(1), null).getFileSystem(); + routerFs = (DistributedFileSystem) routerContext.getFileSystem(); + rpcServer =routerContext.getRouter().getRpcServer(); + } + + @AfterClass + public static void tearDown() { + if (cluster != null) { + cluster.stopRouter(routerContext); + cluster.shutdown(); + cluster = null; + } + } + + /** + * SetUp the mount entry , directories and file to verify invocation. + * @param order The order that the mount entry needs to follow. + * @throws Exception On account of any exception encountered during setting up + * the environment. 
+ */ + public void setupOrderMountPath(DestinationOrder order) throws Exception { + Map destMap = new HashMap<>(); + destMap.put("ns0", "/tmp"); + destMap.put("ns1", "/tmp"); + nnFs0.mkdirs(new Path("/tmp")); + nnFs1.mkdirs(new Path("/tmp")); + MountTable addEntry = MountTable.newInstance("/mount", destMap); + addEntry.setDestOrder(order); + assertTrue(addMountTable(addEntry)); + routerFs.mkdirs(new Path("/mount/dir/dir")); + DFSTestUtil.createFile(routerFs, new Path("/mount/dir/file"), 100L, (short) 1, + 1024L); + DFSTestUtil.createFile(routerFs, new Path("/mount/file"), 100L, (short) 1, + 1024L); + } + + @After + public void resetTestEnvironment() throws IOException { + RouterClient client = routerContext.getAdminClient(); + MountTableManager mountTableManager = client.getMountTableManager(); + RemoveMountTableEntryRequest req2 = + RemoveMountTableEntryRequest.newInstance("/mount"); + mountTableManager.removeMountTableEntry(req2); + nnFs0.delete(new Path("/tmp"), true); + nnFs1.delete(new Path("/tmp"), true); + + } + + @Test + public void testInvocationSpaceOrder() throws Exception { + setupOrderMountPath(DestinationOrder.SPACE); + boolean isDirAll = rpcServer.isPathAll("/mount/dir"); + assertTrue(isDirAll); + testInvocation(isDirAll); + } + + @Test + public void testInvocationHashAllOrder() throws Exception { + setupOrderMountPath(DestinationOrder.HASH_ALL); + boolean isDirAll = rpcServer.isPathAll("/mount/dir"); + assertTrue(isDirAll); + testInvocation(isDirAll); + } + + @Test + public void testInvocationRandomOrder() throws Exception { + setupOrderMountPath(DestinationOrder.RANDOM); + boolean isDirAll = rpcServer.isPathAll("/mount/dir"); + assertTrue(isDirAll); + testInvocation(isDirAll); + } + + @Test + public void testInvocationHashOrder() throws Exception { + setupOrderMountPath(DestinationOrder.HASH); + boolean isDirAll = rpcServer.isPathAll("/mount/dir"); + assertFalse(isDirAll); + testInvocation(isDirAll); + } + + @Test + public void testInvocationLocalOrder() throws Exception { + setupOrderMountPath(DestinationOrder.LOCAL); + boolean isDirAll = rpcServer.isPathAll("/mount/dir"); + assertFalse(isDirAll); + testInvocation(isDirAll); + } + + /** + * Verifies the invocation of API's at directory level , file level and at + * mount level. + * @param dirAll if true assumes that the mount entry creates directory on all + * locations. + * @throws IOException + */ + private void testInvocation(boolean dirAll) throws IOException { + // Verify invocation on nested directory and file. + Path mountDir = new Path("/mount/dir/dir"); + Path nameSpaceFile = new Path("/tmp/dir/file"); + Path mountFile = new Path("/mount/dir/file"); + Path mountEntry = new Path("/mount"); + Path mountDest = new Path("/tmp"); + Path nameSpaceDir = new Path("/tmp/dir/dir"); + final String name = "user.a1"; + final byte[] value = {0x31, 0x32, 0x33}; + testDirectoryAndFileLevelInvocation(dirAll, mountDir, nameSpaceFile, + mountFile, nameSpaceDir, name, value); + + // Verify invocation on non nested directory and file. + mountDir = new Path("/mount/dir"); + nameSpaceFile = new Path("/tmp/file"); + mountFile = new Path("/mount/file"); + nameSpaceDir = new Path("/tmp/dir"); + testDirectoryAndFileLevelInvocation(dirAll, mountDir, nameSpaceFile, + mountFile, nameSpaceDir, name, value); + + // Check invocation directly for a mount point. + // Verify owner and permissions. 
+ routerFs.setOwner(mountEntry, "testuser", "testgroup"); + routerFs.setPermission(mountEntry, + FsPermission.createImmutable((short) 777)); + assertEquals("testuser", routerFs.getFileStatus(mountEntry).getOwner()); + assertEquals("testuser", nnFs0.getFileStatus(mountDest).getOwner()); + assertEquals("testuser", nnFs1.getFileStatus(mountDest).getOwner()); + assertEquals((short) 777, + routerFs.getFileStatus(mountEntry).getPermission().toShort()); + assertEquals((short) 777, + nnFs0.getFileStatus(mountDest).getPermission().toShort()); + assertEquals((short) 777, + nnFs1.getFileStatus(mountDest).getPermission().toShort()); + + //Verify storage policy. + routerFs.setStoragePolicy(mountEntry, "COLD"); + assertEquals("COLD", routerFs.getStoragePolicy(mountEntry).getName()); + assertEquals("COLD", nnFs0.getStoragePolicy(mountDest).getName()); + assertEquals("COLD", nnFs1.getStoragePolicy(mountDest).getName()); + routerFs.unsetStoragePolicy(mountEntry); + assertEquals("HOT", routerFs.getStoragePolicy(mountDest).getName()); + assertEquals("HOT", nnFs0.getStoragePolicy(mountDest).getName()); + assertEquals("HOT", nnFs1.getStoragePolicy(mountDest).getName()); + + //Verify erasure coding policy. + routerFs.setErasureCodingPolicy(mountEntry, "RS-6-3-1024k"); + assertEquals("RS-6-3-1024k", + routerFs.getErasureCodingPolicy(mountEntry).getName()); + assertEquals("RS-6-3-1024k", + nnFs0.getErasureCodingPolicy(mountDest).getName()); + assertEquals("RS-6-3-1024k", + nnFs1.getErasureCodingPolicy(mountDest).getName()); + routerFs.unsetErasureCodingPolicy(mountEntry); + assertNull(routerFs.getErasureCodingPolicy(mountDest)); + assertNull(nnFs0.getErasureCodingPolicy(mountDest)); + assertNull(nnFs1.getErasureCodingPolicy(mountDest)); + + //Verify xAttr. + routerFs.setXAttr(mountEntry, name, value); + assertArrayEquals(value, routerFs.getXAttr(mountEntry, name)); + assertArrayEquals(value, nnFs0.getXAttr(mountDest, name)); + assertArrayEquals(value, nnFs1.getXAttr(mountDest, name)); + routerFs.removeXAttr(mountEntry, name); + assertEquals(0, routerFs.getXAttrs(mountEntry).size()); + assertEquals(0, nnFs0.getXAttrs(mountDest).size()); + assertEquals(0, nnFs1.getXAttrs(mountDest).size()); + } + + /** + * SetUp to verify invocations on directories and file. + */ + private void testDirectoryAndFileLevelInvocation(boolean dirAll, + Path mountDir, Path nameSpaceFile, Path mountFile, Path nameSpaceDir, + final String name, final byte[] value) throws IOException { + // Check invocation for a directory. + routerFs.setOwner(mountDir, "testuser", "testgroup"); + routerFs.setPermission(mountDir, FsPermission.createImmutable((short) 777)); + routerFs.setStoragePolicy(mountDir, "COLD"); + routerFs.setErasureCodingPolicy(mountDir, "RS-6-3-1024k"); + routerFs.setXAttr(mountDir, name, value); + + // Verify the directory level invocations were checked in case of mounts not + // creating directories in all subclusters. 
+ boolean checkedDir1 = verifyDirectoryLevelInvocations(dirAll, nameSpaceDir, + nnFs0, name, value); + boolean checkedDir2 = verifyDirectoryLevelInvocations(dirAll, nameSpaceDir, + nnFs1, name, value); + assertTrue("The file didn't existed in either of the subclusters.", + checkedDir1 || checkedDir2); + routerFs.unsetStoragePolicy(mountDir); + routerFs.removeXAttr(mountDir, name); + routerFs.unsetErasureCodingPolicy(mountDir); + + checkedDir1 = + verifyDirectoryLevelUnsetInvocations(dirAll, nnFs0, nameSpaceDir); + checkedDir2 = + verifyDirectoryLevelUnsetInvocations(dirAll, nnFs1, nameSpaceDir); + assertTrue("The file didn't existed in either of the subclusters.", + checkedDir1 || checkedDir2); + + // Check invocation for a file. + routerFs.setOwner(mountFile, "testuser", "testgroup"); + routerFs.setPermission(mountFile, + FsPermission.createImmutable((short) 777)); + routerFs.setStoragePolicy(mountFile, "COLD"); + routerFs.setReplication(mountFile, (short) 2); + routerFs.setXAttr(mountFile, name, value); + verifyFileLevelInvocations(nameSpaceFile, nnFs0, mountFile, name, value); + verifyFileLevelInvocations(nameSpaceFile, nnFs1, mountFile, name, value); + } + + /** + * Verify invocations of API's unseting values at the directory level. + * @param dirAll true if the mount entry order creates directory in all + * locations. + * @param nameSpaceDir path of the directory in the namespace. + * @param nnFs file system where the directory level invocation needs to be + * tested. + * @throws IOException + */ + private boolean verifyDirectoryLevelUnsetInvocations(boolean dirAll, + DistributedFileSystem nnFs, Path nameSpaceDir) throws IOException { + boolean checked = false; + if (dirAll || nnFs.exists(nameSpaceDir)) { + checked = true; + assertEquals("HOT", nnFs.getStoragePolicy(nameSpaceDir).getName()); + assertNull(nnFs.getErasureCodingPolicy(nameSpaceDir)); + assertEquals(0, nnFs.getXAttrs(nameSpaceDir).size()); + } + return checked; + } + + /** + * Verify file level invocations. + * @param nameSpaceFile path of the file in the namespace. + * @param nnFs the file system where the file invocation needs to checked. + * @param mountFile path of the file w.r.t. mount table. + * @param name name of Xattr. + * @param value value of Xattr. + * @throws IOException + */ + private void verifyFileLevelInvocations(Path nameSpaceFile, + DistributedFileSystem nnFs, Path mountFile, final String name, + final byte[] value) throws IOException { + if (nnFs.exists(nameSpaceFile)) { + assertEquals("testuser", nnFs.getFileStatus(nameSpaceFile).getOwner()); + assertEquals((short) 777, + nnFs.getFileStatus(nameSpaceFile).getPermission().toShort()); + assertEquals("COLD", nnFs.getStoragePolicy(nameSpaceFile).getName()); + assertEquals((short) 2, + nnFs.getFileStatus(nameSpaceFile).getReplication()); + assertArrayEquals(value, nnFs.getXAttr(nameSpaceFile, name)); + + routerFs.unsetStoragePolicy(mountFile); + routerFs.removeXAttr(mountFile, name); + assertEquals(0, nnFs.getXAttrs(nameSpaceFile).size()); + + assertEquals("HOT", nnFs.getStoragePolicy(nameSpaceFile).getName()); + + } + } + + /** + * Verify invocations at the directory level. + * @param dirAll true if the mount entry order creates directory in all + * locations. + * @param nameSpaceDir path of the directory in the namespace. + * @param nnFs file system where the directory level invocation needs to be + * tested. + * @param name name for the Xattr. + * @param value value for the Xattr. 
+ * @return true, if directory existed and successful verification of + * invocations. + * @throws IOException + */ + private boolean verifyDirectoryLevelInvocations(boolean dirAll, + Path nameSpaceDir, DistributedFileSystem nnFs, final String name, + final byte[] value) throws IOException { + boolean checked = false; + if (dirAll || nnFs.exists(nameSpaceDir)) { + checked = true; + assertEquals("testuser", nnFs.getFileStatus(nameSpaceDir).getOwner()); + assertEquals("COLD", nnFs.getStoragePolicy(nameSpaceDir).getName()); + assertEquals("RS-6-3-1024k", + nnFs.getErasureCodingPolicy(nameSpaceDir).getName()); + assertArrayEquals(value, nnFs.getXAttr(nameSpaceDir, name)); + assertEquals((short) 777, + nnFs.getFileStatus(nameSpaceDir).getPermission().toShort()); + } + return checked; + } + + /** + * Add a mount table entry to the mount table through the admin API. + * @param entry Mount table entry to add. + * @return If it was successfully added. + * @throws IOException + * Problems adding entries. + */ + private boolean addMountTable(final MountTable entry) throws IOException { + RouterClient client = routerContext.getAdminClient(); + MountTableManager mountTableManager = client.getMountTableManager(); + AddMountTableEntryRequest addRequest = + AddMountTableEntryRequest.newInstance(entry); + AddMountTableEntryResponse addResponse = + mountTableManager.addMountTableEntry(addRequest); + + // Reload the Router cache + resolver.loadCache(true); + + return addResponse.getStatus(); + } +} \ No newline at end of file From 50aee18a84fcbca38be4272cfa0d33aadafe076d Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Fri, 15 Feb 2019 10:47:17 -0800 Subject: [PATCH 0301/1308] HDFS-14268. RBF: Fix the location of the DNs in getDatanodeReport(). Contributed by Inigo Goiri. 
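Besides fixing the subcluster attribution of datanodes in
getDatanodeReport(), this patch moves the per-namespace erasure-coding
stats aggregation into a reusable ECBlockGroupStats.merge() helper. A
minimal usage sketch of that helper (the literal stat values are invented
for illustration; in the Router the per-namespace stats come from
invokeConcurrent() against every namenode):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;

    public class EcStatsMergeExample {
      public static void main(String[] args) {
        List<ECBlockGroupStats> perNamespace = new ArrayList<>();
        // (lowRedundancy, corrupt, missing, bytesInFuture, pendingDeletion)
        perNamespace.add(new ECBlockGroupStats(1, 0, 2, 0L, 3L));
        perNamespace.add(new ECBlockGroupStats(4, 1, 0, 0L, 2L));
        // Each field is summed across namespaces:
        // 5 low-redundancy, 1 corrupt, 2 missing, 0 future bytes, 5 pending deletions.
        ECBlockGroupStats federated = ECBlockGroupStats.merge(perNamespace);
        System.out.println(federated);
      }
    }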
--- .../hdfs/protocol/ECBlockGroupStats.java | 71 +++++++++++++++++++ .../federation/router/ErasureCoding.java | 29 +------- .../federation/router/RouterRpcClient.java | 19 ++--- .../federation/router/TestRouterRpc.java | 48 ++++++++++--- 4 files changed, 114 insertions(+), 53 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java index 3dde6043468a0..1ead5c1fd3421 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs.protocol; +import java.util.Collection; + +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -103,4 +107,71 @@ public String toString() { statsBuilder.append("]"); return statsBuilder.toString(); } + + @Override + public int hashCode() { + return new HashCodeBuilder() + .append(lowRedundancyBlockGroups) + .append(corruptBlockGroups) + .append(missingBlockGroups) + .append(bytesInFutureBlockGroups) + .append(pendingDeletionBlocks) + .append(highestPriorityLowRedundancyBlocks) + .toHashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ECBlockGroupStats other = (ECBlockGroupStats)o; + return new EqualsBuilder() + .append(lowRedundancyBlockGroups, other.lowRedundancyBlockGroups) + .append(corruptBlockGroups, other.corruptBlockGroups) + .append(missingBlockGroups, other.missingBlockGroups) + .append(bytesInFutureBlockGroups, other.bytesInFutureBlockGroups) + .append(pendingDeletionBlocks, other.pendingDeletionBlocks) + .append(highestPriorityLowRedundancyBlocks, + other.highestPriorityLowRedundancyBlocks) + .isEquals(); + } + + /** + * Merge the multiple ECBlockGroupStats. + * @param stats Collection of stats to merge. 
+ * @return A new ECBlockGroupStats merging all the input ones + */ + public static ECBlockGroupStats merge(Collection stats) { + long lowRedundancyBlockGroups = 0; + long corruptBlockGroups = 0; + long missingBlockGroups = 0; + long bytesInFutureBlockGroups = 0; + long pendingDeletionBlocks = 0; + long highestPriorityLowRedundancyBlocks = 0; + boolean hasHighestPriorityLowRedundancyBlocks = false; + + for (ECBlockGroupStats stat : stats) { + lowRedundancyBlockGroups += stat.getLowRedundancyBlockGroups(); + corruptBlockGroups += stat.getCorruptBlockGroups(); + missingBlockGroups += stat.getMissingBlockGroups(); + bytesInFutureBlockGroups += stat.getBytesInFutureBlockGroups(); + pendingDeletionBlocks += stat.getPendingDeletionBlocks(); + if (stat.hasHighestPriorityLowRedundancyBlocks()) { + hasHighestPriorityLowRedundancyBlocks = true; + highestPriorityLowRedundancyBlocks += + stat.getHighestPriorityLowRedundancyBlocks(); + } + } + if (hasHighestPriorityLowRedundancyBlocks) { + return new ECBlockGroupStats(lowRedundancyBlockGroups, corruptBlockGroups, + missingBlockGroups, bytesInFutureBlockGroups, pendingDeletionBlocks, + highestPriorityLowRedundancyBlocks); + } + return new ECBlockGroupStats(lowRedundancyBlockGroups, corruptBlockGroups, + missingBlockGroups, bytesInFutureBlockGroups, pendingDeletionBlocks); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java index f4584b1afafb5..97c5f6a601d16 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java @@ -187,33 +187,6 @@ public ECBlockGroupStats getECBlockGroupStats() throws IOException { rpcClient.invokeConcurrent( nss, method, true, false, ECBlockGroupStats.class); - // Merge the stats from all the namespaces - long lowRedundancyBlockGroups = 0; - long corruptBlockGroups = 0; - long missingBlockGroups = 0; - long bytesInFutureBlockGroups = 0; - long pendingDeletionBlocks = 0; - long highestPriorityLowRedundancyBlocks = 0; - boolean hasHighestPriorityLowRedundancyBlocks = false; - - for (ECBlockGroupStats stats : allStats.values()) { - lowRedundancyBlockGroups += stats.getLowRedundancyBlockGroups(); - corruptBlockGroups += stats.getCorruptBlockGroups(); - missingBlockGroups += stats.getMissingBlockGroups(); - bytesInFutureBlockGroups += stats.getBytesInFutureBlockGroups(); - pendingDeletionBlocks += stats.getPendingDeletionBlocks(); - if (stats.hasHighestPriorityLowRedundancyBlocks()) { - hasHighestPriorityLowRedundancyBlocks = true; - highestPriorityLowRedundancyBlocks += - stats.getHighestPriorityLowRedundancyBlocks(); - } - } - if (hasHighestPriorityLowRedundancyBlocks) { - return new ECBlockGroupStats(lowRedundancyBlockGroups, corruptBlockGroups, - missingBlockGroups, bytesInFutureBlockGroups, pendingDeletionBlocks, - highestPriorityLowRedundancyBlocks); - } - return new ECBlockGroupStats(lowRedundancyBlockGroups, corruptBlockGroups, - missingBlockGroups, bytesInFutureBlockGroups, pendingDeletionBlocks); + return ECBlockGroupStats.merge(allStats.values()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index d21bde3d67912..3d80c4167d1e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -24,16 +24,15 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; @@ -1061,8 +1060,8 @@ public Map invokeConcurrent( } } - List orderedLocations = new LinkedList<>(); - Set> callables = new HashSet<>(); + List orderedLocations = new ArrayList<>(); + List> callables = new ArrayList<>(); for (final T location : locations) { String nsId = location.getNameserviceId(); final List namenodes = @@ -1080,20 +1079,12 @@ public Map invokeConcurrent( nnLocation = (T)new RemoteLocation(nsId, nnId, location.getDest()); } orderedLocations.add(nnLocation); - callables.add(new Callable() { - public Object call() throws Exception { - return invokeMethod(ugi, nnList, proto, m, paramList); - } - }); + callables.add(() -> invokeMethod(ugi, nnList, proto, m, paramList)); } } else { // Call the objectGetter in order of nameservices in the NS list orderedLocations.add(location); - callables.add(new Callable() { - public Object call() throws Exception { - return invokeMethod(ugi, namenodes, proto, m, paramList); - } - }); + callables.add(() -> invokeMethod(ugi, namenodes, proto, m, paramList)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index 2d26e1142e72c..d9430767753bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -37,6 +37,7 @@ import java.io.IOException; import java.lang.reflect.Method; import java.net.URISyntaxException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.EnumSet; @@ -47,6 +48,7 @@ import java.util.Map.Entry; import java.util.Random; import java.util.Set; +import java.util.TreeMap; import java.util.TreeSet; import java.util.concurrent.TimeUnit; @@ -120,6 +122,11 @@ public class TestRouterRpc { private static final Logger LOG = LoggerFactory.getLogger(TestRouterRpc.class); + private static final int NUM_SUBCLUSTERS = 2; + // We need at least 6 DNs to test Erasure Coding with RS-6-3-64k + private static final int NUM_DNS = 6; + + private static final Comparator EC_POLICY_CMP = new Comparator() { public int compare( @@ -165,9 +172,9 @@ public int compare( @BeforeClass public static void globalSetUp() throws Exception { - cluster = new MiniRouterDFSCluster(false, 2); - // We need 6 DNs to test Erasure Coding with RS-6-3-64k - 
cluster.setNumDatanodesPerNameservice(6); + cluster = new MiniRouterDFSCluster(false, NUM_SUBCLUSTERS); + cluster.setNumDatanodesPerNameservice(NUM_DNS); + cluster.setIndependentDNs(); // Start NNs and DNs and wait until ready cluster.startCluster(); @@ -586,8 +593,13 @@ public void testProxyGetDatanodeReport() throws Exception { DatanodeInfo[] combinedData = routerProtocol.getDatanodeReport(DatanodeReportType.ALL); + final Map routerDNMap = new TreeMap<>(); + for (DatanodeInfo dn : combinedData) { + String subcluster = dn.getNetworkLocation().split("/")[1]; + routerDNMap.put(dn.getXferPort(), subcluster); + } - Set individualData = new HashSet(); + final Map nnDNMap = new TreeMap<>(); for (String nameservice : cluster.getNameservices()) { NamenodeContext n = cluster.getNamenode(nameservice, null); DFSClient client = n.getClient(); @@ -597,10 +609,10 @@ public void testProxyGetDatanodeReport() throws Exception { for (int i = 0; i < data.length; i++) { // Collect unique DNs based on their xfer port DatanodeInfo info = data[i]; - individualData.add(info.getXferPort()); + nnDNMap.put(info.getXferPort(), nameservice); } } - assertEquals(combinedData.length, individualData.size()); + assertEquals(nnDNMap, routerDNMap); } @Test @@ -1234,7 +1246,7 @@ public void testProxyExceptionMessages() throws IOException { } @Test - public void testErasureCoding() throws IOException { + public void testErasureCoding() throws Exception { LOG.info("List the available erasurce coding policies"); ErasureCodingPolicyInfo[] policies = checkErasureCodingPolicies(); @@ -1340,8 +1352,22 @@ public void testErasureCoding() throws IOException { LOG.info("Check the stats"); ECBlockGroupStats statsRouter = routerProtocol.getECBlockGroupStats(); - ECBlockGroupStats statsNamenode = nnProtocol.getECBlockGroupStats(); - assertEquals(statsNamenode.toString(), statsRouter.toString()); + ECBlockGroupStats statsNamenode = getNamenodeECBlockGroupStats(); + assertEquals(statsNamenode, statsRouter); + } + + /** + * Get the EC stats from all namenodes and aggregate them. + * @return Aggregated EC stats from all namenodes. + * @throws Exception If we cannot get the stats. + */ + private ECBlockGroupStats getNamenodeECBlockGroupStats() throws Exception { + List nnStats = new ArrayList<>(); + for (NamenodeContext nnContext : cluster.getNamenodes()) { + ClientProtocol cp = nnContext.getClient().getNamenode(); + nnStats.add(cp.getECBlockGroupStats()); + } + return ECBlockGroupStats.merge(nnStats); } @Test @@ -1375,9 +1401,9 @@ public void testNamenodeMetrics() throws Exception { router.getRouter().getNamenodeMetrics(); final String jsonString0 = metrics.getLiveNodes(); - // We should have 12 nodes in total + // We should have the nodes in all the subclusters JSONObject jsonObject = new JSONObject(jsonString0); - assertEquals(12, jsonObject.names().length()); + assertEquals(NUM_SUBCLUSTERS * NUM_DNS, jsonObject.names().length()); // We should be caching this information String jsonString1 = metrics.getLiveNodes(); From 8b8ff5ccbc677d98a91092b2fa64999a99d1595d Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Wed, 20 Feb 2019 11:08:55 -0800 Subject: [PATCH 0302/1308] HDFS-14249. RBF: Tooling to identify the subcluster location of a file. Contributed by Inigo Goiri. 
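Before the diff itself, a minimal client-side sketch of the tooling this patch introduces: resolving which subcluster(s) back a federated path through the Router admin interface. It mirrors the usage in the tests below; how the RouterClient handle is obtained (for example from the RBF test harness via routerContext.getAdminClient()) is an assumption, and the example class and method names are illustrative only.

    // Sketch only: assumes a RouterClient already connected to the Router admin service.
    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
    import org.apache.hadoop.hdfs.server.federation.router.RouterClient;
    import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationRequest;
    import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse;

    public class GetDestinationExample {
      // Prints the nameservice ID(s) where the given path lives (or would be created).
      public static void printDestinations(RouterClient client, String src)
          throws IOException {
        MountTableManager mountTable = client.getMountTableManager();
        GetDestinationRequest request =
            GetDestinationRequest.newInstance(new Path(src));
        GetDestinationResponse response = mountTable.getDestination(request);
        for (String nsId : response.getDestinations()) {
          System.out.println(src + " -> " + nsId);
        }
      }
    }

For a mount point with a single destination this yields one nameservice (e.g. ns0); for HASH_ALL or RANDOM mount points it can return several, as exercised by the multi-destination tests at the end of this patch.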
--- ...erAdminProtocolServerSideTranslatorPB.java | 22 +++ .../RouterAdminProtocolTranslatorPB.java | 21 +++ .../FederationRPCPerformanceMonitor.java | 8 +- .../resolver/MountTableManager.java | 12 ++ .../federation/router/RouterAdminServer.java | 36 +++++ .../store/impl/MountTableStoreImpl.java | 7 + .../store/protocol/GetDestinationRequest.java | 57 +++++++ .../protocol/GetDestinationResponse.java | 59 +++++++ .../impl/pb/GetDestinationRequestPBImpl.java | 73 +++++++++ .../impl/pb/GetDestinationResponsePBImpl.java | 83 ++++++++++ .../hdfs/tools/federation/RouterAdmin.java | 28 +++- .../src/main/proto/FederationProtocol.proto | 8 + .../src/main/proto/RouterProtocol.proto | 5 + .../src/site/markdown/HDFSRouterFederation.md | 4 + .../federation/router/TestRouterAdminCLI.java | 64 +++++++- ...MultipleDestinationMountTableResolver.java | 144 ++++++++++++++++++ .../src/site/markdown/HDFSCommands.md | 2 + 17 files changed, 628 insertions(+), 5 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDestinationRequest.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDestinationResponse.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDestinationRequestPBImpl.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDestinationResponsePBImpl.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java index a31c46d2912c3..6f6724e738246 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesRequestProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesResponseProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDestinationRequestProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDestinationResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeRequestProto; @@ -54,6 +56,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationRequest; +import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest; @@ -76,6 +80,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnterSafeModeResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetDisabledNameservicesRequestPBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetDisabledNameservicesResponsePBImpl; +import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetDestinationRequestPBImpl; +import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetDestinationResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeRequestPBImpl; @@ -298,4 +304,20 @@ public RefreshMountTableEntriesResponseProto refreshMountTableEntries( throw new ServiceException(e); } } + + @Override + public GetDestinationResponseProto getDestination( + RpcController controller, GetDestinationRequestProto request) + throws ServiceException { + try { + GetDestinationRequest req = + new GetDestinationRequestPBImpl(request); + GetDestinationResponse response = server.getDestination(req); + GetDestinationResponsePBImpl responsePB = + (GetDestinationResponsePBImpl)response; + return responsePB.getProto(); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java index 1fbb06d2a7f61..9cdc3c1c94006 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java @@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesRequestProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesResponseProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDestinationRequestProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDestinationResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeRequestProto; @@ -57,6 +59,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest; import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest; @@ -77,6 +81,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnableNameserviceResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnterSafeModeResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetDisabledNameservicesResponsePBImpl; +import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetDestinationRequestPBImpl; +import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetDestinationResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl; import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeResponsePBImpl; @@ -288,4 +294,19 @@ public RefreshMountTableEntriesResponse refreshMountTableEntries( throw new IOException(ProtobufHelper.getRemoteException(e).getMessage()); } } + + @Override + public GetDestinationResponse getDestination( + GetDestinationRequest request) throws IOException { + GetDestinationRequestPBImpl requestPB = + (GetDestinationRequestPBImpl) request; + GetDestinationRequestProto proto = requestPB.getProto(); + try { + GetDestinationResponseProto response = + rpcProxy.getDestination(null, proto); + return new GetDestinationResponsePBImpl(response); + } catch (ServiceException e) { + throw new IOException(ProtobufHelper.getRemoteException(e).getMessage()); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java index cbd63de5d2e6e..bae83aa074659 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java @@ -129,7 +129,7 @@ public void startOp() { public long proxyOp() { PROXY_TIME.set(monotonicNow()); long processingTime = getProcessingTime(); - if (processingTime >= 0) { + if (metrics != null && processingTime >= 0) { metrics.addProcessingTime(processingTime); } return Thread.currentThread().getId(); @@ -139,7 +139,7 @@ public long proxyOp() { public void proxyOpComplete(boolean success) { if (success) { long proxyTime = getProxyTime(); - if (proxyTime >= 0) { + if (metrics != null && proxyTime >= 0) { metrics.addProxyTime(proxyTime); } } @@ -147,7 +147,9 @@ public void proxyOpComplete(boolean success) { @Override public void proxyOpFailureStandby() { - metrics.incrProxyOpFailureStandby(); + if (metrics != null) { + metrics.incrProxyOpFailureStandby(); + } } @Override diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableManager.java index 9a1e4160245aa..5ff2e28329260 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableManager.java @@ -21,6 +21,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; @@ -93,4 +95,14 @@ GetMountTableEntriesResponse getMountTableEntries( */ RefreshMountTableEntriesResponse refreshMountTableEntries( RefreshMountTableEntriesRequest request) throws IOException; + + /** + * Get the destination subcluster (namespace) of a file/directory. + * + * @param request Fully populated request object including the file to check. + * @return The response including the subcluster where the input file is. + * @throws IOException Throws exception if the data store is not initialized. + */ + GetDestinationResponse getDestination( + GetDestinationRequest request) throws IOException; } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index e2d944c4d6ee8..a2a5a4239f735 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -23,7 +23,10 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.Collection; +import java.util.List; +import java.util.Map; import java.util.Set; import com.google.common.base.Preconditions; @@ -39,6 +42,7 @@ import org.apache.hadoop.hdfs.protocolPB.RouterPolicyProvider; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore; import org.apache.hadoop.hdfs.server.federation.store.MountTableStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreCache; @@ -52,6 +56,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse; +import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest; @@ -378,6 +384,36 @@ public RefreshMountTableEntriesResponse refreshMountTableEntries( } } + @Override + public GetDestinationResponse getDestination( + GetDestinationRequest request) throws IOException { + final String src = request.getSrcPath(); + final List nsIds = new ArrayList<>(); + RouterRpcServer rpcServer = this.router.getRpcServer(); + List locations = rpcServer.getLocationsForPath(src, false); + RouterRpcClient rpcClient = rpcServer.getRPCClient(); + RemoteMethod method = new RemoteMethod("getFileInfo", + new Class[] {String.class}, new RemoteParam()); + try { + Map responses = + rpcClient.invokeConcurrent( + locations, method, false, false, HdfsFileStatus.class); + for (RemoteLocation location : locations) { + if (responses.get(location) != null) { + nsIds.add(location.getNameserviceId()); + } + } + } catch (IOException ioe) { + LOG.error("Cannot get location for {}: {}", + src, ioe.getMessage()); + } + if (nsIds.isEmpty() && !locations.isEmpty()) { + String nsId = locations.get(0).getNameserviceId(); + nsIds.add(nsId); + } + return GetDestinationResponse.newInstance(nsIds); + } + /** * Verify if Router set safe mode state correctly. * @param isInSafeMode Expected state to be set. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java index 76c7e781ab9ed..d5e1857a8c11c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; @@ -169,4 +171,9 @@ public RefreshMountTableEntriesResponse refreshMountTableEntries( return response; } + @Override + public GetDestinationResponse getDestination( + GetDestinationRequest request) throws IOException { + throw new UnsupportedOperationException("Requires the RouterRpcServer"); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDestinationRequest.java 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDestinationRequest.java new file mode 100644 index 0000000000000..0d5074b844318 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDestinationRequest.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer; + +/** + * API request for getting the destination subcluster of a file. + */ +public abstract class GetDestinationRequest { + + public static GetDestinationRequest newInstance() + throws IOException { + return StateStoreSerializer + .newRecord(GetDestinationRequest.class); + } + + public static GetDestinationRequest newInstance(String srcPath) + throws IOException { + GetDestinationRequest request = newInstance(); + request.setSrcPath(srcPath); + return request; + } + + public static GetDestinationRequest newInstance(Path srcPath) + throws IOException { + return newInstance(srcPath.toString()); + } + + @Public + @Unstable + public abstract String getSrcPath(); + + @Public + @Unstable + public abstract void setSrcPath(String srcPath); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDestinationResponse.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDestinationResponse.java new file mode 100644 index 0000000000000..534b673829600 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDestinationResponse.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer; + +/** + * API response for getting the destination subcluster of a file. + */ +public abstract class GetDestinationResponse { + + public static GetDestinationResponse newInstance() + throws IOException { + return StateStoreSerializer + .newRecord(GetDestinationResponse.class); + } + + public static GetDestinationResponse newInstance( + Collection nsIds) throws IOException { + GetDestinationResponse request = newInstance(); + request.setDestinations(nsIds); + return request; + } + + @Public + @Unstable + public abstract Collection getDestinations(); + + @Public + @Unstable + public void setDestination(String nsId) { + setDestinations(Collections.singletonList(nsId)); + } + + @Public + @Unstable + public abstract void setDestinations(Collection nsIds); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDestinationRequestPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDestinationRequestPBImpl.java new file mode 100644 index 0000000000000..b97f455cdd8b3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDestinationRequestPBImpl.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDestinationRequestProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDestinationRequestProtoOrBuilder; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDestinationRequestProto.Builder; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationRequest; +import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord; + +import com.google.protobuf.Message; + +/** + * Protobuf implementation of the state store API object + * GetDestinationRequest. 
+ */ +public class GetDestinationRequestPBImpl extends GetDestinationRequest + implements PBRecord { + + private FederationProtocolPBTranslator translator = + new FederationProtocolPBTranslator<>( + GetDestinationRequestProto.class); + + public GetDestinationRequestPBImpl() { + } + + public GetDestinationRequestPBImpl(GetDestinationRequestProto proto) { + this.translator.setProto(proto); + } + + @Override + public GetDestinationRequestProto getProto() { + return this.translator.build(); + } + + @Override + public void setProto(Message proto) { + this.translator.setProto(proto); + } + + @Override + public void readInstance(String base64String) throws IOException { + this.translator.readInstance(base64String); + } + + @Override + public String getSrcPath() { + return this.translator.getProtoOrBuilder().getSrcPath(); + } + + @Override + public void setSrcPath(String path) { + this.translator.getBuilder().setSrcPath(path); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDestinationResponsePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDestinationResponsePBImpl.java new file mode 100644 index 0000000000000..f758f99365514 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDestinationResponsePBImpl.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; + +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDestinationResponseProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDestinationResponseProto.Builder; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDestinationResponseProtoOrBuilder; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse; +import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord; + +import com.google.protobuf.Message; + +/** + * Protobuf implementation of the state store API object + * GetDestinationResponse. 
+ */ +public class GetDestinationResponsePBImpl + extends GetDestinationResponse implements PBRecord { + + private FederationProtocolPBTranslator translator = + new FederationProtocolPBTranslator<>( + GetDestinationResponseProto.class); + + public GetDestinationResponsePBImpl() { + } + + public GetDestinationResponsePBImpl( + GetDestinationResponseProto proto) { + this.translator.setProto(proto); + } + + @Override + public GetDestinationResponseProto getProto() { + // if builder is null build() returns null, calling getBuilder() to + // instantiate builder + this.translator.getBuilder(); + return this.translator.build(); + } + + @Override + public void setProto(Message proto) { + this.translator.setProto(proto); + } + + @Override + public void readInstance(String base64String) throws IOException { + this.translator.readInstance(base64String); + } + + @Override + public Collection getDestinations() { + return new ArrayList<>( + this.translator.getProtoOrBuilder().getDestinationsList()); + } + + @Override + public void setDestinations(Collection nsIds) { + this.translator.getBuilder().clearDestinations(); + for (String nsId : nsIds) { + this.translator.getBuilder().addDestinations(nsId); + } + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 37aad88565a0f..b04b0692b0a18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -52,6 +52,8 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest; @@ -117,7 +119,8 @@ private void printUsage(String cmd) { private String getUsage(String cmd) { if (cmd == null) { String[] commands = - {"-add", "-update", "-rm", "-ls", "-setQuota", "-clrQuota", + {"-add", "-update", "-rm", "-ls", "-getDestination", + "-setQuota", "-clrQuota", "-safemode", "-nameservice", "-getDisabledNameservices", "-refresh"}; StringBuilder usage = new StringBuilder(); @@ -143,6 +146,8 @@ private String getUsage(String cmd) { return "\t[-rm ]"; } else if (cmd.equals("-ls")) { return "\t[-ls ]"; + } else if (cmd.equals("-getDestination")) { + return "\t[-getDestination ]"; } else if (cmd.equals("-setQuota")) { return "\t[-setQuota -nsQuota -ssQuota " + "]"; @@ -172,6 +177,11 @@ private void validateMax(String[] arg) { throw new IllegalArgumentException( "Too many arguments, Max=1 argument allowed"); } + } else if (arg[0].equals("-getDestination")) { + if (arg.length > 2) { + throw new IllegalArgumentException( + "Too many arguments, Max=1 argument allowed only"); + } } else if (arg[0].equals("-safemode")) { if (arg.length > 2) { throw new 
IllegalArgumentException( @@ -208,6 +218,10 @@ private boolean validateMin(String[] argv) { if (argv.length < 2) { return false; } + } else if ("-getDestination".equals(cmd)) { + if (argv.length < 2) { + return false; + } } else if ("-setQuota".equals(cmd)) { if (argv.length < 4) { return false; @@ -302,6 +316,8 @@ public int run(String[] argv) throws Exception { } else { listMounts("/"); } + } else if ("-getDestination".equals(cmd)) { + getDestination(argv[i]); } else if ("-setQuota".equals(cmd)) { if (setQuota(argv, i)) { System.out.println( @@ -709,6 +725,16 @@ private static void printMounts(List entries) { } } + private void getDestination(String path) throws IOException { + path = normalizeFileSystemPath(path); + MountTableManager mountTable = client.getMountTableManager(); + GetDestinationRequest request = + GetDestinationRequest.newInstance(path); + GetDestinationResponse response = mountTable.getDestination(request); + System.out.println("Destination: " + + StringUtils.join(",", response.getDestinations())); + } + /** * Set quota for a mount table entry. * diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto index 1e5e37b3e2283..9e9fd4899c261 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto @@ -175,6 +175,14 @@ message GetMountTableEntriesResponseProto { optional uint64 timestamp = 2; } +message GetDestinationRequestProto { + optional string srcPath = 1; +} + +message GetDestinationResponseProto { + repeated string destinations = 1; +} + ///////////////////////////////////////////////// // Routers diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto index 34a012acd8739..d6aff49830f9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto @@ -79,4 +79,9 @@ service RouterAdminProtocolService { * Refresh mount entries */ rpc refreshMountTableEntries(RefreshMountTableEntriesRequestProto) returns(RefreshMountTableEntriesResponseProto); + + /** + * Get the destination of a file/directory in the federation. + */ + rpc getDestination(GetDestinationRequestProto) returns (GetDestinationResponseProto); } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index 2ae0c2bed0cb0..f24ff12993f5e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -261,6 +261,10 @@ RANDOM can be used for reading and writing data from/into different subclusters. The common use for this approach is to have the same data in multiple subclusters and balance the reads across subclusters. For example, if thousands of containers need to read the same data (e.g., a library), one can use RANDOM to read the data from any of the subclusters. +To determine which subcluster contains a file: + + [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -getDestination /user/user1/file.txt + Note that consistency of the data across subclusters is not guaranteed by the Router. 
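As a rough illustration of the expected output (a sketch, not part of the patch: it assumes a Configuration already pointing at the Router's admin endpoint), the same command can be driven programmatically through ToolRunner, which is how the admin CLI tests later in this patch invoke it. It prints a single line such as Destination: ns0, or a comma-separated list when the mount point has several destinations.

    // Sketch only: Configuration wiring to the Router admin address is assumed.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class GetDestinationCliExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        RouterAdmin admin = new RouterAdmin(conf);
        // Prints e.g. "Destination: ns0" and returns 0 on success
        int ret = ToolRunner.run(admin,
            new String[] {"-getDestination", "/user/user1/file.txt"});
        System.exit(ret);
      }
    }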
### Disabling nameservices diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index ab733dde8dffc..9f53dd4458d66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -26,6 +26,11 @@ import java.io.PrintStream; import java.net.InetSocketAddress; import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -36,6 +41,8 @@ import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; @@ -78,7 +85,8 @@ public class TestRouterAdminCLI { @BeforeClass public static void globalSetUp() throws Exception { - cluster = new StateStoreDFSCluster(false, 1); + cluster = new StateStoreDFSCluster(false, 1, + MultipleDestinationMountTableResolver.class); // Build and start a router with State Store + admin + RPC Configuration conf = new RouterConfigBuilder() .stateStore() @@ -550,6 +558,11 @@ public void testInvalidArgumentMessage() throws Exception { .contains("\t[-nameservice enable | disable ]")); out.reset(); + argv = new String[] {"-getDestination"}; + assertEquals(-1, ToolRunner.run(admin, argv)); + assertTrue(out.toString().contains("\t[-getDestination ]")); + out.reset(); + argv = new String[] {"-Random"}; assertEquals(-1, ToolRunner.run(admin, argv)); String expected = "Usage: hdfs dfsrouteradmin :\n" @@ -560,6 +573,7 @@ public void testInvalidArgumentMessage() throws Exception { + " " + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]\n" + "\t[-rm ]\n" + "\t[-ls ]\n" + + "\t[-getDestination ]\n" + "\t[-setQuota -nsQuota -ssQuota " + "]\n" + "\t[-clrQuota ]\n" + "\t[-safemode enter | leave | get]\n" @@ -1091,4 +1105,52 @@ private void testUpdateOrderMountTable(DestinationOrder order) assertEquals(dest, mountTable.getDestinations().get(0).getDest()); assertEquals(order, mountTable.getDestOrder()); } + + @Test + public void testGetDestination() throws Exception { + + // Test the basic destination feature + System.setOut(new PrintStream(out)); + String[] argv = new String[] {"-getDestination", "/file.txt"}; + assertEquals(0, ToolRunner.run(admin, argv)); + assertEquals("Destination: ns0" + System.lineSeparator(), out.toString()); + + // Add a HASH_ALL entry to check the destination changing + argv = new String[] {"-add", "/testGetDest", "ns0,ns1", + "/testGetDestination", + "-order", DestinationOrder.HASH_ALL.toString()}; + assertEquals(0, ToolRunner.run(admin, argv)); 
+ stateStore.loadCache(MountTableStoreImpl.class, true); + MountTableResolver resolver = + (MountTableResolver) router.getSubclusterResolver(); + resolver.loadCache(true); + + // Files should be distributed across ns0 and ns1 + Map counter = new TreeMap<>(); + final Pattern p = Pattern.compile("Destination: (.*)"); + for (int i = 0; i < 10; i++) { + out.reset(); + String filename = "file" + i+ ".txt"; + argv = new String[] {"-getDestination", "/testGetDest/" + filename}; + assertEquals(0, ToolRunner.run(admin, argv)); + String outLine = out.toString(); + Matcher m = p.matcher(outLine); + assertTrue(m.find()); + String nsId = m.group(1); + if (counter.containsKey(nsId)) { + counter.get(nsId).getAndIncrement(); + } else { + counter.put(nsId, new AtomicInteger(1)); + } + } + assertEquals("Wrong counter size: " + counter, 2, counter.size()); + assertTrue(counter + " should contain ns0", counter.containsKey("ns0")); + assertTrue(counter + " should contain ns1", counter.containsKey("ns1")); + + // Bad cases + argv = new String[] {"-getDestination"}; + assertEquals(-1, ToolRunner.run(admin, argv)); + argv = new String[] {"-getDestination /file1.txt /file2.txt"}; + assertEquals(-1, ToolRunner.run(admin, argv)); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java index 8c1515140ae6c..46bfff99da03f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java @@ -23,11 +23,19 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import java.io.FileNotFoundException; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -41,8 +49,11 @@ import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.test.LambdaTestUtils; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -52,6 +63,8 @@ * Tests router rpc with multiple destination mount table resolver. 
*/ public class TestRouterRPCMultipleDestinationMountTableResolver { + private static final List NS_IDS = Arrays.asList("ns0", "ns1"); + private static StateStoreDFSCluster cluster; private static RouterContext routerContext; private static MountTableResolver resolver; @@ -391,4 +404,135 @@ private boolean addMountTable(final MountTable entry) throws IOException { return addResponse.getStatus(); } + + @Test + public void testGetDestinationHashAll() throws Exception { + testGetDestination(DestinationOrder.HASH_ALL, + Arrays.asList("ns1"), + Arrays.asList("ns1"), + Arrays.asList("ns1", "ns0")); + } + + @Test + public void testGetDestinationHash() throws Exception { + testGetDestination(DestinationOrder.HASH, + Arrays.asList("ns1"), + Arrays.asList("ns1"), + Arrays.asList("ns1")); + } + + @Test + public void testGetDestinationRandom() throws Exception { + testGetDestination(DestinationOrder.RANDOM, + null, null, Arrays.asList("ns0", "ns1")); + } + + /** + * Generic test for getting the destination subcluster. + * @param order DestinationOrder of the mount point. + * @param expectFileLocation Expected subclusters of a file. null for any. + * @param expectNoFileLocation Expected subclusters of a non-existing file. + * @param expectDirLocation Expected subclusters of a nested directory. + * @throws Exception If the test cannot run. + */ + private void testGetDestination(DestinationOrder order, + List expectFileLocation, + List expectNoFileLocation, + List expectDirLocation) throws Exception { + setupOrderMountPath(order); + + RouterClient client = routerContext.getAdminClient(); + MountTableManager mountTableManager = client.getMountTableManager(); + + // If the file exists, it should be in the expected subcluster + final String pathFile = "dir/file"; + final Path pathRouterFile = new Path("/mount", pathFile); + final Path pathLocalFile = new Path("/tmp", pathFile); + FileStatus fileStatus = routerFs.getFileStatus(pathRouterFile); + assertTrue(fileStatus + " should be a file", fileStatus.isFile()); + GetDestinationResponse respFile = mountTableManager.getDestination( + GetDestinationRequest.newInstance(pathRouterFile)); + if (expectFileLocation != null) { + assertEquals(expectFileLocation, respFile.getDestinations()); + assertPathStatus(expectFileLocation, pathLocalFile, false); + } else { + Collection dests = respFile.getDestinations(); + assertPathStatus(dests, pathLocalFile, false); + } + + // If the file does not exist, it should give us the expected subclusters + final String pathNoFile = "dir/no-file"; + final Path pathRouterNoFile = new Path("/mount", pathNoFile); + final Path pathLocalNoFile = new Path("/tmp", pathNoFile); + LambdaTestUtils.intercept(FileNotFoundException.class, + () -> routerFs.getFileStatus(pathRouterNoFile)); + GetDestinationResponse respNoFile = mountTableManager.getDestination( + GetDestinationRequest.newInstance(pathRouterNoFile)); + if (expectNoFileLocation != null) { + assertEquals(expectNoFileLocation, respNoFile.getDestinations()); + } + assertPathStatus(Collections.emptyList(), pathLocalNoFile, false); + + // If the folder exists, it should be in the expected subcluster + final String pathNestedDir = "dir/dir"; + final Path pathRouterNestedDir = new Path("/mount", pathNestedDir); + final Path pathLocalNestedDir = new Path("/tmp", pathNestedDir); + FileStatus dirStatus = routerFs.getFileStatus(pathRouterNestedDir); + assertTrue(dirStatus + " should be a directory", dirStatus.isDirectory()); + GetDestinationResponse respDir = mountTableManager.getDestination( + 
GetDestinationRequest.newInstance(pathRouterNestedDir)); + assertEqualsCollection(expectDirLocation, respDir.getDestinations()); + assertPathStatus(expectDirLocation, pathLocalNestedDir, true); + } + + /** + * Assert that the status of a file in the subcluster is the expected one. + * @param expectedLocations Subclusters where the file is expected to exist. + * @param path Path of the file/directory to check. + * @param isDir If the path is expected to be a directory. + * @throws Exception If the file cannot be checked. + */ + private void assertPathStatus(Collection expectedLocations, + Path path, boolean isDir) throws Exception { + for (String nsId : NS_IDS) { + final FileSystem fs = getFileSystem(nsId); + if (expectedLocations.contains(nsId)) { + assertTrue(path + " should exist in " + nsId, fs.exists(path)); + final FileStatus status = fs.getFileStatus(path); + if (isDir) { + assertTrue(path + " should be a directory", status.isDirectory()); + } else { + assertTrue(path + " should be a file", status.isFile()); + } + } else { + assertFalse(path + " should not exist in " + nsId, fs.exists(path)); + } + } + } + + /** + * Assert if two collections are equal without checking the order. + * @param col1 First collection to compare. + * @param col2 Second collection to compare. + */ + private static void assertEqualsCollection( + Collection col1, Collection col2) { + assertEquals(new TreeSet<>(col1), new TreeSet<>(col2)); + } + + /** + * Get the filesystem for each subcluster. + * @param nsId Identifier of the name space (subcluster). + * @return The FileSystem for + */ + private static FileSystem getFileSystem(final String nsId) { + if (nsId.equals("ns0")) { + return nnFs0; + } + if (nsId.equals("ns1")) { + return nnFs1; + } + return null; + } + } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md index 421e3881db926..7ae31c83985c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md @@ -432,6 +432,7 @@ Usage: [-update [-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] -owner -group -mode ] [-rm ] [-ls ] + [-getDestination ] [-setQuota -nsQuota -ssQuota ] [-clrQuota ] [-safemode enter | leave | get] @@ -446,6 +447,7 @@ Usage: | `-update` *source* *nameservices* *destination* | Update a mount table entry or create one if it does not exist. | | `-rm` *source* | Remove mount point of specified path. | | `-ls` *path* | List mount points under specified path. | +| `-getDestination` *path* | Get the subcluster where a file is or should be created. | | `-setQuota` *path* `-nsQuota` *nsQuota* `-ssQuota` *ssQuota* | Set quota for specified path. See [HDFS Quotas Guide](./HdfsQuotaAdminGuide.html) for the quota detail. | | `-clrQuota` *path* | Clear quota of given mount point. See [HDFS Quotas Guide](./HdfsQuotaAdminGuide.html) for the quota detail. | | `-safemode` `enter` `leave` `get` | Manually set the Router entering or leaving safe mode. The option *get* will be used for verifying if the Router is in safe mode state. | From de719b08b555f4e030ea4543c40a71b4be7493c6 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Thu, 21 Feb 2019 17:23:46 -0800 Subject: [PATCH 0303/1308] HDFS-14307. RBF: Update tests to use internal Whitebox instead of Mockito. Contributed by CR Hota. 
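For context, the pattern being migrated here is field-level access in tests. A hedged sketch of how Hadoop's own Whitebox utility (the replacement this patch switches to) is typically used; the wrapper class and method are illustrative only, and the field name is supplied by the caller rather than taken from the patch.

    // Sketch only: a small helper around Hadoop's test Whitebox utility.
    import static org.apache.hadoop.test.Whitebox.getInternalState;
    import static org.apache.hadoop.test.Whitebox.setInternalState;

    public class WhiteboxSketch {
      // Swap a private field on a test target and return the original value
      // so the caller can restore it after the test.
      static Object swapInternalField(Object target, String field,
          Object replacement) {
        Object original = getInternalState(target, field);
        setInternalState(target, field, replacement);
        return original;
      }
    }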
--- .../federation/router/TestRouterRpcMultiDestination.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java index 3d941bbf4b907..306a45550660c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java @@ -25,8 +25,8 @@ import static org.mockito.Matchers.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; -import static org.mockito.internal.util.reflection.Whitebox.getInternalState; -import static org.mockito.internal.util.reflection.Whitebox.setInternalState; +import static org.apache.hadoop.test.Whitebox.getInternalState; +import static org.apache.hadoop.test.Whitebox.setInternalState; import java.io.IOException; import java.lang.reflect.Method; From c4b1fa91faf5ada6eea6b39d52e92c06816bd2c8 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Tue, 26 Feb 2019 07:42:23 +0530 Subject: [PATCH 0304/1308] HDFS-14052. RBF: Use Router keytab for WebHDFS. Contributed by CR Hota. --- .../federation/router/RouterHttpServer.java | 4 +- .../router/web/RouterWebHDFSContract.java | 12 ++-- .../router/TestRouterWithSecureStartup.java | 69 +++++++++++++++++++ 3 files changed, 80 insertions(+), 5 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWithSecureStartup.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java index d6a51465038c9..300bc072e5c58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java @@ -88,7 +88,9 @@ protected void serviceStart() throws Exception { this.httpServer = builder.build(); - NameNodeHttpServer.initWebHdfs(conf, httpAddress.getHostName(), null, + String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf, + RBFConfigKeys.DFS_ROUTER_KEYTAB_FILE_KEY)); + NameNodeHttpServer.initWebHdfs(conf, httpAddress.getHostName(), httpKeytab, httpServer, RouterWebHdfsMethods.class.getPackage().getName()); this.httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, this.router); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java index 02e9f39a470d9..4e205df14a564 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java @@ -55,16 +55,20 @@ public RouterWebHDFSContract(Configuration conf) { } public static void createCluster() throws IOException { + createCluster(new 
HdfsConfiguration()); + } + + public static void createCluster(Configuration conf) throws IOException { try { - HdfsConfiguration conf = new HdfsConfiguration(); conf.addResource(CONTRACT_HDFS_XML); conf.addResource(CONTRACT_WEBHDFS_XML); - cluster = new MiniRouterDFSCluster(true, 2); + cluster = new MiniRouterDFSCluster(true, 2, conf); // Start NNs and DNs and wait until ready - cluster.startCluster(); + cluster.startCluster(conf); + cluster.addRouterOverrides(conf); // Start routers with only an RPC service cluster.startRouters(); @@ -85,7 +89,7 @@ public static void createCluster() throws IOException { cluster.waitActiveNamespaces(); } catch (Exception e) { cluster = null; - throw new IOException("Cannot start federated cluster", e); + throw new IOException(e.getCause()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWithSecureStartup.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWithSecureStartup.java new file mode 100644 index 0000000000000..7cc2c87796a36 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWithSecureStartup.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.federation.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.router.web.RouterWebHDFSContract; +import org.junit.Rule; +import org.junit.Test; +import static org.junit.Assert.assertNotNull; +import org.junit.rules.ExpectedException; +import java.io.IOException; + +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KEYTAB_FILE_KEY; + + +/** + * Test secure router start up scenarios. 
+ */ +public class TestRouterWithSecureStartup { + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + @Test + public void testStartupWithoutSpnegoPrincipal() throws Exception { + testCluster(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, + "Unable to initialize WebAppContext"); + } + + @Test + public void testStartupWithoutKeytab() throws Exception { + testCluster(DFS_ROUTER_KEYTAB_FILE_KEY, + "Running in secure mode, but config doesn't have a keytab"); + } + + @Test + public void testSuccessfulStartup() throws Exception { + Configuration conf = initSecurity(); + RouterWebHDFSContract.createCluster(conf); + assertNotNull(RouterWebHDFSContract.getCluster()); + } + + private void testCluster(String configToTest, String message) + throws Exception { + Configuration conf = initSecurity(); + conf.unset(configToTest); + exceptionRule.expect(IOException.class); + exceptionRule.expectMessage(message); + RouterWebHDFSContract.createCluster(conf); + } +} From 1c7ab59be35f06db4c37b40c3163f869b1b697b7 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Wed, 27 Feb 2019 18:34:42 -0800 Subject: [PATCH 0305/1308] HDFS-14322. RBF: Security manager should not load if security is disabled. Contributed by CR Hota. --- .../router/security/RouterSecurityManager.java | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java index 0f0089aab005a..dcfaa444d4de4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.SecretManager; @@ -49,7 +50,13 @@ public class RouterSecurityManager { dtSecretManager = null; public RouterSecurityManager(Configuration conf) { - this.dtSecretManager = newSecretManager(conf); + AuthenticationMethod authMethodConfigured = + SecurityUtil.getAuthenticationMethod(conf); + AuthenticationMethod authMethodToInit = + AuthenticationMethod.KERBEROS; + if (authMethodConfigured.equals(authMethodToInit)) { + this.dtSecretManager = newSecretManager(conf); + } } @VisibleForTesting @@ -78,7 +85,8 @@ public RouterSecurityManager(AbstractDelegationTokenSecretManager constructor.newInstance(conf); LOG.info("Delegation token secret manager object instantiated"); } catch (ReflectiveOperationException e) { - LOG.error("Could not instantiate: {}", clazz.getSimpleName(), e); + LOG.error("Could not instantiate: {}", clazz.getSimpleName(), + e.getCause()); return null; } catch (RuntimeException e) { LOG.error("RuntimeException to instantiate: {}", From 7bbe01a1960f1f4168b30545d448ff58ff557de4 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Sat, 2 Mar 2019 17:17:34 -0800 Subject: [PATCH 0306/1308] HDFS-14259. RBF: Fix safemode message for Router. 
Contributed by Ranith Sadar. --- .../server/federation/metrics/NamenodeBeanMetrics.java | 2 +- .../hdfs/server/federation/router/TestRouterAdminCLI.java | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java index b08d9608c01cd..9687af0236403 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java @@ -234,7 +234,7 @@ public long getProvidedCapacity() { @Override public String getSafemode() { try { - if (!getRouter().isRouterState(RouterServiceState.SAFEMODE)) { + if (getRouter().isRouterState(RouterServiceState.SAFEMODE)) { return "Safe mode is ON. " + this.getSafeModeTip(); } } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index 9f53dd4458d66..486d4a09b70ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -747,6 +747,9 @@ public void testSafeModeStatus() throws Exception { FederationMetrics metrics = router.getMetrics(); String jsonString = metrics.getRouterStatus(); + String result = router.getNamenodeMetrics().getSafemode(); + assertTrue("Wrong safe mode message: " + result, + result.startsWith("Safe mode is ON.")); // verify state using FederationMetrics assertEquals(RouterServiceState.SAFEMODE.toString(), jsonString); @@ -756,6 +759,9 @@ public void testSafeModeStatus() throws Exception { assertEquals(0, ToolRunner.run(admin, new String[] {"-safemode", "leave" })); jsonString = metrics.getRouterStatus(); + result = router.getNamenodeMetrics().getSafemode(); + assertEquals("Wrong safe mode message: " + result, "", result); + // verify state assertEquals(RouterServiceState.RUNNING.toString(), jsonString); assertFalse(routerContext.getRouter().getSafemodeService().isInSafeMode()); @@ -1153,4 +1159,4 @@ public void testGetDestination() throws Exception { argv = new String[] {"-getDestination /file1.txt /file2.txt"}; assertEquals(-1, ToolRunner.run(admin, argv)); } -} \ No newline at end of file +} From 1ce25e702b5086fc81f7fc23fcb87db167f3804d Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Sun, 3 Mar 2019 10:51:58 -0800 Subject: [PATCH 0307/1308] HDFS-14329. RBF: Add maintenance nodes to federation metrics. Contributed by Ayush Saxena. 
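The patch below threads three new counters (live in maintenance, dead in maintenance, entering maintenance) from each Namenode status report through the membership records and sums them at the federation level via getNameserviceAggregatedInt with a method reference. A minimal, self-contained sketch of that aggregation pattern, using a stand-in Stats class instead of the real MembershipStats record:

    import java.util.Arrays;
    import java.util.List;
    import java.util.function.ToIntFunction;

    public class MaintenanceMetricsSketch {
      // Stand-in for MembershipStats: one instance per monitored nameservice.
      static class Stats {
        final int inMaintenanceLive;
        final int inMaintenanceDead;
        final int enteringMaintenance;
        Stats(int live, int dead, int entering) {
          this.inMaintenanceLive = live;
          this.inMaintenanceDead = dead;
          this.enteringMaintenance = entering;
        }
      }

      // Rough equivalent of getNameserviceAggregatedInt(...): sum one counter
      // across all nameservices using a getter passed as a function.
      static int aggregate(List<Stats> perNameservice, ToIntFunction<Stats> getter) {
        return perNameservice.stream().mapToInt(getter).sum();
      }

      public static void main(String[] args) {
        List<Stats> stats = Arrays.asList(new Stats(2, 0, 1), new Stats(1, 1, 0));
        System.out.println(aggregate(stats, s -> s.inMaintenanceLive));   // 3
        System.out.println(aggregate(stats, s -> s.inMaintenanceDead));   // 1
        System.out.println(aggregate(stats, s -> s.enteringMaintenance)); // 1
      }
    }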
--- .../federation/metrics/FederationMBean.java | 18 ++++++++ .../federation/metrics/FederationMetrics.java | 18 ++++++++ .../metrics/NamenodeBeanMetrics.java | 18 ++++++++ .../resolver/MembershipNamenodeResolver.java | 6 +++ .../resolver/NamenodeStatusReport.java | 46 ++++++++++++++++++- .../router/NamenodeHeartbeatService.java | 5 +- .../store/records/MembershipStats.java | 12 +++++ .../impl/pb/MembershipStatsPBImpl.java | 33 +++++++++++++ .../src/main/proto/FederationProtocol.proto | 3 ++ .../metrics/TestFederationMetrics.java | 22 +++++++++ .../store/records/TestMembershipState.java | 9 ++++ 11 files changed, 188 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java index b37f5efb94a81..8f24fcbbd1317 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java @@ -130,6 +130,24 @@ public interface FederationMBean { */ int getNumDecomDeadNodes(); + /** + * Get the number of live datanodes which are under maintenance. + * @return Number of live datanodes which are under maintenance. + */ + int getNumInMaintenanceLiveDataNodes(); + + /** + * Get the number of dead datanodes which are under maintenance. + * @return Number of dead datanodes which are under maintenance. + */ + int getNumInMaintenanceDeadDataNodes(); + + /** + * Get the number of datanodes which are entering maintenance. + * @return Number of datanodes which are entering maintenance. + */ + int getNumEnteringMaintenanceDataNodes(); + /** * Get Max, Median, Min and Standard Deviation of DataNodes usage. * @return the DataNode usage information, as a JSON string. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java index c66910cf97dc5..5ab978d7f2bda 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java @@ -437,6 +437,24 @@ public int getNumDecomDeadNodes() { MembershipStats::getNumOfDecomDeadDatanodes); } + @Override + public int getNumInMaintenanceLiveDataNodes() { + return getNameserviceAggregatedInt( + MembershipStats::getNumOfInMaintenanceLiveDataNodes); + } + + @Override + public int getNumInMaintenanceDeadDataNodes() { + return getNameserviceAggregatedInt( + MembershipStats::getNumOfInMaintenanceDeadDataNodes); + } + + @Override + public int getNumEnteringMaintenanceDataNodes() { + return getNameserviceAggregatedInt( + MembershipStats::getNumOfEnteringMaintenanceDataNodes); + } + @Override // NameNodeMXBean public String getNodeUsage() { float median = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java index 9687af0236403..50ec175fa9aba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java @@ -697,16 +697,34 @@ public int getNumDecommissioningDataNodes() { @Override public int getNumInMaintenanceLiveDataNodes() { + try { + return getFederationMetrics().getNumInMaintenanceLiveDataNodes(); + } catch (IOException e) { + LOG.debug("Failed to get number of live in maintenance nodes", + e.getMessage()); + } return 0; } @Override public int getNumInMaintenanceDeadDataNodes() { + try { + return getFederationMetrics().getNumInMaintenanceDeadDataNodes(); + } catch (IOException e) { + LOG.debug("Failed to get number of dead in maintenance nodes", + e.getMessage()); + } return 0; } @Override public int getNumEnteringMaintenanceDataNodes() { + try { + return getFederationMetrics().getNumEnteringMaintenanceDataNodes(); + } catch (IOException e) { + LOG.debug("Failed to get number of entering maintenance nodes", + e.getMessage()); + } return 0; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java index 178db1b3dde55..2dd53d819c228 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java @@ -283,6 +283,12 @@ public boolean registerNamenode(NamenodeStatusReport report) stats.setNumOfStaleDatanodes(report.getNumStaleDatanodes()); stats.setNumOfDecomActiveDatanodes(report.getNumDecomLiveDatanodes()); stats.setNumOfDecomDeadDatanodes(report.getNumDecomDeadDatanodes()); + 
stats.setNumOfInMaintenanceLiveDataNodes( + report.getNumInMaintenanceLiveDataNodes()); + stats.setNumOfInMaintenanceDeadDataNodes( + report.getNumInMaintenanceDeadDataNodes()); + stats.setNumOfEnteringMaintenanceDataNodes( + report.getNumEnteringMaintenanceDataNodes()); record.setStats(stats); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java index 5b603facb0dcc..c82e3eb674629 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java @@ -50,6 +50,15 @@ public class NamenodeStatusReport { /** Dead decommissioned datanodes. */ private int deadDecomDatanodes = -1; + /** Live in maintenance datanodes. */ + private int inMaintenanceLiveDataNodes = -1; + + /** Dead in maintenance datanodes. */ + private int inMaintenanceDeadDataNodes = -1; + + /** Entering maintenance datanodes. */ + private int enteringMaintenanceDataNodes = -1; + /** Space stats. */ private long availableSpace = -1; private long numOfFiles = -1; @@ -228,15 +237,23 @@ public boolean getSafemode() { * @param numDecom Number of decommissioning nodes. * @param numLiveDecom Number of decommissioned live nodes. * @param numDeadDecom Number of decommissioned dead nodes. + * @param numInMaintenanceLive Number of in maintenance live nodes. + * @param numInMaintenanceDead Number of in maintenance dead nodes. + * @param numEnteringMaintenance Number of entering maintenance nodes. */ public void setDatanodeInfo(int numLive, int numDead, int numStale, - int numDecom, int numLiveDecom, int numDeadDecom) { + int numDecom, int numLiveDecom, int numDeadDecom, + int numInMaintenanceLive, int numInMaintenanceDead, + int numEnteringMaintenance) { this.liveDatanodes = numLive; this.deadDatanodes = numDead; this.staleDatanodes = numStale; this.decomDatanodes = numDecom; this.liveDecomDatanodes = numLiveDecom; this.deadDecomDatanodes = numDeadDecom; + this.inMaintenanceLiveDataNodes = numInMaintenanceLive; + this.inMaintenanceDeadDataNodes = numInMaintenanceDead; + this.enteringMaintenanceDataNodes = numEnteringMaintenance; this.statsValid = true; } @@ -294,6 +311,33 @@ public int getNumDecomDeadDatanodes() { return this.deadDecomDatanodes; } + /** + * Get the number of live in maintenance nodes. + * + * @return The number of live in maintenance nodes. + */ + public int getNumInMaintenanceLiveDataNodes() { + return this.inMaintenanceLiveDataNodes; + } + + /** + * Get the number of dead in maintenance nodes. + * + * @return The number of dead in maintenance nodes. + */ + public int getNumInMaintenanceDeadDataNodes() { + return this.inMaintenanceDeadDataNodes; + } + + /** + * Get the number of entering maintenance nodes. + * + * @return The number of entering maintenance nodes. + */ + public int getNumEnteringMaintenanceDataNodes() { + return this.enteringMaintenanceDataNodes; + } + /** * Set the filesystem information. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java index 475e90d7981e5..82b5609a8667f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java @@ -341,7 +341,10 @@ private void updateJMXParameters( jsonObject.getInt("NumStaleDataNodes"), jsonObject.getInt("NumDecommissioningDataNodes"), jsonObject.getInt("NumDecomLiveDataNodes"), - jsonObject.getInt("NumDecomDeadDataNodes")); + jsonObject.getInt("NumDecomDeadDataNodes"), + jsonObject.getInt("NumInMaintenanceLiveDataNodes"), + jsonObject.getInt("NumInMaintenanceDeadDataNodes"), + jsonObject.getInt("NumEnteringMaintenanceDataNodes")); } else if (name.equals( "Hadoop:service=NameNode,name=FSNamesystem")) { report.setNamesystemInfo( diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java index d452cd2c4022f..95c790cc95c59 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java @@ -97,6 +97,18 @@ public static MembershipStats newInstance() throws IOException { public abstract int getNumOfDecomDeadDatanodes(); + public abstract void setNumOfInMaintenanceLiveDataNodes(int nodes); + + public abstract int getNumOfInMaintenanceLiveDataNodes(); + + public abstract void setNumOfInMaintenanceDeadDataNodes(int nodes); + + public abstract int getNumOfInMaintenanceDeadDataNodes(); + + public abstract void setNumOfEnteringMaintenanceDataNodes(int nodes); + + public abstract int getNumOfEnteringMaintenanceDataNodes(); + @Override public SortedMap getPrimaryKeys() { // This record is not stored directly, no key needed diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java index 50ecbf3d48a67..9a8a2bbb667fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java @@ -208,4 +208,37 @@ public void setNumOfDecomDeadDatanodes(int nodes) { public int getNumOfDecomDeadDatanodes() { return this.translator.getProtoOrBuilder().getNumOfDecomDeadDatanodes(); } + + @Override + public void setNumOfInMaintenanceLiveDataNodes(int nodes) { + this.translator.getBuilder().setNumOfInMaintenanceLiveDataNodes(nodes); + } + + @Override + public int getNumOfInMaintenanceLiveDataNodes() { + return this.translator.getProtoOrBuilder() + .getNumOfInMaintenanceLiveDataNodes(); + } + + @Override + public void setNumOfInMaintenanceDeadDataNodes(int nodes) { + 
this.translator.getBuilder().setNumOfInMaintenanceDeadDataNodes(nodes); + } + + @Override + public int getNumOfInMaintenanceDeadDataNodes() { + return this.translator.getProtoOrBuilder() + .getNumOfInMaintenanceDeadDataNodes(); + } + + @Override + public void setNumOfEnteringMaintenanceDataNodes(int nodes) { + this.translator.getBuilder().setNumOfEnteringMaintenanceDataNodes(nodes); + } + + @Override + public int getNumOfEnteringMaintenanceDataNodes() { + return this.translator.getProtoOrBuilder() + .getNumOfEnteringMaintenanceDataNodes(); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto index 9e9fd4899c261..a55be731a7462 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto @@ -46,6 +46,9 @@ message NamenodeMembershipStatsRecordProto { optional uint32 numOfDecomActiveDatanodes = 23; optional uint32 numOfDecomDeadDatanodes = 24; optional uint32 numOfStaleDatanodes = 25; + optional uint32 numOfInMaintenanceLiveDataNodes = 26; + optional uint32 numOfInMaintenanceDeadDataNodes = 27; + optional uint32 numOfEnteringMaintenanceDataNodes = 28; } message NamenodeMembershipRecordProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java index 5d984e8645a30..2c147ebf1d1ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java @@ -145,6 +145,12 @@ public void testNamenodeStatsDataSource() throws IOException, JSONException { stats.getNumOfDecomActiveDatanodes()); assertEquals(json.getLong("numOfDecomDeadDatanodes"), stats.getNumOfDecomDeadDatanodes()); + assertEquals(json.getLong("numOfInMaintenanceLiveDataNodes"), + stats.getNumOfInMaintenanceLiveDataNodes()); + assertEquals(json.getLong("numOfInMaintenanceDeadDataNodes"), + stats.getNumOfInMaintenanceDeadDataNodes()); + assertEquals(json.getLong("numOfEnteringMaintenanceDataNodes"), + stats.getNumOfEnteringMaintenanceDataNodes()); assertEquals(json.getLong("numOfBlocks"), stats.getNumOfBlocks()); assertEquals(json.getString("rpcAddress"), mockEntry.getRpcAddress()); assertEquals(json.getString("webAddress"), mockEntry.getWebAddress()); @@ -197,6 +203,12 @@ public void testNameserviceStatsDataSource() json.getLong("numOfDecomActiveDatanodes")); assertEquals(stats.getNumOfDecomDeadDatanodes(), json.getLong("numOfDecomDeadDatanodes")); + assertEquals(stats.getNumOfInMaintenanceLiveDataNodes(), + json.getLong("numOfInMaintenanceLiveDataNodes")); + assertEquals(stats.getNumOfInMaintenanceDeadDataNodes(), + json.getLong("numOfInMaintenanceDeadDataNodes")); + assertEquals(stats.getNumOfStaleDatanodes(), + json.getLong("numOfEnteringMaintenanceDataNodes")); assertEquals(stats.getProvidedSpace(), json.getLong("providedSpace")); nameservicesFound++; @@ -268,6 +280,9 @@ private void validateClusterStatsBean(FederationMBean bean) long numDecom = 0; long numDecomLive = 0; long numDecomDead = 0; + long numInMaintenanceLive = 0; + long numInMaintenanceDead = 0; + long 
numEnteringMaintenance = 0; long numFiles = 0; for (MembershipState mock : getActiveMemberships()) { MembershipStats stats = mock.getStats(); @@ -278,6 +293,9 @@ private void validateClusterStatsBean(FederationMBean bean) numDecom += stats.getNumOfDecommissioningDatanodes(); numDecomLive += stats.getNumOfDecomActiveDatanodes(); numDecomDead += stats.getNumOfDecomDeadDatanodes(); + numInMaintenanceLive += stats.getNumOfInMaintenanceLiveDataNodes(); + numInMaintenanceDead += stats.getNumOfInMaintenanceLiveDataNodes(); + numEnteringMaintenance += stats.getNumOfEnteringMaintenanceDataNodes(); } assertEquals(numBlocks, bean.getNumBlocks()); @@ -287,6 +305,10 @@ private void validateClusterStatsBean(FederationMBean bean) assertEquals(numDecom, bean.getNumDecommissioningNodes()); assertEquals(numDecomLive, bean.getNumDecomLiveNodes()); assertEquals(numDecomDead, bean.getNumDecomDeadNodes()); + assertEquals(numInMaintenanceLive, bean.getNumInMaintenanceLiveDataNodes()); + assertEquals(numInMaintenanceDead, bean.getNumInMaintenanceDeadDataNodes()); + assertEquals(numEnteringMaintenance, + bean.getNumEnteringMaintenanceDataNodes()); assertEquals(numFiles, bean.getNumFiles()); assertEquals(getActiveMemberships().size() + getStandbyMemberships().size(), bean.getNumNamenodes()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java index 1aac632784f2d..df41f461e6ded 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java @@ -52,6 +52,9 @@ public class TestMembershipState { private static final int NUM_DECOM = 700; private static final int NUM_DECOM_ACTIVE = 800; private static final int NUM_DECOM_DEAD = 900; + private static final int NUM_MAIN_LIVE = 151; + private static final int NUM_MAIN_DEAD = 303; + private static final int NUM_ENTER_MAIN = 144; private static final long NUM_BLOCK_MISSING = 1000; private static final long TOTAL_SPACE = 1100; @@ -78,6 +81,9 @@ private MembershipState createRecord() throws IOException { stats.setNumOfDecommissioningDatanodes(NUM_DECOM); stats.setNumOfDecomActiveDatanodes(NUM_DECOM_ACTIVE); stats.setNumOfDecomDeadDatanodes(NUM_DECOM_DEAD); + stats.setNumOfInMaintenanceLiveDataNodes(NUM_MAIN_LIVE); + stats.setNumOfInMaintenanceDeadDataNodes(NUM_MAIN_DEAD); + stats.setNumOfEnteringMaintenanceDataNodes(NUM_ENTER_MAIN); stats.setNumOfBlocksMissing(NUM_BLOCK_MISSING); stats.setTotalSpace(TOTAL_SPACE); stats.setAvailableSpace(AVAILABLE_SPACE); @@ -107,6 +113,9 @@ private void validateRecord(MembershipState record) throws IOException { assertEquals(NUM_DECOM, stats.getNumOfDecommissioningDatanodes()); assertEquals(NUM_DECOM_ACTIVE, stats.getNumOfDecomActiveDatanodes()); assertEquals(NUM_DECOM_DEAD, stats.getNumOfDecomDeadDatanodes()); + assertEquals(NUM_MAIN_LIVE, stats.getNumOfInMaintenanceLiveDataNodes()); + assertEquals(NUM_MAIN_DEAD, stats.getNumOfInMaintenanceDeadDataNodes()); + assertEquals(NUM_ENTER_MAIN, stats.getNumOfEnteringMaintenanceDataNodes()); assertEquals(TOTAL_SPACE, stats.getTotalSpace()); assertEquals(AVAILABLE_SPACE, stats.getAvailableSpace()); } From 6cdf8db55ca42ab11cfb21081934e20976f91f12 Mon Sep 17 00:00:00 2001 
From: Inigo Goiri Date: Mon, 4 Mar 2019 13:57:48 -0800 Subject: [PATCH 0308/1308] HDFS-14331. RBF: IOE While Removing Mount Entry. Contributed by Ayush Saxena. --- .../server/federation/router/RouterAdminServer.java | 11 +++++++++-- .../server/federation/router/TestRouterQuota.java | 7 +++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index a2a5a4239f735..97d4d162add58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -303,8 +303,15 @@ private void synchronizeQuota(String path, long nsQuota, long ssQuota) public RemoveMountTableEntryResponse removeMountTableEntry( RemoveMountTableEntryRequest request) throws IOException { // clear sub-cluster's quota definition - synchronizeQuota(request.getSrcPath(), HdfsConstants.QUOTA_RESET, - HdfsConstants.QUOTA_RESET); + try { + synchronizeQuota(request.getSrcPath(), HdfsConstants.QUOTA_RESET, + HdfsConstants.QUOTA_RESET); + } catch (Exception e) { + // Ignore exception, if any while reseting quota. Specifically to handle + // if the actual destination doesn't exist. + LOG.warn("Unable to clear quota at the destinations for {}: {}", + request.getSrcPath(), e.getMessage()); + } return getMountTableStore().removeMountTableEntry(request); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java index 034023c47aaf6..abcbe8fdbdc82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; @@ -754,6 +755,12 @@ public void testClearQuotaDefAfterRemovingMountTable() throws Exception { assertNull(routerQuota); assertEquals(HdfsConstants.QUOTA_RESET, subClusterQuota.getQuota()); assertEquals(HdfsConstants.QUOTA_RESET, subClusterQuota.getSpaceQuota()); + + // Verify removing mount entry with actual destinations not present. + mountTable = MountTable.newInstance("/mount", + Collections.singletonMap("ns0", "/testdir16")); + addMountTable(mountTable); + assertTrue(removeMountTable("/mount")); } @Test From fcabc8f0e4097cce934308fdd28cd3bcdbb66877 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 4 Mar 2019 15:16:29 -0800 Subject: [PATCH 0309/1308] HDFS-14335. RBF: Fix heartbeat typos in the Router. Contributed by CR Hota. 
--- .../hdfs/server/federation/router/Router.java | 18 +++++++++--------- .../router/TestRouterClientRejectOverload.java | 2 +- .../router/TestRouterNamenodeMonitoring.java | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 0257162d1391b..7d112f90de693 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -208,9 +208,9 @@ protected void serviceInit(Configuration configuration) throws Exception { // Create status updater for each monitored Namenode this.namenodeHeartbeatServices = createNamenodeHeartbeatServices(); - for (NamenodeHeartbeatService hearbeatService : + for (NamenodeHeartbeatService heartbeatService : this.namenodeHeartbeatServices) { - addService(hearbeatService); + addService(heartbeatService); } if (this.namenodeHeartbeatServices.isEmpty()) { @@ -487,9 +487,9 @@ public InetSocketAddress getHttpServerAddress() { if (conf.getBoolean( RBFConfigKeys.DFS_ROUTER_MONITOR_LOCAL_NAMENODE, RBFConfigKeys.DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT)) { - // Create a local heartbet service + // Create a local heartbeat service NamenodeHeartbeatService localHeartbeatService = - createLocalNamenodeHearbeatService(); + createLocalNamenodeHeartbeatService(); if (localHeartbeatService != null) { String nnDesc = localHeartbeatService.getNamenodeDesc(); ret.put(nnDesc, localHeartbeatService); @@ -514,7 +514,7 @@ public InetSocketAddress getHttpServerAddress() { } if (nsId != null) { NamenodeHeartbeatService heartbeatService = - createNamenodeHearbeatService(nsId, nnId); + createNamenodeHeartbeatService(nsId, nnId); if (heartbeatService != null) { ret.put(heartbeatService.getNamenodeDesc(), heartbeatService); } @@ -530,7 +530,7 @@ public InetSocketAddress getHttpServerAddress() { * * @return Updater of the status for the local Namenode. */ - protected NamenodeHeartbeatService createLocalNamenodeHearbeatService() { + protected NamenodeHeartbeatService createLocalNamenodeHeartbeatService() { // Detect NN running in this machine String nsId = DFSUtil.getNamenodeNameServiceId(conf); String nnId = null; @@ -541,7 +541,7 @@ protected NamenodeHeartbeatService createLocalNamenodeHearbeatService() { } } - return createNamenodeHearbeatService(nsId, nnId); + return createNamenodeHeartbeatService(nsId, nnId); } /** @@ -551,7 +551,7 @@ protected NamenodeHeartbeatService createLocalNamenodeHearbeatService() { * @param nnId Identifier of the namenode (HA) to monitor. * @return Updater of the status for the specified Namenode. */ - protected NamenodeHeartbeatService createNamenodeHearbeatService( + protected NamenodeHeartbeatService createNamenodeHeartbeatService( String nsId, String nnId) { LOG.info("Creating heartbeat service for Namenode {} in {}", nnId, nsId); @@ -739,7 +739,7 @@ RouterQuotaUpdateService getQuotaCacheUpdateService() { * Get the list of namenode heartbeat service. 
*/ @VisibleForTesting - Collection getNamenodeHearbeatServices() { + Collection getNamenodeHeartbeatServices() { return this.namenodeHeartbeatServices; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java index 14bd7b0a400d4..a4611f22e842c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java @@ -341,7 +341,7 @@ public void testNoNamenodesAvailable() throws Exception{ for (RouterContext routerContext : cluster.getRouters()) { // Manually trigger the heartbeat Collection heartbeatServices = routerContext - .getRouter().getNamenodeHearbeatServices(); + .getRouter().getNamenodeHeartbeatServices(); for (NamenodeHeartbeatService service : heartbeatServices) { service.periodicInvoke(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java index 75c267643f9a4..0bea11c017e68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java @@ -113,7 +113,7 @@ public void testNamenodeMonitoring() throws Exception { } Collection heartbeatServices = routerContext - .getRouter().getNamenodeHearbeatServices(); + .getRouter().getNamenodeHeartbeatServices(); // manually trigger the heartbeat for (NamenodeHeartbeatService service : heartbeatServices) { service.periodicInvoke(); From 2a2d5eb4418af1185011adf15d4391103d35c671 Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Tue, 5 Mar 2019 11:01:11 -0800 Subject: [PATCH 0310/1308] HDFS-14334. RBF: Use human readable format for long numbers in the Router UI. Contributed by Inigo Goiri. 
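The UI change below replaces raw counters with a new fmt_human_number dust filter that scales values into K/M units. For reference only, the same rounding logic expressed in Java (a sketch; the shipped code is the JavaScript filter added to dfs-dust.js, and exact formatting of small values differs slightly):

    public class HumanNumberSketch {
      private static final String[] UNITS = {"", "K", "M"};

      // Mirrors the filter's loop: divide by 1000 until the integer part
      // vanishes or units run out, then step back one unit and round to
      // two decimals.
      static String fmtHumanNumber(double v) {
        double prev = 0;
        int i = 0;
        while (Math.floor(v) > 0 && i < UNITS.length) {
          prev = v;
          v /= 1000;
          i++;
        }
        if (i > 0) {
          v = prev;
          i--;
        }
        return (Math.round(v * 100) / 100.0) + UNITS[i];
      }

      public static void main(String[] args) {
        System.out.println(fmtHumanNumber(950));       // stays in ones
        System.out.println(fmtHumanNumber(12345));     // roughly 12.35K
        System.out.println(fmtHumanNumber(12345678));  // roughly 12.35M
      }
    }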
--- .../main/webapps/router/federationhealth.html | 16 ++++++++-------- .../src/main/webapps/static/dfs-dust.js | 16 ++++++++++++++++ 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html index 0f089fe3c2e60..c591698e4b669 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html @@ -177,10 +177,10 @@ - {numOfFiles} - {numOfBlocks} - {numOfBlocksMissing} - {numOfBlocksUnderReplicated} + {numOfFiles|fmt_human_number} + {numOfBlocks|fmt_human_number} + {numOfBlocksMissing|fmt_human_number} + {numOfBlocksUnderReplicated|fmt_human_number} {numOfActiveDatanodes} {numOfDeadDatanodes} {numOfDecommissioningDatanodes} @@ -244,10 +244,10 @@ - {numOfFiles} - {numOfBlocks} - {numOfBlocksMissing} - {numOfBlocksUnderReplicated} + {numOfFiles|fmt_human_number} + {numOfBlocks|fmt_human_number} + {numOfBlocksMissing|fmt_human_number} + {numOfBlocksUnderReplicated|fmt_human_number} {numOfActiveDatanodes} {numOfDeadDatanodes} {numOfDecommissioningDatanodes} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js index 316a9947ca90d..7772d72aef846 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js @@ -96,6 +96,22 @@ 'fmt_number': function (v) { return v.toLocaleString(); + }, + + 'fmt_human_number': function (v) { + var UNITS = ['', 'K', 'M']; + var prev = 0, i = 0; + while (Math.floor(v) > 0 && i < UNITS.length) { + prev = v; + v /= 1000; + i += 1; + } + + if (i > 0) { + v = prev; + i -= 1; + } + return Math.round(v * 100) / 100 + UNITS[i]; } }; $.extend(dust.filters, filters); From f539e2a4ee93c4ee479fe25e8062c8ab4c7f8ba8 Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Fri, 8 Mar 2019 11:35:40 -0800 Subject: [PATCH 0311/1308] HDFS-14343. RBF: Fix renaming folders spread across multiple subclusters. Contributed by Ayush Saxena. 
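The core of the change below is a dispatch rule in RouterClientProtocol#rename and #rename2: when the source is a directory whose mount entry spans every subcluster (isMultiDestDirectory), the rename is applied on all destinations; otherwise the previous sequential first-success behaviour is kept. A minimal sketch of that rule with stand-in types (the real code goes through RouterRpcClient.invokeAll/invokeConcurrent and invokeSequential):

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;
    import java.util.function.Predicate;

    public class RenameDispatchSketch {
      // Stand-in for a RemoteLocation plus the RPC call issued against it.
      interface Destination {
        boolean rename(String src, String dst) throws IOException;
      }

      static boolean rename(String src, String dst, List<Destination> locations,
          Predicate<String> isMultiDestDirectory) throws IOException {
        if (isMultiDestDirectory.test(src)) {
          // Directory replicated to all subclusters: apply on every destination.
          boolean ok = true;
          for (Destination d : locations) {
            ok &= d.rename(src, dst);
          }
          return ok;
        }
        // Single-destination path: first successful destination wins.
        for (Destination d : locations) {
          if (d.rename(src, dst)) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) throws IOException {
        Destination ns0 = (s, d) -> true;
        Destination ns1 = (s, d) -> true;
        // A multi-destination directory rename fans out to both subclusters.
        System.out.println(rename("/mount/dir/dir", "/mount/dir/subdir",
            Arrays.asList(ns0, ns1), p -> true));
      }
    }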
--- .../router/RouterClientProtocol.java | 47 +++++++- ...MultipleDestinationMountTableResolver.java | 112 ++++++++++++++++++ 2 files changed, 156 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 757e096960696..da601425ca152 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; @@ -88,6 +89,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.annotations.VisibleForTesting; + import java.io.FileNotFoundException; import java.io.IOException; import java.util.Collection; @@ -466,8 +469,12 @@ public boolean rename(final String src, final String dst) RemoteMethod method = new RemoteMethod("rename", new Class[] {String.class, String.class}, new RemoteParam(), dstParam); - return rpcClient.invokeSequential(locs, method, Boolean.class, - Boolean.TRUE); + if (isMultiDestDirectory(src)) { + return rpcClient.invokeAll(locs, method); + } else { + return rpcClient.invokeSequential(locs, method, Boolean.class, + Boolean.TRUE); + } } @Override @@ -488,7 +495,11 @@ public void rename2(final String src, final String dst, RemoteMethod method = new RemoteMethod("rename2", new Class[] {String.class, String.class, options.getClass()}, new RemoteParam(), dstParam, options); - rpcClient.invokeSequential(locs, method, null, null); + if (isMultiDestDirectory(src)) { + rpcClient.invokeConcurrent(locs, method); + } else { + rpcClient.invokeSequential(locs, method, null, null); + } } @Override @@ -1857,4 +1868,34 @@ private long getModifiedTime(Map ret, String path, } return modTime; } + + /** + * Checks if the path is a directory and is supposed to be present in all + * subclusters. + * @param src the source path + * @return true if the path is directory and is supposed to be present in all + * subclusters else false in all other scenarios. + * @throws IOException if unable to get the file status. 
+ */ + @VisibleForTesting + boolean isMultiDestDirectory(String src) throws IOException { + try { + if (rpcServer.isPathAll(src)) { + List locations; + locations = rpcServer.getLocationsForPath(src, false); + RemoteMethod method = new RemoteMethod("getFileInfo", + new Class[] {String.class}, new RemoteParam()); + HdfsFileStatus fileStatus = rpcClient.invokeSequential(locations, + method, HdfsFileStatus.class, null); + if (fileStatus != null) { + return fileStatus.isDirectory(); + } else { + LOG.debug("The destination {} doesn't exist.", src); + } + } + } catch (UnresolvedPathException e) { + LOG.debug("The destination {} is a symlink.", src); + } + return false; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java index 46bfff99da03f..2cd11f080be65 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java @@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -427,6 +428,117 @@ public void testGetDestinationRandom() throws Exception { null, null, Arrays.asList("ns0", "ns1")); } + @Test + public void testIsMultiDestDir() throws Exception { + RouterClientProtocol client = + routerContext.getRouter().getRpcServer().getClientProtocolModule(); + setupOrderMountPath(DestinationOrder.HASH_ALL); + // Should be true only for directory and false for all other cases. + assertTrue(client.isMultiDestDirectory("/mount/dir")); + assertFalse(client.isMultiDestDirectory("/mount/nodir")); + assertFalse(client.isMultiDestDirectory("/mount/dir/file")); + routerFs.createSymlink(new Path("/mount/dir/file"), + new Path("/mount/dir/link"), true); + assertFalse(client.isMultiDestDirectory("/mount/dir/link")); + routerFs.createSymlink(new Path("/mount/dir/dir"), + new Path("/mount/dir/linkDir"), true); + assertFalse(client.isMultiDestDirectory("/mount/dir/linkDir")); + resetTestEnvironment(); + // Test single directory destination. Should be false for the directory. + setupOrderMountPath(DestinationOrder.HASH); + assertFalse(client.isMultiDestDirectory("/mount/dir")); + } + + @Test + public void testRenameMultipleDestDirectories() throws Exception { + // Test renaming directories using rename API. + verifyRenameOnMultiDestDirectories(DestinationOrder.HASH_ALL, false); + resetTestEnvironment(); + verifyRenameOnMultiDestDirectories(DestinationOrder.RANDOM, false); + resetTestEnvironment(); + verifyRenameOnMultiDestDirectories(DestinationOrder.SPACE, false); + resetTestEnvironment(); + // Test renaming directories using rename2 API. 
+ verifyRenameOnMultiDestDirectories(DestinationOrder.HASH_ALL, true); + resetTestEnvironment(); + verifyRenameOnMultiDestDirectories(DestinationOrder.RANDOM, true); + resetTestEnvironment(); + verifyRenameOnMultiDestDirectories(DestinationOrder.SPACE, true); + } + + /** + * Test to verify rename operation on directories in case of multiple + * destinations. + * @param order order to be followed by the mount entry. + * @param isRename2 true if the verification is to be done using rename2(..) + * method. + * @throws Exception on account of any exception during test execution. + */ + private void verifyRenameOnMultiDestDirectories(DestinationOrder order, + boolean isRename2) throws Exception { + setupOrderMountPath(order); + Path src = new Path("/mount/dir/dir"); + Path nnSrc = new Path("/tmp/dir/dir"); + Path dst = new Path("/mount/dir/subdir"); + Path nnDst = new Path("/tmp/dir/subdir"); + Path fileSrc = new Path("/mount/dir/dir/file"); + Path nnFileSrc = new Path("/tmp/dir/dir/file"); + Path fileDst = new Path("/mount/dir/subdir/file"); + Path nnFileDst = new Path("/tmp/dir/subdir/file"); + DFSTestUtil.createFile(routerFs, fileSrc, 100L, (short) 1, 1024L); + if (isRename2) { + routerFs.rename(src, dst, Rename.NONE); + } else { + assertTrue(routerFs.rename(src, dst)); + } + assertTrue(nnFs0.exists(nnDst)); + assertTrue(nnFs1.exists(nnDst)); + assertFalse(nnFs0.exists(nnSrc)); + assertFalse(nnFs1.exists(nnSrc)); + assertFalse(routerFs.exists(fileSrc)); + assertTrue(routerFs.exists(fileDst)); + assertTrue(nnFs0.exists(nnFileDst) || nnFs1.exists(nnFileDst)); + assertFalse(nnFs0.exists(nnFileSrc) || nnFs1.exists(nnFileSrc)); + + // Verify rename file. + Path fileRenamed = new Path("/mount/dir/subdir/renamedFile"); + Path nnFileRenamed = new Path("/tmp/dir/subdir/renamedFile"); + if (isRename2) { + routerFs.rename(fileDst, fileRenamed, Rename.NONE); + } else { + assertTrue(routerFs.rename(fileDst, fileRenamed)); + } + assertTrue(routerFs.exists(fileRenamed)); + assertFalse(routerFs.exists(fileDst)); + assertTrue(nnFs0.exists(nnFileRenamed) || nnFs1.exists(nnFileRenamed)); + assertFalse(nnFs0.exists(nnFileDst) || nnFs1.exists(nnFileDst)); + + // Verify rename when one source directory is not present. + Path dst1 = new Path("/mount/dir/renameddir"); + Path nnDst1 = new Path("/tmp/dir/renameddir"); + nnFs1.delete(nnDst, true); + if (isRename2) { + routerFs.rename(dst, dst1, Rename.NONE); + } else { + assertTrue(routerFs.rename(dst, dst1)); + } + assertTrue(nnFs0.exists(nnDst1)); + assertFalse(nnFs0.exists(nnDst)); + + // Verify rename when one destination directory is already present. + Path src1 = new Path("/mount/dir"); + Path dst2 = new Path("/mount/OneDest"); + Path nnDst2 = new Path("/tmp/OneDest"); + nnFs0.mkdirs(nnDst2); + if (isRename2) { + routerFs.rename(src1, dst2, Rename.NONE); + } else { + assertTrue(routerFs.rename(src1, dst2)); + } + assertTrue(nnFs0.exists(nnDst2)); + assertTrue(nnFs1.exists(nnDst2)); + } + /** * Generic test for getting the destination subcluster. * @param order DestinationOrder of the mount point. From 9a9fbbe145432136d85d2d2e133364c7e79e65e1 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 20 Mar 2019 11:12:49 +0530 Subject: [PATCH 0312/1308] HDFS-14351. RBF: Optimize configuration item resolving for monitor namenode. Contributed by He Xiaoqiao and Inigo Goiri. 
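The refactor below replaces a null-checked conf.get(...).split(",") with Configuration#getTrimmedStringCollection, so whitespace around entries is trimmed and empty entries are dropped before heartbeat services are created. A small stand-alone sketch of the intended parsing (plain Java, without the Hadoop Configuration class; the nsId/nnId handling mirrors the loop in Router#createNamenodeHeartbeatServices):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.List;

    public class MonitorNamenodeParseSketch {
      // Monitored namenodes are listed as "ns0.nn0, ns1.nn1"; each entry is
      // either "nameservice" or "nameservice.namenode".
      static List<String[]> parse(String monitored) {
        Collection<String> entries = new ArrayList<>();
        for (String s : monitored.split(",")) {   // rough getTrimmedStringCollection
          if (!s.trim().isEmpty()) {
            entries.add(s.trim());
          }
        }
        List<String[]> pairs = new ArrayList<>();
        for (String entry : entries) {
          String[] split = entry.split("\\.");
          if (split.length == 2) {
            pairs.add(new String[] {split[0], split[1]});  // nsId, nnId
          } else if (split.length == 1) {
            pairs.add(new String[] {entry, null});         // nameservice only
          }                                                // else: malformed, skipped
        }
        return pairs;
      }

      public static void main(String[] args) {
        for (String[] p : parse("ns0.nn0, ns1.nn1 , ns2")) {
          System.out.println(Arrays.toString(p));
        }
      }
    }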
--- .../hdfs/server/federation/router/Router.java | 38 ++- .../hdfs/server/federation/MockNamenode.java | 225 ++++++++++++++ .../router/TestRouterNamenodeMonitoring.java | 278 +++++++++++++----- 3 files changed, 444 insertions(+), 97 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 7d112f90de693..9e18ebfb4d80d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -497,27 +497,25 @@ public InetSocketAddress getHttpServerAddress() { } // Create heartbeat services for a list specified by the admin - String namenodes = this.conf.get( + Collection namenodes = this.conf.getTrimmedStringCollection( RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE); - if (namenodes != null) { - for (String namenode : namenodes.split(",")) { - String[] namenodeSplit = namenode.split("\\."); - String nsId = null; - String nnId = null; - if (namenodeSplit.length == 2) { - nsId = namenodeSplit[0]; - nnId = namenodeSplit[1]; - } else if (namenodeSplit.length == 1) { - nsId = namenode; - } else { - LOG.error("Wrong Namenode to monitor: {}", namenode); - } - if (nsId != null) { - NamenodeHeartbeatService heartbeatService = - createNamenodeHeartbeatService(nsId, nnId); - if (heartbeatService != null) { - ret.put(heartbeatService.getNamenodeDesc(), heartbeatService); - } + for (String namenode : namenodes) { + String[] namenodeSplit = namenode.split("\\."); + String nsId = null; + String nnId = null; + if (namenodeSplit.length == 2) { + nsId = namenodeSplit[0]; + nnId = namenodeSplit[1]; + } else if (namenodeSplit.length == 1) { + nsId = namenode; + } else { + LOG.error("Wrong Namenode to monitor: {}", namenode); + } + if (nsId != null) { + NamenodeHeartbeatService heartbeatService = + createNamenodeHeartbeatService(nsId, nnId); + if (heartbeatService != null) { + ret.put(heartbeatService.getNamenodeDesc(), heartbeatService); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java new file mode 100644 index 0000000000000..9b58fff085c9e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java @@ -0,0 +1,225 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.net.URI; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; +import org.apache.hadoop.ha.HAServiceStatus; +import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService; +import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; +import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol; +import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService; +import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService; +import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.http.HttpServer2; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.RPC.Server; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import com.google.protobuf.BlockingService; + + +/** + * Mock for the network interfaces (e.g., RPC and HTTP) of a Namenode. This is + * used by the Routers in a mock cluster. + */ +public class MockNamenode { + + /** Mock implementation of the Namenode. */ + private final NamenodeProtocols mockNn; + + /** HA state of the Namenode. */ + private HAServiceState haState = HAServiceState.STANDBY; + + /** RPC server of the Namenode that redirects calls to the mock. */ + private Server rpcServer; + /** HTTP server of the Namenode that redirects calls to the mock. */ + private HttpServer2 httpServer; + + + public MockNamenode() throws Exception { + Configuration conf = new Configuration(); + + this.mockNn = mock(NamenodeProtocols.class); + setupMock(); + setupRPCServer(conf); + setupHTTPServer(conf); + } + + /** + * Setup the mock of the Namenode. It offers the basic functionality for + * Routers to get the status. + * @throws IOException If the mock cannot be setup. + */ + protected void setupMock() throws IOException { + NamespaceInfo nsInfo = new NamespaceInfo(1, "clusterId", "bpId", 1); + when(mockNn.versionRequest()).thenReturn(nsInfo); + + when(mockNn.getServiceStatus()).thenAnswer(new Answer() { + @Override + public HAServiceStatus answer(InvocationOnMock invocation) + throws Throwable { + HAServiceStatus haStatus = new HAServiceStatus(getHAServiceState()); + haStatus.setNotReadyToBecomeActive(""); + return haStatus; + } + }); + } + + /** + * Setup the RPC server of the Namenode that redirects calls to the mock. + * @param conf Configuration of the server. 
+ * @throws IOException If the RPC server cannot be setup. + */ + private void setupRPCServer(final Configuration conf) throws IOException { + RPC.setProtocolEngine( + conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class); + ClientNamenodeProtocolServerSideTranslatorPB + clientNNProtoXlator = + new ClientNamenodeProtocolServerSideTranslatorPB(mockNn); + BlockingService clientNNPbService = + ClientNamenodeProtocol.newReflectiveBlockingService( + clientNNProtoXlator); + + rpcServer = new RPC.Builder(conf) + .setProtocol(ClientNamenodeProtocolPB.class) + .setInstance(clientNNPbService) + .setBindAddress("0.0.0.0") + .setPort(0) + .build(); + + NamenodeProtocolServerSideTranslatorPB nnProtoXlator = + new NamenodeProtocolServerSideTranslatorPB(mockNn); + BlockingService nnProtoPbService = + NamenodeProtocolService.newReflectiveBlockingService( + nnProtoXlator); + DFSUtil.addPBProtocol( + conf, NamenodeProtocolPB.class, nnProtoPbService, rpcServer); + + DatanodeProtocolServerSideTranslatorPB dnProtoPbXlator = + new DatanodeProtocolServerSideTranslatorPB(mockNn, 1000); + BlockingService dnProtoPbService = + DatanodeProtocolService.newReflectiveBlockingService( + dnProtoPbXlator); + DFSUtil.addPBProtocol( + conf, DatanodeProtocolPB.class, dnProtoPbService, rpcServer); + + HAServiceProtocolServerSideTranslatorPB haServiceProtoXlator = + new HAServiceProtocolServerSideTranslatorPB(mockNn); + BlockingService haProtoPbService = + HAServiceProtocolService.newReflectiveBlockingService( + haServiceProtoXlator); + DFSUtil.addPBProtocol( + conf, HAServiceProtocolPB.class, haProtoPbService, rpcServer); + + rpcServer.start(); + } + + /** + * Setup the HTTP server of the Namenode that redirects calls to the mock. + * @param conf Configuration of the server. + * @throws IOException If the HTTP server cannot be setup. + */ + private void setupHTTPServer(Configuration conf) throws IOException { + HttpServer2.Builder builder = new HttpServer2.Builder() + .setName("hdfs") + .setConf(conf) + .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))) + .addEndpoint(URI.create("http://0.0.0.0:0")); + httpServer = builder.build(); + httpServer.start(); + } + + /** + * Get the RPC port for the Mock Namenode. + * @return RPC port. + */ + public int getRPCPort() { + return rpcServer.getListenerAddress().getPort(); + } + + /** + * Get the HTTP port for the Mock Namenode. + * @return HTTP port. + */ + public int getHTTPPort() { + return httpServer.getConnectorAddress(0).getPort(); + } + + /** + * Get the Mock core. This is used to extend the mock. + * @return Mock Namenode protocol to be extended. + */ + public NamenodeProtocols getMock() { + return mockNn; + } + + /** + * Get the HA state of the Mock Namenode. + * @return HA state (ACTIVE or STANDBY). + */ + public HAServiceState getHAServiceState() { + return haState; + } + + /** + * Show the Mock Namenode as Active. + */ + public void transitionToActive() { + this.haState = HAServiceState.ACTIVE; + } + + /** + * Show the Mock Namenode as Standby. + */ + public void transitionToStandby() { + this.haState = HAServiceState.STANDBY; + } + + /** + * Stop the Mock Namenode. It stops all the servers. + * @throws Exception If it cannot stop the Namenode. 
+ */ + public void stop() throws Exception { + if (rpcServer != null) { + rpcServer.stop(); + } + if (httpServer != null) { + httpServer.stop(); + } + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java index 0bea11c017e68..1224fa2ddcd66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java @@ -17,127 +17,251 @@ */ package org.apache.hadoop.hdfs.server.federation.router; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; -import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE; -import static org.junit.Assert.assertEquals; +import static java.util.Arrays.asList; +import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration; import static org.junit.Assert.assertTrue; +import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.server.federation.MockNamenode; import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; -import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext; -import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; -import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Test namenodes monitor behavior in the Router. 
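For readers following the new test harness, here is a minimal usage sketch of the MockNamenode class introduced above: construct it, flip its HA state, read back the dynamically assigned ports, and stop it. Only methods shown in this patch are used (the constructor, transitionToActive, getRPCPort, getHTTPPort, getMock and stop); the wrapping class and the printout are illustrative.

// Minimal sketch: drives the MockNamenode API added by this patch.
import org.apache.hadoop.hdfs.server.federation.MockNamenode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

public class MockNamenodeUsageSketch {
  public static void main(String[] args) throws Exception {
    MockNamenode nn = new MockNamenode();
    try {
      // The mock reports STANDBY by default; make it report ACTIVE.
      nn.transitionToActive();

      // Both servers bind to port 0, so read the assigned ports back.
      System.out.println("Mock Namenode RPC on localhost:" + nn.getRPCPort()
          + ", HTTP on localhost:" + nn.getHTTPPort());

      // The Mockito core can be extended with extra stubbed behavior.
      NamenodeProtocols mock = nn.getMock();
      // e.g. when(mock.getStats()).thenReturn(...) in a real test.
    } finally {
      // Shuts down both the RPC and the HTTP server.
      nn.stop();
    }
  }
}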
*/ public class TestRouterNamenodeMonitoring { - private static StateStoreDFSCluster cluster; - private static RouterContext routerContext; - private static MembershipNamenodeResolver resolver; + private static final Logger LOG = + LoggerFactory.getLogger(TestRouterNamenodeMonitoring.class); - private String ns0; - private String ns1; - private long initializedTime; - @Before - public void setUp() throws Exception { - // Build and start a federated cluster with HA enabled - cluster = new StateStoreDFSCluster(true, 2); - // Enable heartbeat service and local heartbeat - Configuration routerConf = new RouterConfigBuilder() - .stateStore() - .admin() - .rpc() - .enableLocalHeartbeat(true) - .heartbeat() - .build(); + /** Router for the test. */ + private Router router; + /** Namenodes in the cluster. */ + private Map> nns = new HashMap<>(); + /** Nameservices in the federated cluster. */ + private List nsIds = asList("ns0", "ns1"); - // Specify local node (ns0.nn1) to monitor - StringBuilder sb = new StringBuilder(); - ns0 = cluster.getNameservices().get(0); - NamenodeContext context = cluster.getNamenodes(ns0).get(1); - routerConf.set(DFS_NAMESERVICE_ID, ns0); - routerConf.set(DFS_HA_NAMENODE_ID_KEY, context.getNamenodeId()); + /** Time the test starts. */ + private long initializedTime; - // Specify namenodes (ns1.nn0,ns1.nn1) to monitor - sb = new StringBuilder(); - ns1 = cluster.getNameservices().get(1); - for (NamenodeContext ctx : cluster.getNamenodes(ns1)) { - String suffix = ctx.getConfSuffix(); - if (sb.length() != 0) { - sb.append(","); + + @Before + public void setup() throws Exception { + LOG.info("Initialize the Mock Namenodes to monitor"); + for (String nsId : nsIds) { + nns.put(nsId, new HashMap<>()); + for (String nnId : asList("nn0", "nn1")) { + nns.get(nsId).put(nnId, new MockNamenode()); } - sb.append(suffix); } - // override with the namenodes: ns1.nn0,ns1.nn1 - routerConf.set(DFS_ROUTER_MONITOR_NAMENODE, sb.toString()); - cluster.addRouterOverrides(routerConf); - cluster.startCluster(); - cluster.startRouters(); - cluster.waitClusterUp(); + LOG.info("Set nn0 to active for all nameservices"); + for (Map nnNS : nns.values()) { + nnNS.get("nn0").transitionToActive(); + nnNS.get("nn1").transitionToStandby(); + } - routerContext = cluster.getRandomRouter(); - resolver = (MembershipNamenodeResolver) routerContext.getRouter() - .getNamenodeResolver(); initializedTime = Time.now(); } @After - public void tearDown() { - if (cluster != null) { - cluster.stopRouter(routerContext); - cluster.shutdown(); - cluster = null; + public void cleanup() throws Exception { + for (Map nnNS : nns.values()) { + for (MockNamenode nn : nnNS.values()) { + nn.stop(); + } + } + nns.clear(); + + if (router != null) { + router.stop(); + } + } + + /** + * Get the configuration of the cluster which contains all the Namenodes and + * their addresses. + * @return Configuration containing all the Namenodes. 
+ */ + private Configuration getNamenodesConfig() { + final Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_NAMESERVICES, + StringUtils.join(",", nns.keySet())); + for (String nsId : nns.keySet()) { + Set nnIds = nns.get(nsId).keySet(); + + StringBuilder sb = new StringBuilder(); + sb.append(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX); + sb.append(".").append(nsId); + conf.set(sb.toString(), StringUtils.join(",", nnIds)); + + for (String nnId : nnIds) { + final MockNamenode nn = nns.get(nsId).get(nnId); + + sb = new StringBuilder(); + sb.append(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY); + sb.append(".").append(nsId); + sb.append(".").append(nnId); + conf.set(sb.toString(), "localhost:" + nn.getRPCPort()); + + sb = new StringBuilder(); + sb.append(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); + sb.append(".").append(nsId); + sb.append(".").append(nnId); + conf.set(sb.toString(), "localhost:" + nn.getHTTPPort()); + } } + return conf; } @Test public void testNamenodeMonitoring() throws Exception { - // Set nn0 to active for all nameservices - for (String ns : cluster.getNameservices()) { - cluster.switchToActive(ns, "nn0"); - cluster.switchToStandby(ns, "nn1"); - } + Configuration nsConf = getNamenodesConfig(); - Collection heartbeatServices = routerContext - .getRouter().getNamenodeHeartbeatServices(); - // manually trigger the heartbeat + // Setup the State Store for the Router to use + Configuration stateStoreConfig = getStateStoreConfiguration(); + stateStoreConfig.setClass( + RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS, + MembershipNamenodeResolver.class, ActiveNamenodeResolver.class); + stateStoreConfig.setClass( + RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS, + MountTableResolver.class, FileSubclusterResolver.class); + + Configuration routerConf = new RouterConfigBuilder(nsConf) + .enableLocalHeartbeat(true) + .heartbeat() + .stateStore() + .rpc() + .build(); + + // Specify namenodes (ns1.nn0,ns1.nn1) to monitor + routerConf.set(RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "0.0.0.0:0"); + routerConf.set(RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE, + "ns1.nn0,ns1.nn1"); + routerConf.addResource(stateStoreConfig); + + // Specify local node (ns0.nn1) to monitor + routerConf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "ns0"); + routerConf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1"); + + // Start the Router with the namenodes to monitor + router = new Router(); + router.init(routerConf); + router.start(); + + // Manually trigger the heartbeat and update the values + Collection heartbeatServices = + router.getNamenodeHeartbeatServices(); for (NamenodeHeartbeatService service : heartbeatServices) { service.periodicInvoke(); } - + MembershipNamenodeResolver resolver = + (MembershipNamenodeResolver) router.getNamenodeResolver(); resolver.loadCache(true); - List namespaceInfo0 = - resolver.getNamenodesForNameserviceId(ns0); - List namespaceInfo1 = - resolver.getNamenodesForNameserviceId(ns1); - // The modified date won't be updated in ns0.nn0 since it isn't - // monitored by the Router. 
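As a concrete illustration of what getNamenodesConfig() produces, the fragment below writes the equivalent keys by hand for one namenode; the constants referenced above resolve to the dfs.nameservices, dfs.ha.namenodes.*, dfs.namenode.rpc-address.* and dfs.namenode.http-address.* keys. The port numbers are placeholders, since the mock namenodes bind to ephemeral ports.

// Fragment only: the keys getNamenodesConfig() sets for ns0.nn0, written out
// with literal key names. Ports are placeholders for whatever ephemeral ports
// the mock namenodes actually bound to.
Configuration conf = new HdfsConfiguration();
conf.set("dfs.nameservices", "ns0,ns1");
conf.set("dfs.ha.namenodes.ns0", "nn0,nn1");
conf.set("dfs.namenode.rpc-address.ns0.nn0", "localhost:10000");
conf.set("dfs.namenode.http-address.ns0.nn0", "localhost:10001");
// ... repeated for ns0.nn1, ns1.nn0 and ns1.nn1.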
- assertEquals("nn0", namespaceInfo0.get(1).getNamenodeId()); - assertTrue(namespaceInfo0.get(1).getDateModified() < initializedTime); + // Check that the monitored values are expected + final List namespaceInfo = new ArrayList<>(); + for (String nsId : nns.keySet()) { + List nnReports = + resolver.getNamenodesForNameserviceId(nsId); + namespaceInfo.addAll(nnReports); + } + for (FederationNamenodeContext nnInfo : namespaceInfo) { + long modTime = nnInfo.getDateModified(); + long diff = modTime - initializedTime; + if ("ns0".equals(nnInfo.getNameserviceId()) && + "nn0".equals(nnInfo.getNamenodeId())) { + // The modified date won't be updated in ns0.nn0 + // since it isn't monitored by the Router. + assertTrue(nnInfo + " shouldn't be updated: " + diff, + modTime < initializedTime); + } else { + // other namnodes should be updated as expected + assertTrue(nnInfo + " should be updated: " + diff, + modTime > initializedTime); + } + } + } + + @Test + public void testNamenodeMonitoringConfig() throws Exception { + testConfig(asList(), ""); + testConfig(asList("ns1.nn0"), "ns1.nn0"); + testConfig(asList("ns1.nn0", "ns1.nn1"), "ns1.nn0,ns1.nn1"); + testConfig(asList("ns1.nn0", "ns1.nn1"), "ns1.nn0, ns1.nn1"); + testConfig(asList("ns1.nn0", "ns1.nn1"), " ns1.nn0,ns1.nn1"); + testConfig(asList("ns1.nn0", "ns1.nn1"), "ns1.nn0,ns1.nn1,"); + } + + /** + * Test if configuring a Router to monitor particular Namenodes actually + * takes effect. + * @param expectedNNs Namenodes that should be monitored. + * @param confNsIds Router configuration setting for Namenodes to monitor. + */ + private void testConfig( + Collection expectedNNs, String confNsIds) { + + // Setup and start the Router + Configuration conf = getNamenodesConfig(); + Configuration routerConf = new RouterConfigBuilder(conf) + .heartbeat(true) + .build(); + routerConf.set(RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "0.0.0.0:0"); + routerConf.set(RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE, confNsIds); + router = new Router(); + router.init(routerConf); - // other namnodes should be updated as expected - assertEquals("nn1", namespaceInfo0.get(0).getNamenodeId()); - assertTrue(namespaceInfo0.get(0).getDateModified() > initializedTime); + // Test the heartbeat services of the Router + Collection heartbeatServices = + router.getNamenodeHeartbeatServices(); + assertNamenodeHeartbeatService(expectedNNs, heartbeatServices); + } - assertEquals("nn0", namespaceInfo1.get(0).getNamenodeId()); - assertTrue(namespaceInfo1.get(0).getDateModified() > initializedTime); + /** + * Assert that the namenodes monitored by the Router are the expected. + * @param expected Expected namenodes. 
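testNamenodeMonitoringConfig() checks that dfs.federation.router.monitor.namenode is parsed leniently: surrounding whitespace and a trailing comma must not create spurious heartbeat services. The sketch below only restates the trimming behavior those cases imply; the Router's real parsing lives in its heartbeat setup code and is not shown in this patch.

// Sketch of the lenient splitting the test cases expect (not Router code).
import java.util.ArrayList;
import java.util.List;

public final class MonitorNamenodeParsingSketch {

  static List<String> parse(String confValue) {
    List<String> nsNnIds = new ArrayList<>();
    for (String token : confValue.split(",")) {
      String nsNn = token.trim();   // " ns1.nn0" -> "ns1.nn0"
      if (!nsNn.isEmpty()) {        // drops the empty token from "ns1.nn1,"
        nsNnIds.add(nsNn);
      }
    }
    return nsNnIds;
  }

  public static void main(String[] args) {
    // Mirrors the cases exercised by testNamenodeMonitoringConfig().
    System.out.println(parse(""));                   // []
    System.out.println(parse("ns1.nn0, ns1.nn1"));   // [ns1.nn0, ns1.nn1]
    System.out.println(parse("ns1.nn0,ns1.nn1,"));   // [ns1.nn0, ns1.nn1]
  }
}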
+ * @param actual Actual heartbeat services for the Router + */ + private static void assertNamenodeHeartbeatService( + Collection expected, + Collection actual) { - assertEquals("nn1", namespaceInfo1.get(1).getNamenodeId()); - assertTrue(namespaceInfo1.get(1).getDateModified() > initializedTime); + final Set actualSet = new TreeSet<>(); + for (NamenodeHeartbeatService heartbeatService : actual) { + NamenodeStatusReport report = heartbeatService.getNamenodeStatusReport(); + StringBuilder sb = new StringBuilder(); + sb.append(report.getNameserviceId()); + sb.append("."); + sb.append(report.getNamenodeId()); + actualSet.add(sb.toString()); + } + assertTrue(expected + " does not contain all " + actualSet, + expected.containsAll(actualSet)); + assertTrue(actualSet + " does not contain all " + expected, + actualSet.containsAll(expected)); } } From 0dbd87874a16403f537ef31f45ab0fe05924af6f Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sat, 23 Mar 2019 12:16:31 +0530 Subject: [PATCH 0313/1308] HDFS-14388. RBF: Prevent loading metric system when disabled. Contributed by Inigo Goiri. --- .../FederationRPCPerformanceMonitor.java | 36 +++++++++--- .../metrics/NullStateStoreMetrics.java | 56 +++++++++++++++++++ .../federation/metrics/StateStoreMetrics.java | 4 +- .../federation/router/RouterRpcServer.java | 19 ++++--- .../federation/store/StateStoreService.java | 33 ++++++----- 5 files changed, 118 insertions(+), 30 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NullStateStoreMetrics.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java index bae83aa074659..5f06f5918ea5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java @@ -154,47 +154,65 @@ public void proxyOpFailureStandby() { @Override public void proxyOpFailureCommunicate() { - metrics.incrProxyOpFailureCommunicate(); + if (metrics != null) { + metrics.incrProxyOpFailureCommunicate(); + } } @Override public void proxyOpFailureClientOverloaded() { - metrics.incrProxyOpFailureClientOverloaded(); + if (metrics != null) { + metrics.incrProxyOpFailureClientOverloaded(); + } } @Override public void proxyOpNotImplemented() { - metrics.incrProxyOpNotImplemented(); + if (metrics != null) { + metrics.incrProxyOpNotImplemented(); + } } @Override public void proxyOpRetries() { - metrics.incrProxyOpRetries(); + if (metrics != null) { + metrics.incrProxyOpRetries(); + } } @Override public void proxyOpNoNamenodes() { - metrics.incrProxyOpNoNamenodes(); + if (metrics != null) { + metrics.incrProxyOpNoNamenodes(); + } } @Override public void routerFailureStateStore() { - metrics.incrRouterFailureStateStore(); + if (metrics != null) { + metrics.incrRouterFailureStateStore(); + } } @Override public void routerFailureSafemode() { - metrics.incrRouterFailureSafemode(); + if (metrics != null) { + metrics.incrRouterFailureSafemode(); + } } @Override public void routerFailureReadOnly() { - metrics.incrRouterFailureReadOnly(); + if (metrics != null) { + metrics.incrRouterFailureReadOnly(); + } } @Override public void 
routerFailureLocked() { - metrics.incrRouterFailureLocked(); + if (metrics != null) { + metrics.incrRouterFailureLocked(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NullStateStoreMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NullStateStoreMetrics.java new file mode 100644 index 0000000000000..d74aed949e846 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NullStateStoreMetrics.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.metrics; + +/** + * Implementation of the State Store metrics which does not do anything. + * This is used when the metrics are disabled (e.g., tests). + */ +public class NullStateStoreMetrics extends StateStoreMetrics { + public void addRead(long latency) {} + public long getReadOps() { + return -1; + } + public double getReadAvg() { + return -1; + } + public void addWrite(long latency) {} + public long getWriteOps() { + return -1; + } + public double getWriteAvg() { + return -1; + } + public void addFailure(long latency) { } + public long getFailureOps() { + return -1; + } + public double getFailureAvg() { + return -1; + } + public void addRemove(long latency) {} + public long getRemoveOps() { + return -1; + } + public double getRemoveAvg() { + return -1; + } + public void setCacheSize(String name, int size) {} + public void reset() {} + public void shutdown() {} +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java index 09253a26e937b..64bb10822f9f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java @@ -39,7 +39,7 @@ */ @Metrics(name = "StateStoreActivity", about = "Router metrics", context = "dfs") -public final class StateStoreMetrics implements StateStoreMBean { +public class StateStoreMetrics implements StateStoreMBean { private final MetricsRegistry registry = new MetricsRegistry("router"); @@ -54,6 +54,8 @@ public final class StateStoreMetrics implements StateStoreMBean { private Map cacheSizes; + protected StateStoreMetrics() {} + private StateStoreMetrics(Configuration conf) { registry.tag(SessionId, "RouterSession"); registry.tag(ProcessName, "Router"); diff --git 
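HDFS-14388 uses two complementary tricks when metrics are disabled: the RPC performance monitor null-checks its metrics object before every increment, while the State Store swaps in the NullStateStoreMetrics null-object so its callers need no checks at all. Below is a small sketch of the second pattern; the wrapper class and method names are illustrative, only StateStoreMetrics and NullStateStoreMetrics come from the patch.

// Null-object sketch: callers invoke the StateStoreMetrics API directly and
// the no-op subclass absorbs the calls when metrics are disabled.
import org.apache.hadoop.hdfs.server.federation.metrics.NullStateStoreMetrics;
import org.apache.hadoop.hdfs.server.federation.metrics.StateStoreMetrics;

public class StateStoreMetricsHolderSketch {

  private final StateStoreMetrics metrics;

  // Pass StateStoreMetrics.create(conf) when metrics are enabled, or null
  // when dfs.federation.router.metrics.enable is false.
  StateStoreMetricsHolderSketch(StateStoreMetrics enabledMetrics) {
    this.metrics = (enabledMetrics != null)
        ? enabledMetrics : new NullStateStoreMetrics();
  }

  void recordRead(long latencyMs) {
    // No null check needed here: disabled metrics are simply dropped.
    metrics.addRead(latencyMs);
  }
}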
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index e4ea58b507100..739a2ffeb082a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -304,12 +304,17 @@ public RouterRpcServer(Configuration configuration, Router router, this.rpcAddress = new InetSocketAddress( confRpcAddress.getHostName(), listenAddress.getPort()); - // Create metrics monitor - Class rpcMonitorClass = this.conf.getClass( - RBFConfigKeys.DFS_ROUTER_METRICS_CLASS, - RBFConfigKeys.DFS_ROUTER_METRICS_CLASS_DEFAULT, - RouterRpcMonitor.class); - this.rpcMonitor = ReflectionUtils.newInstance(rpcMonitorClass, conf); + if (conf.getBoolean(RBFConfigKeys.DFS_ROUTER_METRICS_ENABLE, + RBFConfigKeys.DFS_ROUTER_METRICS_ENABLE_DEFAULT)) { + // Create metrics monitor + Class rpcMonitorClass = this.conf.getClass( + RBFConfigKeys.DFS_ROUTER_METRICS_CLASS, + RBFConfigKeys.DFS_ROUTER_METRICS_CLASS_DEFAULT, + RouterRpcMonitor.class); + this.rpcMonitor = ReflectionUtils.newInstance(rpcMonitorClass, conf); + } else { + this.rpcMonitor = null; + } // Create the client this.rpcClient = new RouterRpcClient(this.conf, this.router, @@ -326,7 +331,7 @@ protected void serviceInit(Configuration configuration) throws Exception { this.conf = configuration; if (this.rpcMonitor == null) { - LOG.error("Cannot instantiate Router RPC metrics class"); + LOG.info("Do not start Router RPC metrics"); } else { this.rpcMonitor.init(this.conf, this, this.router.getStateStore()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java index c55f4cd7fc01a..37b62fb0b0503 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java @@ -33,6 +33,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.metrics.NullStateStoreMetrics; import org.apache.hadoop.hdfs.server.federation.metrics.StateStoreMBean; import org.apache.hadoop.hdfs.server.federation.metrics.StateStoreMetrics; import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; @@ -172,19 +173,25 @@ protected void serviceInit(Configuration config) throws Exception { this.cacheUpdater = new StateStoreCacheUpdateService(this); addService(this.cacheUpdater); - // Create metrics for the State Store - this.metrics = StateStoreMetrics.create(conf); - - // Adding JMX interface - try { - StandardMBean bean = new StandardMBean(metrics, StateStoreMBean.class); - ObjectName registeredObject = - MBeans.register("Router", "StateStore", bean); - LOG.info("Registered StateStoreMBean: {}", registeredObject); - } catch (NotCompliantMBeanException e) { - throw new RuntimeException("Bad StateStoreMBean setup", e); - } catch (MetricsException e) { - LOG.error("Failed to register 
State Store bean {}", e.getMessage()); + if (conf.getBoolean(RBFConfigKeys.DFS_ROUTER_METRICS_ENABLE, + RBFConfigKeys.DFS_ROUTER_METRICS_ENABLE_DEFAULT)) { + // Create metrics for the State Store + this.metrics = StateStoreMetrics.create(conf); + + // Adding JMX interface + try { + StandardMBean bean = new StandardMBean(metrics, StateStoreMBean.class); + ObjectName registeredObject = + MBeans.register("Router", "StateStore", bean); + LOG.info("Registered StateStoreMBean: {}", registeredObject); + } catch (NotCompliantMBeanException e) { + throw new RuntimeException("Bad StateStoreMBean setup", e); + } catch (MetricsException e) { + LOG.error("Failed to register State Store bean {}", e.getMessage()); + } + } else { + LOG.info("State Store metrics not enabled"); + this.metrics = new NullStateStoreMetrics(); } super.serviceInit(this.conf); From 6c42d4050461ab71c88f123569649793dc53aebd Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sat, 30 Mar 2019 07:15:41 +0530 Subject: [PATCH 0314/1308] HDFS-14316. RBF: Support unavailable subclusters for mount points with multiple destinations. Contributed by Inigo Goiri. --- .../federation/metrics/FederationMetrics.java | 1 + .../resolver/order/DestinationOrder.java | 10 +- .../federation/router/RBFConfigKeys.java | 12 +- .../federation/router/RemoteMethod.java | 10 +- .../server/federation/router/RemoteParam.java | 9 + .../router/RouterClientProtocol.java | 133 +++- .../federation/router/RouterRpcClient.java | 79 ++- .../federation/router/RouterRpcServer.java | 48 +- .../store/impl/MountTableStoreImpl.java | 2 + .../federation/store/records/MountTable.java | 56 +- .../records/impl/pb/MountTablePBImpl.java | 14 + .../hdfs/tools/federation/RouterAdmin.java | 31 +- .../src/main/proto/FederationProtocol.proto | 2 + .../src/main/resources/hdfs-rbf-default.xml | 30 + .../main/webapps/router/federationhealth.html | 2 + .../main/webapps/router/federationhealth.js | 12 + .../src/main/webapps/static/rbf.css | 5 + .../src/site/markdown/HDFSRouterFederation.md | 8 + .../hdfs/server/federation/MockNamenode.java | 229 +++++- .../federation/router/TestRouterAdminCLI.java | 65 +- .../router/TestRouterFaultTolerant.java | 654 ++++++++++++++++++ .../router/TestRouterNamenodeMonitoring.java | 2 +- .../store/FederationStateStoreTestUtils.java | 6 +- .../store/TestStateStoreMountTable.java | 6 +- .../store/records/TestMountTable.java | 19 + .../src/site/markdown/HDFSCommands.md | 4 +- 26 files changed, 1371 insertions(+), 78 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java index 5ab978d7f2bda..a39f17d2a48c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java @@ -270,6 +270,7 @@ public String getMountTable() { innerInfo.put("order", ""); } innerInfo.put("readonly", entry.isReadOnly()); + innerInfo.put("faulttolerant", entry.isFaultTolerant()); info.add(Collections.unmodifiableMap(innerInfo)); } } catch (IOException e) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/DestinationOrder.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/DestinationOrder.java index 99c5e22d12a15..6a637d5e46fc1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/DestinationOrder.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/DestinationOrder.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.federation.resolver.order; +import java.util.EnumSet; + /** * Order of the destinations when we have multiple of them. When the resolver * of files to subclusters (FileSubclusterResolver) has multiple destinations, @@ -27,5 +29,11 @@ public enum DestinationOrder { LOCAL, // Local first RANDOM, // Random order HASH_ALL, // Follow consistent hashing - SPACE // Available space based order + SPACE; // Available space based order + + /** Approaches that write folders in all subclusters. */ + public static final EnumSet FOLDER_ALL = EnumSet.of( + HASH_ALL, + RANDOM, + SPACE); } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java index 657b6cfc12365..153cd6414051d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java @@ -135,7 +135,17 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic { public static final String DFS_ROUTER_ALLOW_PARTIAL_LIST = FEDERATION_ROUTER_PREFIX + "client.allow-partial-listing"; public static final boolean DFS_ROUTER_ALLOW_PARTIAL_LIST_DEFAULT = true; - + public static final String DFS_ROUTER_CLIENT_MOUNT_TIME_OUT = + FEDERATION_ROUTER_PREFIX + "client.mount-status.time-out"; + public static final long DFS_ROUTER_CLIENT_MOUNT_TIME_OUT_DEFAULT = + TimeUnit.SECONDS.toMillis(1); + public static final String DFS_ROUTER_CLIENT_MAX_RETRIES_TIME_OUT = + FEDERATION_ROUTER_PREFIX + "connect.max.retries.on.timeouts"; + public static final int DFS_ROUTER_CLIENT_MAX_RETRIES_TIME_OUT_DEFAULT = 0; + public static final String DFS_ROUTER_CLIENT_CONNECT_TIMEOUT = + FEDERATION_ROUTER_PREFIX + "connect.timeout"; + public static final long DFS_ROUTER_CLIENT_CONNECT_TIMEOUT_DEFAULT = + TimeUnit.SECONDS.toMillis(2); // HDFS Router State Store connection public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS = diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java index 6ff2b01b0b679..f7ba8123d5f77 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java @@ -210,7 +210,13 @@ public Object[] getParams(RemoteLocationContext context) { @Override public String toString() { - return this.protocol.getSimpleName() + "#" + this.methodName + " " + - 
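DestinationOrder.FOLDER_ALL gathers the orders that create folders in every subcluster, and later in this patch MountTable#isAll() is rewritten as a simple membership test on it. A short, self-contained illustration follows; the loop and printout are only for demonstration.

// Demonstrates the FOLDER_ALL EnumSet: HASH_ALL, RANDOM and SPACE write to
// all subclusters, while HASH and LOCAL resolve to a single one.
import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;

public class DestinationOrderSketch {
  public static void main(String[] args) {
    for (DestinationOrder order : DestinationOrder.values()) {
      boolean writesEverywhere = DestinationOrder.FOLDER_ALL.contains(order);
      System.out.println(order + " -> all subclusters: " + writesEverywhere);
    }
  }
}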
Arrays.toString(this.params); + return new StringBuilder() + .append(this.protocol.getSimpleName()) + .append("#") + .append(this.methodName) + .append("(") + .append(Arrays.deepToString(this.params)) + .append(")") + .toString(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteParam.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteParam.java index 8816ff6fb9f37..8b216d919ed0c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteParam.java @@ -68,4 +68,13 @@ public Object getParameterForContext(RemoteLocationContext context) { return context.getDest(); } } + + @Override + public String toString() { + return new StringBuilder() + .append("RemoteParam(") + .append(this.paramMap) + .append(")") + .toString(); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index da601425ca152..6039083a73535 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -84,6 +84,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; +import org.apache.hadoop.net.ConnectTimeoutException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.slf4j.Logger; @@ -93,6 +94,8 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.net.ConnectException; +import java.util.ArrayList; import java.util.Collection; import java.util.EnumSet; import java.util.HashMap; @@ -103,6 +106,7 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.TimeUnit; /** * Module that implements all the RPC calls in {@link ClientProtocol} in the @@ -119,6 +123,8 @@ public class RouterClientProtocol implements ClientProtocol { /** If it requires response from all subclusters. */ private final boolean allowPartialList; + /** Time out when getting the mount statistics. */ + private long mountStatusTimeOut; /** Identifier for the super user. 
*/ private String superUser; @@ -140,6 +146,10 @@ public class RouterClientProtocol implements ClientProtocol { this.allowPartialList = conf.getBoolean( RBFConfigKeys.DFS_ROUTER_ALLOW_PARTIAL_LIST, RBFConfigKeys.DFS_ROUTER_ALLOW_PARTIAL_LIST_DEFAULT); + this.mountStatusTimeOut = conf.getTimeDuration( + RBFConfigKeys.DFS_ROUTER_CLIENT_MOUNT_TIME_OUT, + RBFConfigKeys.DFS_ROUTER_CLIENT_MOUNT_TIME_OUT_DEFAULT, + TimeUnit.SECONDS); // User and group for reporting try { @@ -234,15 +244,92 @@ public HdfsFileStatus create(String src, FsPermission masked, } } - RemoteLocation createLocation = rpcServer.getCreateLocation(src); RemoteMethod method = new RemoteMethod("create", new Class[] {String.class, FsPermission.class, String.class, EnumSetWritable.class, boolean.class, short.class, long.class, CryptoProtocolVersion[].class, String.class, String.class}, - createLocation.getDest(), masked, clientName, flag, createParent, + new RemoteParam(), masked, clientName, flag, createParent, replication, blockSize, supportedVersions, ecPolicyName, storagePolicy); - return (HdfsFileStatus) rpcClient.invokeSingle(createLocation, method); + final List locations = + rpcServer.getLocationsForPath(src, true); + RemoteLocation createLocation = null; + try { + createLocation = rpcServer.getCreateLocation(src); + return (HdfsFileStatus) rpcClient.invokeSingle(createLocation, method); + } catch (IOException ioe) { + final List newLocations = checkFaultTolerantRetry( + method, src, ioe, createLocation, locations); + return rpcClient.invokeSequential( + newLocations, method, HdfsFileStatus.class, null); + } + } + + /** + * Check if an exception is caused by an unavailable subcluster or not. It + * also checks the causes. + * @param ioe IOException to check. + * @return If caused by an unavailable subcluster. False if the should not be + * retried (e.g., NSQuotaExceededException). + */ + private static boolean isUnavailableSubclusterException( + final IOException ioe) { + if (ioe instanceof ConnectException || + ioe instanceof ConnectTimeoutException || + ioe instanceof NoNamenodesAvailableException) { + return true; + } + if (ioe.getCause() instanceof IOException) { + IOException cause = (IOException)ioe.getCause(); + return isUnavailableSubclusterException(cause); + } + return false; + } + + /** + * Check if a remote method can be retried in other subclusters when it + * failed in the original destination. This method returns the list of + * locations to retry in. This is used by fault tolerant mount points. + * @param method Method that failed and might be retried. + * @param src Path where the method was invoked. + * @param e Exception that was triggered. + * @param excludeLoc Location that failed and should be excluded. + * @param locations All the locations to retry. + * @return The locations where we should retry (excluding the failed ones). + * @throws IOException If this path is not fault tolerant or the exception + * should not be retried (e.g., NSQuotaExceededException). 
+ */ + private List checkFaultTolerantRetry( + final RemoteMethod method, final String src, final IOException ioe, + final RemoteLocation excludeLoc, final List locations) + throws IOException { + + if (!isUnavailableSubclusterException(ioe)) { + LOG.debug("{} exception cannot be retried", + ioe.getClass().getSimpleName()); + throw ioe; + } + if (!rpcServer.isPathFaultTolerant(src)) { + LOG.debug("{} does not allow retrying a failed subcluster", src); + throw ioe; + } + + final List newLocations; + if (excludeLoc == null) { + LOG.error("Cannot invoke {} for {}: {}", method, src, ioe.getMessage()); + newLocations = locations; + } else { + LOG.error("Cannot invoke {} for {} in {}: {}", + method, src, excludeLoc, ioe.getMessage()); + newLocations = new ArrayList<>(); + for (final RemoteLocation loc : locations) { + if (!loc.equals(excludeLoc)) { + newLocations.add(loc); + } + } + } + LOG.info("{} allows retrying failed subclusters in {}", src, newLocations); + return newLocations; } @Override @@ -604,13 +691,20 @@ public boolean mkdirs(String src, FsPermission masked, boolean createParent) } } catch (IOException ioe) { // Can't query if this file exists or not. - LOG.error("Error requesting file info for path {} while proxing mkdirs", - src, ioe); + LOG.error("Error getting file info for {} while proxying mkdirs: {}", + src, ioe.getMessage()); } } - RemoteLocation firstLocation = locations.get(0); - return (boolean) rpcClient.invokeSingle(firstLocation, method); + final RemoteLocation firstLocation = locations.get(0); + try { + return (boolean) rpcClient.invokeSingle(firstLocation, method); + } catch (IOException ioe) { + final List newLocations = checkFaultTolerantRetry( + method, src, ioe, firstLocation, locations); + return rpcClient.invokeSequential( + newLocations, method, Boolean.class, Boolean.TRUE); + } } @Override @@ -1702,10 +1796,26 @@ private ContentSummary aggregateContentSummary( */ private HdfsFileStatus getFileInfoAll(final List locations, final RemoteMethod method) throws IOException { + return getFileInfoAll(locations, method, -1); + } + + /** + * Get the file info from all the locations. + * + * @param locations Locations to check. + * @param method The file information method to run. + * @param timeOutMs Time out for the operation in milliseconds. + * @return The first file info if it's a file, the directory if it's + * everywhere. + * @throws IOException If all the locations throw an exception. 
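From the client's point of view the retry above is invisible: on a fault tolerant mount spanning two subclusters, a create() that is routed to an unreachable subcluster is re-run by the Router against the remaining destination. Here is a hedged client-side sketch; the Router address, the /ft mount point and the assumption that ns1 is down are all illustrative, not part of this patch.

// Client-side view of the fault-tolerant retry: the client does nothing
// special. Assumes /ft is a -faulttolerant HASH_ALL mount over ns0 and ns1
// and that ns1 is currently unreachable; names and the URI are placeholders.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FaultTolerantCreateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem routerFs =
        FileSystem.get(URI.create("hdfs://router0:8888"), conf);
    try (FSDataOutputStream out = routerFs.create(new Path("/ft/file0"))) {
      out.writeBytes("hello");
    }
    // If the hash placement picks the unreachable ns1, the Router sees a
    // ConnectException, consults checkFaultTolerantRetry() and re-runs the
    // create on ns0; either way the client just sees a successful write.
  }
}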
+ */ + private HdfsFileStatus getFileInfoAll(final List locations, + final RemoteMethod method, long timeOutMs) throws IOException { // Get the file info from everybody Map results = - rpcClient.invokeConcurrent(locations, method, HdfsFileStatus.class); + rpcClient.invokeConcurrent(locations, method, false, false, timeOutMs, + HdfsFileStatus.class); int children = 0; // We return the first file HdfsFileStatus dirStatus = null; @@ -1762,9 +1872,10 @@ private HdfsFileStatus getMountPointStatus( MountTableResolver mountTable = (MountTableResolver) subclusterResolver; MountTable entry = mountTable.getMountPoint(mName); if (entry != null) { - HdfsFileStatus fInfo = getFileInfoAll(entry.getDestinations(), - new RemoteMethod("getFileInfo", new Class[] {String.class}, - new RemoteParam())); + RemoteMethod method = new RemoteMethod("getFileInfo", + new Class[] {String.class}, new RemoteParam()); + HdfsFileStatus fInfo = getFileInfoAll( + entry.getDestinations(), method, mountStatusTimeOut); if (fInfo != null) { permission = fInfo.getPermission(); owner = fInfo.getOwner(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index 3d80c4167d1e6..730952b9db691 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -18,11 +18,15 @@ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_TIMEOUT_KEY; + import java.io.FileNotFoundException; import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; +import java.net.ConnectException; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Arrays; @@ -62,6 +66,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.StandbyException; +import org.apache.hadoop.net.ConnectTimeoutException; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -126,7 +131,8 @@ public RouterRpcClient(Configuration conf, Router router, this.namenodeResolver = resolver; - this.connectionManager = new ConnectionManager(conf); + Configuration clientConf = getClientConfiguration(conf); + this.connectionManager = new ConnectionManager(clientConf); this.connectionManager.start(); int numThreads = conf.getInt( @@ -165,6 +171,31 @@ public RouterRpcClient(Configuration conf, Router router, failoverSleepBaseMillis, failoverSleepMaxMillis); } + /** + * Get the configuration for the RPC client. It takes the Router + * configuration and transforms it into regular RPC Client configuration. + * @param conf Input configuration. + * @return Configuration for the RPC client. 
+ */ + private Configuration getClientConfiguration(final Configuration conf) { + Configuration clientConf = new Configuration(conf); + int maxRetries = conf.getInt( + RBFConfigKeys.DFS_ROUTER_CLIENT_MAX_RETRIES_TIME_OUT, + RBFConfigKeys.DFS_ROUTER_CLIENT_MAX_RETRIES_TIME_OUT_DEFAULT); + if (maxRetries >= 0) { + clientConf.setInt( + IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, maxRetries); + } + long connectTimeOut = conf.getTimeDuration( + RBFConfigKeys.DFS_ROUTER_CLIENT_CONNECT_TIMEOUT, + RBFConfigKeys.DFS_ROUTER_CLIENT_CONNECT_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + if (connectTimeOut >= 0) { + clientConf.setLong(IPC_CLIENT_CONNECT_TIMEOUT_KEY, connectTimeOut); + } + return clientConf; + } + /** * Get the active namenode resolver used by this client. * @return Active namenode resolver. @@ -341,17 +372,19 @@ private RetryDecision shouldRetry(final IOException ioe, final int retryCount, * @param method Remote ClientProtcol method to invoke. * @param params Variable list of parameters matching the method. * @return The result of invoking the method. - * @throws IOException + * @throws ConnectException If it cannot connect to any Namenode. + * @throws StandbyException If all Namenodes are in Standby. + * @throws IOException If it cannot invoke the method. */ private Object invokeMethod( final UserGroupInformation ugi, final List namenodes, final Class protocol, final Method method, final Object... params) - throws IOException { + throws ConnectException, StandbyException, IOException { if (namenodes == null || namenodes.isEmpty()) { throw new IOException("No namenodes to invoke " + method.getName() + - " with params " + Arrays.toString(params) + " from " + " with params " + Arrays.deepToString(params) + " from " + router.getRouterId()); } @@ -388,6 +421,12 @@ private Object invokeMethod( this.rpcMonitor.proxyOpFailureStandby(); } failover = true; + } else if (ioe instanceof ConnectException || + ioe instanceof ConnectTimeoutException) { + if (this.rpcMonitor != null) { + this.rpcMonitor.proxyOpFailureCommunicate(); + } + failover = true; } else if (ioe instanceof RemoteException) { if (this.rpcMonitor != null) { this.rpcMonitor.proxyOpComplete(true); @@ -408,7 +447,7 @@ private Object invokeMethod( if (this.rpcMonitor != null) { this.rpcMonitor.proxyOpNoNamenodes(); } - LOG.error("Can not get available namenode for {} {} error: {}", + LOG.error("Cannot get available namenode for {} {} error: {}", nsId, rpcAddress, ioe.getMessage()); // Throw RetriableException so that client can retry throw new RetriableException(ioe); @@ -433,24 +472,33 @@ private Object invokeMethod( // All namenodes were unavailable or in standby String msg = "No namenode available to invoke " + method.getName() + " " + - Arrays.toString(params); + Arrays.deepToString(params) + " in " + namenodes + " from " + + router.getRouterId(); LOG.error(msg); + int exConnect = 0; for (Entry entry : ioes.entrySet()) { FederationNamenodeContext namenode = entry.getKey(); - String nsId = namenode.getNameserviceId(); - String nnId = namenode.getNamenodeId(); + String nnKey = namenode.getNamenodeKey(); String addr = namenode.getRpcAddress(); IOException ioe = entry.getValue(); if (ioe instanceof StandbyException) { - LOG.error("{} {} at {} is in Standby: {}", nsId, nnId, addr, - ioe.getMessage()); + LOG.error("{} at {} is in Standby: {}", + nnKey, addr, ioe.getMessage()); + } else if (ioe instanceof ConnectException || + ioe instanceof ConnectTimeoutException) { + exConnect++; + LOG.error("{} at {} cannot be reached: {}", 
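getClientConfiguration() is only a key translation: the Router-scoped settings are copied onto the generic IPC client keys before the ConnectionManager is built, so with the shipped defaults the Router fails fast and drives retries itself instead of letting the IPC layer block. The fragment below spells out the effective keys and default values; the literal key strings correspond to the CommonConfigurationKeysPublic constants imported above.

// Fragment: the effective IPC client settings produced from the defaults
// dfs.federation.router.connect.max.retries.on.timeouts = 0 and
// dfs.federation.router.connect.timeout = 2s.
clientConf.setInt("ipc.client.connect.max.retries.on.timeouts", 0);
clientConf.setLong("ipc.client.connect.timeout", 2000L);  // 2 seconds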
+ nnKey, addr, ioe.getMessage()); } else { - LOG.error("{} {} at {} error: \"{}\"", - nsId, nnId, addr, ioe.getMessage()); + LOG.error("{} at {} error: \"{}\"", nnKey, addr, ioe.getMessage()); } } - throw new StandbyException(msg); + if (exConnect == ioes.size()) { + throw new ConnectException(msg); + } else { + throw new StandbyException(msg); + } } /** @@ -497,6 +545,9 @@ private Object invoke(String nsId, int retryCount, final Method method, // failover, invoker looks for standby exceptions for failover. if (ioe instanceof StandbyException) { throw ioe; + } else if (ioe instanceof ConnectException || + ioe instanceof ConnectTimeoutException) { + throw ioe; } else { throw new StandbyException(ioe.getMessage()); } @@ -1043,7 +1094,7 @@ public Map invokeConcurrent( if (locations.isEmpty()) { throw new IOException("No remote locations available"); - } else if (locations.size() == 1) { + } else if (locations.size() == 1 && timeOutMs <= 0) { // Shortcut, just one call T location = locations.iterator().next(); String ns = location.getNameserviceId(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 739a2ffeb082a..b934355dc9e5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -30,6 +30,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.lang.reflect.Array; +import java.net.ConnectException; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collection; @@ -133,6 +134,7 @@ import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.security.AccessControlException; @@ -294,7 +296,9 @@ public RouterRpcServer(Configuration configuration, Router router, AccessControlException.class, LeaseExpiredException.class, NotReplicatedYetException.class, - IOException.class); + IOException.class, + ConnectException.class, + RetriableException.class); this.rpcServer.addSuppressedLoggingExceptions( StandbyException.class); @@ -520,7 +524,7 @@ T invokeAtAvailableNs(RemoteMethod method, Class clazz) // If default Ns is not present return result from first namespace. Set nss = namenodeResolver.getNamespaces(); if (nss.isEmpty()) { - throw new IOException("No namespace availaible."); + throw new IOException("No namespace available."); } nsId = nss.iterator().next().getNameserviceId(); return rpcClient.invokeSingle(nsId, method, clazz); @@ -566,6 +570,7 @@ public HdfsFileStatus create(String src, FsPermission masked, replication, blockSize, supportedVersions, ecPolicyName, storagePolicy); } + /** * Get the location to create a file. It checks if the file already existed * in one of the locations. @@ -574,10 +579,24 @@ public HdfsFileStatus create(String src, FsPermission masked, * @return The remote location for this file. * @throws IOException If the file has no creation location. 
*/ - RemoteLocation getCreateLocation(final String src) + RemoteLocation getCreateLocation(final String src) throws IOException { + final List locations = getLocationsForPath(src, true); + return getCreateLocation(src, locations); + } + + /** + * Get the location to create a file. It checks if the file already existed + * in one of the locations. + * + * @param src Path of the file to check. + * @param locations Prefetched locations for the file. + * @return The remote location for this file. + * @throws IOException If the file has no creation location. + */ + RemoteLocation getCreateLocation( + final String src, final List locations) throws IOException { - final List locations = getLocationsForPath(src, true); if (locations == null || locations.isEmpty()) { throw new IOException("Cannot get locations to create " + src); } @@ -1568,6 +1587,27 @@ boolean isPathAll(final String path) { return false; } + /** + * Check if a path supports failed subclusters. + * + * @param path Path to check. + * @return If a path should support failed subclusters. + */ + boolean isPathFaultTolerant(final String path) { + if (subclusterResolver instanceof MountTableResolver) { + try { + MountTableResolver mountTable = (MountTableResolver) subclusterResolver; + MountTable entry = mountTable.getMountPoint(path); + if (entry != null) { + return entry.isFaultTolerant(); + } + } catch (IOException e) { + LOG.error("Cannot get mount point", e); + } + } + return false; + } + /** * Check if call needs to be invoked to all the locations. The call is * supposed to be invoked in all the locations in case the order of the mount diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java index d5e1857a8c11c..87610385d87a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java @@ -66,6 +66,7 @@ public AddMountTableEntryResponse addMountTableEntry( if (pc != null) { pc.checkPermission(mountTable, FsAction.WRITE); } + mountTable.validate(); } boolean status = getDriver().put(mountTable, false, true); @@ -85,6 +86,7 @@ public UpdateMountTableEntryResponse updateMountTableEntry( if (pc != null) { pc.checkPermission(mountTable, FsAction.WRITE); } + mountTable.validate(); } boolean status = getDriver().put(mountTable, true, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java index c1585b06df561..d1351a340c3cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java @@ -26,6 +26,7 @@ import java.util.SortedMap; import java.util.TreeMap; +import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; @@ -59,6 +60,10 @@ public abstract class MountTable extends BaseRecord { 
"Invalid entry, invalid destination path "; public static final String ERROR_MSG_ALL_DEST_MUST_START_WITH_BACK_SLASH = "Invalid entry, all destination must start with / "; + private static final String ERROR_MSG_FAULT_TOLERANT_MULTI_DEST = + "Invalid entry, fault tolerance requires multiple destinations "; + private static final String ERROR_MSG_FAULT_TOLERANT_ALL = + "Invalid entry, fault tolerance only supported for ALL order "; /** Comparator for paths which considers the /. */ public static final Comparator PATH_COMPARATOR = @@ -228,6 +233,20 @@ public static MountTable newInstance(final String src, */ public abstract void setDestOrder(DestinationOrder order); + /** + * Check if the mount point supports a failed destination. + * + * @return If it supports failures. + */ + public abstract boolean isFaultTolerant(); + + /** + * Set if the mount point supports failed destinations. + * + * @param faultTolerant If it supports failures. + */ + public abstract void setFaultTolerant(boolean faultTolerant); + /** * Get owner name of this mount table entry. * @@ -321,11 +340,14 @@ public String toString() { List destinations = this.getDestinations(); sb.append(destinations); if (destinations != null && destinations.size() > 1) { - sb.append("[" + this.getDestOrder() + "]"); + sb.append("[").append(this.getDestOrder()).append("]"); } if (this.isReadOnly()) { sb.append("[RO]"); } + if (this.isFaultTolerant()) { + sb.append("[FT]"); + } if (this.getOwnerName() != null) { sb.append("[owner:").append(this.getOwnerName()).append("]"); @@ -383,6 +405,16 @@ public void validate() { ERROR_MSG_ALL_DEST_MUST_START_WITH_BACK_SLASH + this); } } + if (isFaultTolerant()) { + if (getDestinations().size() < 2) { + throw new IllegalArgumentException( + ERROR_MSG_FAULT_TOLERANT_MULTI_DEST + this); + } + if (!isAll()) { + throw new IllegalArgumentException( + ERROR_MSG_FAULT_TOLERANT_ALL + this); + } + } } @Override @@ -397,6 +429,7 @@ public int hashCode() { .append(this.getDestinations()) .append(this.isReadOnly()) .append(this.getDestOrder()) + .append(this.isFaultTolerant()) .toHashCode(); } @@ -404,16 +437,13 @@ public int hashCode() { public boolean equals(Object obj) { if (obj instanceof MountTable) { MountTable other = (MountTable)obj; - if (!this.getSourcePath().equals(other.getSourcePath())) { - return false; - } else if (!this.getDestinations().equals(other.getDestinations())) { - return false; - } else if (this.isReadOnly() != other.isReadOnly()) { - return false; - } else if (!this.getDestOrder().equals(other.getDestOrder())) { - return false; - } - return true; + return new EqualsBuilder() + .append(this.getSourcePath(), other.getSourcePath()) + .append(this.getDestinations(), other.getDestinations()) + .append(this.isReadOnly(), other.isReadOnly()) + .append(this.getDestOrder(), other.getDestOrder()) + .append(this.isFaultTolerant(), other.isFaultTolerant()) + .isEquals(); } return false; } @@ -424,9 +454,7 @@ public boolean equals(Object obj) { */ public boolean isAll() { DestinationOrder order = getDestOrder(); - return order == DestinationOrder.HASH_ALL || - order == DestinationOrder.RANDOM || - order == DestinationOrder.SPACE; + return DestinationOrder.FOLDER_ALL.contains(order); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java index 
4c7622c099007..62cdc7272fedc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java @@ -195,6 +195,20 @@ public void setDestOrder(DestinationOrder order) { } } + @Override + public boolean isFaultTolerant() { + MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder(); + if (!proto.hasFaultTolerant()) { + return false; + } + return proto.getFaultTolerant(); + } + + @Override + public void setFaultTolerant(boolean faultTolerant) { + this.translator.getBuilder().setFaultTolerant(faultTolerant); + } + @Override public String getOwnerName() { MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index b04b0692b0a18..61da7e926d940 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -135,12 +135,12 @@ private String getUsage(String cmd) { } if (cmd.equals("-add")) { return "\t[-add " - + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]"; } else if (cmd.equals("-update")) { return "\t[-update " + " " - + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]"; } else if (cmd.equals("-rm")) { return "\t[-rm ]"; @@ -415,6 +415,7 @@ public boolean addMount(String[] parameters, int i) throws IOException { // Optional parameters boolean readOnly = false; + boolean faultTolerant = false; String owner = null; String group = null; FsPermission mode = null; @@ -422,6 +423,8 @@ public boolean addMount(String[] parameters, int i) throws IOException { while (i < parameters.length) { if (parameters[i].equals("-readonly")) { readOnly = true; + } else if (parameters[i].equals("-faulttolerant")) { + faultTolerant = true; } else if (parameters[i].equals("-order")) { i++; try { @@ -447,7 +450,7 @@ public boolean addMount(String[] parameters, int i) throws IOException { i++; } - return addMount(mount, nss, dest, readOnly, order, + return addMount(mount, nss, dest, readOnly, faultTolerant, order, new ACLEntity(owner, group, mode)); } @@ -464,7 +467,8 @@ public boolean addMount(String[] parameters, int i) throws IOException { * @throws IOException Error adding the mount point. 
*/ public boolean addMount(String mount, String[] nss, String dest, - boolean readonly, DestinationOrder order, ACLEntity aclInfo) + boolean readonly, boolean faultTolerant, DestinationOrder order, + ACLEntity aclInfo) throws IOException { mount = normalizeFileSystemPath(mount); // Get the existing entry @@ -491,6 +495,9 @@ public boolean addMount(String mount, String[] nss, String dest, if (readonly) { newEntry.setReadOnly(true); } + if (faultTolerant) { + newEntry.setFaultTolerant(true); + } if (order != null) { newEntry.setDestOrder(order); } @@ -508,6 +515,8 @@ public boolean addMount(String mount, String[] nss, String dest, newEntry.setMode(aclInfo.getMode()); } + newEntry.validate(); + AddMountTableEntryRequest request = AddMountTableEntryRequest.newInstance(newEntry); AddMountTableEntryResponse addResponse = @@ -527,6 +536,9 @@ public boolean addMount(String mount, String[] nss, String dest, if (readonly) { existingEntry.setReadOnly(true); } + if (faultTolerant) { + existingEntry.setFaultTolerant(true); + } if (order != null) { existingEntry.setDestOrder(order); } @@ -544,6 +556,8 @@ public boolean addMount(String mount, String[] nss, String dest, existingEntry.setMode(aclInfo.getMode()); } + existingEntry.validate(); + UpdateMountTableEntryRequest updateRequest = UpdateMountTableEntryRequest.newInstance(existingEntry); UpdateMountTableEntryResponse updateResponse = @@ -572,6 +586,7 @@ public boolean updateMount(String[] parameters, int i) throws IOException { // Optional parameters boolean readOnly = false; + boolean faultTolerant = false; String owner = null; String group = null; FsPermission mode = null; @@ -579,6 +594,8 @@ public boolean updateMount(String[] parameters, int i) throws IOException { while (i < parameters.length) { if (parameters[i].equals("-readonly")) { readOnly = true; + } else if (parameters[i].equals("-faulttolerant")) { + faultTolerant = true; } else if (parameters[i].equals("-order")) { i++; try { @@ -604,7 +621,7 @@ public boolean updateMount(String[] parameters, int i) throws IOException { i++; } - return updateMount(mount, nss, dest, readOnly, order, + return updateMount(mount, nss, dest, readOnly, faultTolerant, order, new ACLEntity(owner, group, mode)); } @@ -621,7 +638,8 @@ public boolean updateMount(String[] parameters, int i) throws IOException { * @throws IOException Error updating the mount point. 
*/ public boolean updateMount(String mount, String[] nss, String dest, - boolean readonly, DestinationOrder order, ACLEntity aclInfo) + boolean readonly, boolean faultTolerant, + DestinationOrder order, ACLEntity aclInfo) throws IOException { mount = normalizeFileSystemPath(mount); MountTableManager mountTable = client.getMountTableManager(); @@ -634,6 +652,7 @@ public boolean updateMount(String mount, String[] nss, String dest, MountTable newEntry = MountTable.newInstance(mount, destMap); newEntry.setReadOnly(readonly); + newEntry.setFaultTolerant(faultTolerant); if (order != null) { newEntry.setDestOrder(order); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto index a55be731a7462..6a60e4ad0d916 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto @@ -143,6 +143,8 @@ message MountTableRecordProto { optional int32 mode = 12; optional QuotaUsageProto quota = 13; + + optional bool faultTolerant = 14 [default = false]; } message AddMountTableEntryRequestProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index 1034c87ff8f20..e23f863e8a633 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -503,6 +503,36 @@ + + dfs.federation.router.client.mount-status.time-out + 1s + + Set a timeout for the Router when listing folders containing mount + points. In this process, the Router checks the mount table and then it + checks permissions in the subcluster. After the time out, we return the + default values. + + + + + dfs.federation.router.connect.max.retries.on.timeouts + 0 + + Maximum number of retries for the IPC Client when connecting to the + subclusters. By default, it doesn't let the IPC retry and the Router + handles it. + + + + + dfs.federation.router.connect.timeout + 2s + + Time out for the IPC client connecting to the subclusters. This should be + short as the Router has knowledge of the state of the Routers. 
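The three Router client settings added to hdfs-rbf-default.xml above are what the fault-tolerance test later in this series tightens so that a dead subcluster fails fast instead of blocking writes. A hedged sketch of overriding them programmatically, using the raw property names from the defaults file (an operator would typically put the same values in hdfs-rbf-site.xml instead):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public final class RouterTimeoutTuningSketch {
      public static Configuration build() {
        Configuration routerConf = new HdfsConfiguration();
        // Fail fast when a subcluster does not answer the IPC connect.
        routerConf.setTimeDuration(
            "dfs.federation.router.connect.timeout", 500, TimeUnit.MILLISECONDS);
        // Leave retries to the Router rather than to the IPC client.
        routerConf.setInt(
            "dfs.federation.router.connect.max.retries.on.timeouts", 0);
        // Bound how long listing a folder with mount points may wait on subclusters.
        routerConf.setTimeDuration(
            "dfs.federation.router.client.mount-status.time-out", 1, TimeUnit.SECONDS);
        return routerConf;
      }
    }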
+ + + dfs.federation.router.keytab.file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html index c591698e4b669..cf8653bc8f728 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html @@ -393,6 +393,7 @@ Target path Order Read only + Fault tolerant Owner Group Permission @@ -409,6 +410,7 @@ {path} {order} + {ownerName} {groupName} {mode} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js index 5da7b079ffe01..e655e604d8ee3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js @@ -324,8 +324,20 @@ } } + function augment_fault_tolerant(mountTable) { + for (var i = 0, e = mountTable.length; i < e; ++i) { + if (mountTable[i].faulttolerant == true) { + mountTable[i].faulttolerant = "true" + mountTable[i].ftStatus = "Fault tolerant" + } else { + mountTable[i].faulttolerant = "false" + } + } + } + resource.MountTable = JSON.parse(resource.MountTable) augment_read_only(resource.MountTable) + augment_fault_tolerant(resource.MountTable) return resource; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css index 5cdd8269ca1f9..b2eef6ad7e9d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css @@ -135,3 +135,8 @@ color: #5fa341; content: "\e033"; } + +.mount-table-fault-tolerant-true:before { + color: #5fa341; + content: "\e033"; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index f24ff12993f5e..83cecda53d7b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -266,6 +266,14 @@ To determine which subcluster contains a file: [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -getDestination /user/user1/file.txt Note that consistency of the data across subclusters is not guaranteed by the Router. +By default, if one subcluster is unavailable, writes may fail if they target that subcluster. +To allow writing in another subcluster, one can make the mount point fault tolerant: + + [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /data ns1,ns2 /data -order HASH_ALL -faulttolerant + +Note that this can lead to a file to be written in multiple subclusters or a folder missing in one. +One needs to be aware of the possibility of these inconsistencies and target this `faulttolerant` approach to resilient paths. +An example for this is the `/app-logs` folder which will mostly write once into a subfolder. 
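The documentation above shows the CLI path; the same fault-tolerant mount point can also be created through the mount table admin API, which is how the new TestRouterFaultTolerant drives it. A sketch under the assumption that admin is a RouterClient connected to a running Router's admin address (fragment, error handling omitted; the types are the federation resolver/store classes imported by that test):

    MountTableManager mountTable = admin.getMountTableManager();
    Map<String, String> destMap = new LinkedHashMap<>();
    destMap.put("ns1", "/data");
    destMap.put("ns2", "/data");
    MountTable entry = MountTable.newInstance("/data", destMap);
    entry.setDestOrder(DestinationOrder.HASH_ALL); // fault tolerance needs an ALL order
    entry.setFaultTolerant(true);
    entry.validate();                              // same check the CLI now performs
    boolean added = mountTable.addMountTableEntry(
        AddMountTableEntryRequest.newInstance(entry)).getStatus();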
### Disabling nameservices diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java index 9b58fff085c9e..d8dffeedd18e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java @@ -18,19 +18,44 @@ package org.apache.hadoop.hdfs.server.federation; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyShort; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.FileNotFoundException; import java.io.IOException; +import java.net.ConnectException; import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.concurrent.ConcurrentSkipListMap; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceStatus; import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService; @@ -40,15 +65,29 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB; import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; +import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; +import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.http.HttpServer2; +import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.ProtobufRpcEngine; import 
org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.StandbyException; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.DataChecksum.Type; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.protobuf.BlockingService; @@ -59,9 +98,15 @@ */ public class MockNamenode { + private static final Logger LOG = + LoggerFactory.getLogger(MockNamenode.class); + + /** Mock implementation of the Namenode. */ private final NamenodeProtocols mockNn; + /** Name service identifier (subcluster). */ + private String nsId; /** HA state of the Namenode. */ private HAServiceState haState = HAServiceState.STANDBY; @@ -71,9 +116,13 @@ public class MockNamenode { private HttpServer2 httpServer; - public MockNamenode() throws Exception { - Configuration conf = new Configuration(); + public MockNamenode(final String nsIdentifier) throws IOException { + this(nsIdentifier, new HdfsConfiguration()); + } + public MockNamenode(final String nsIdentifier, final Configuration conf) + throws IOException { + this.nsId = nsIdentifier; this.mockNn = mock(NamenodeProtocols.class); setupMock(); setupRPCServer(conf); @@ -86,7 +135,7 @@ public MockNamenode() throws Exception { * @throws IOException If the mock cannot be setup. */ protected void setupMock() throws IOException { - NamespaceInfo nsInfo = new NamespaceInfo(1, "clusterId", "bpId", 1); + NamespaceInfo nsInfo = new NamespaceInfo(1, this.nsId, this.nsId, 1); when(mockNn.versionRequest()).thenReturn(nsInfo); when(mockNn.getServiceStatus()).thenAnswer(new Answer() { @@ -115,11 +164,16 @@ private void setupRPCServer(final Configuration conf) throws IOException { ClientNamenodeProtocol.newReflectiveBlockingService( clientNNProtoXlator); + int numHandlers = conf.getInt( + DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, + DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_DEFAULT); + rpcServer = new RPC.Builder(conf) .setProtocol(ClientNamenodeProtocolPB.class) .setInstance(clientNNPbService) .setBindAddress("0.0.0.0") .setPort(0) + .setNumHandlers(numHandlers) .build(); NamenodeProtocolServerSideTranslatorPB nnProtoXlator = @@ -146,6 +200,18 @@ private void setupRPCServer(final Configuration conf) throws IOException { DFSUtil.addPBProtocol( conf, HAServiceProtocolPB.class, haProtoPbService, rpcServer); + this.rpcServer.addTerseExceptions( + RemoteException.class, + SafeModeException.class, + FileNotFoundException.class, + FileAlreadyExistsException.class, + AccessControlException.class, + LeaseExpiredException.class, + NotReplicatedYetException.class, + IOException.class, + ConnectException.class, + StandbyException.class); + rpcServer.start(); } @@ -188,6 +254,14 @@ public NamenodeProtocols getMock() { return mockNn; } + /** + * Get the name service id (subcluster) of the Mock Namenode. + * @return Name service identifier. + */ + public String getNameserviceId() { + return nsId; + } + /** * Get the HA state of the Mock Namenode. * @return HA state (ACTIVE or STANDBY). 
@@ -217,9 +291,158 @@ public void transitionToStandby() { public void stop() throws Exception { if (rpcServer != null) { rpcServer.stop(); + rpcServer = null; } if (httpServer != null) { httpServer.stop(); + httpServer = null; } } + + /** + * Add the mock for the FileSystem calls in ClientProtocol. + * @throws IOException If it cannot be setup. + */ + public void addFileSystemMock() throws IOException { + final SortedMap fs = + new ConcurrentSkipListMap(); + + DirectoryListing l = mockNn.getListing(anyString(), any(), anyBoolean()); + when(l).thenAnswer(invocation -> { + String src = getSrc(invocation); + LOG.info("{} getListing({})", nsId, src); + if (!src.endsWith("/")) { + src += "/"; + } + Map files = + fs.subMap(src, src + Character.MAX_VALUE); + List list = new ArrayList<>(); + for (String file : files.keySet()) { + if (file.substring(src.length()).indexOf('/') < 0) { + HdfsFileStatus fileStatus = + getMockHdfsFileStatus(file, fs.get(file)); + list.add(fileStatus); + } + } + HdfsFileStatus[] array = list.toArray( + new HdfsFileStatus[list.size()]); + return new DirectoryListing(array, 0); + }); + when(mockNn.getFileInfo(anyString())).thenAnswer(invocation -> { + String src = getSrc(invocation); + LOG.info("{} getFileInfo({})", nsId, src); + return getMockHdfsFileStatus(src, fs.get(src)); + }); + HdfsFileStatus c = mockNn.create(anyString(), any(), anyString(), any(), + anyBoolean(), anyShort(), anyLong(), any(), any(), any()); + when(c).thenAnswer(invocation -> { + String src = getSrc(invocation); + LOG.info("{} create({})", nsId, src); + fs.put(src, "FILE"); + return getMockHdfsFileStatus(src, "FILE"); + }); + LocatedBlocks b = mockNn.getBlockLocations( + anyString(), anyLong(), anyLong()); + when(b).thenAnswer(invocation -> { + String src = getSrc(invocation); + LOG.info("{} getBlockLocations({})", nsId, src); + if (!fs.containsKey(src)) { + LOG.error("{} cannot find {} for getBlockLocations", nsId, src); + throw new FileNotFoundException("File does not exist " + src); + } + return mock(LocatedBlocks.class); + }); + boolean f = mockNn.complete(anyString(), anyString(), any(), anyLong()); + when(f).thenAnswer(invocation -> { + String src = getSrc(invocation); + if (!fs.containsKey(src)) { + LOG.error("{} cannot find {} for complete", nsId, src); + throw new FileNotFoundException("File does not exist " + src); + } + return true; + }); + LocatedBlock a = mockNn.addBlock( + anyString(), anyString(), any(), any(), anyLong(), any(), any()); + when(a).thenAnswer(invocation -> { + String src = getSrc(invocation); + if (!fs.containsKey(src)) { + LOG.error("{} cannot find {} for addBlock", nsId, src); + throw new FileNotFoundException("File does not exist " + src); + } + return getMockLocatedBlock(nsId); + }); + boolean m = mockNn.mkdirs(anyString(), any(), anyBoolean()); + when(m).thenAnswer(invocation -> { + String src = getSrc(invocation); + LOG.info("{} mkdirs({})", nsId, src); + fs.put(src, "DIRECTORY"); + return true; + }); + when(mockNn.getServerDefaults()).thenAnswer(invocation -> { + LOG.info("{} getServerDefaults", nsId); + FsServerDefaults defaults = mock(FsServerDefaults.class); + when(defaults.getChecksumType()).thenReturn( + Type.valueOf(DataChecksum.CHECKSUM_CRC32)); + when(defaults.getKeyProviderUri()).thenReturn(nsId); + return defaults; + }); + } + + private static String getSrc(InvocationOnMock invocation) { + return (String) invocation.getArguments()[0]; + } + + /** + * Get a mock HDFS file status. + * @param filename Name of the file. 
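The mocked namespace above is just a sorted map keyed by absolute path: getListing() takes the subMap over the range [src, src + Character.MAX_VALUE) and keeps only keys with no further '/' after the prefix, i.e. the direct children. A standalone sketch of that lookup trick (paths are illustrative, independent of the mock):

    import java.util.SortedMap;
    import java.util.concurrent.ConcurrentSkipListMap;

    public final class PrefixListingSketch {
      public static void main(String[] args) {
        SortedMap<String, String> fs = new ConcurrentSkipListMap<>();
        fs.put("/dir/a.txt", "FILE");
        fs.put("/dir/sub", "DIRECTORY");
        fs.put("/dir/sub/b.txt", "FILE");
        fs.put("/other/c.txt", "FILE");

        String src = "/dir/";
        // Every key that starts with "/dir/" falls inside this range of the sorted map.
        SortedMap<String, String> subtree = fs.subMap(src, src + Character.MAX_VALUE);
        for (String path : subtree.keySet()) {
          // Direct children have no extra '/' after the prefix.
          if (path.substring(src.length()).indexOf('/') < 0) {
            System.out.println(path);   // prints /dir/a.txt and /dir/sub
          }
        }
      }
    }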
+ * @param type Type of the file (FILE, DIRECTORY, or null). + * @return HDFS file status + */ + private static HdfsFileStatus getMockHdfsFileStatus( + final String filename, final String type) { + if (type == null) { + return null; + } + HdfsFileStatus fileStatus = mock(HdfsFileStatus.class); + when(fileStatus.getLocalNameInBytes()).thenReturn(filename.getBytes()); + when(fileStatus.getPermission()).thenReturn(mock(FsPermission.class)); + when(fileStatus.getOwner()).thenReturn("owner"); + when(fileStatus.getGroup()).thenReturn("group"); + if (type.equals("FILE")) { + when(fileStatus.getLen()).thenReturn(100L); + when(fileStatus.getReplication()).thenReturn((short) 1); + when(fileStatus.getBlockSize()).thenReturn( + HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT); + } else if (type.equals("DIRECTORY")) { + when(fileStatus.isDir()).thenReturn(true); + when(fileStatus.isDirectory()).thenReturn(true); + } + return fileStatus; + } + + /** + * Get a mock located block pointing to one of the subclusters. It is + * allocated in a fake Datanode. + * @param nsId Name service identifier (subcluster). + * @return Mock located block. + */ + private static LocatedBlock getMockLocatedBlock(final String nsId) { + LocatedBlock lb = mock(LocatedBlock.class); + when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]); + DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", + 1111, 1112, 1113, 1114); + DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId); + when(lb.getLocations()).thenReturn(new DatanodeInfo[] {dnInfo}); + ExtendedBlock eb = mock(ExtendedBlock.class); + when(eb.getBlockPoolId()).thenReturn(nsId); + when(lb.getBlock()).thenReturn(eb); + @SuppressWarnings("unchecked") + Token tok = mock(Token.class); + when(tok.getIdentifier()).thenReturn(nsId.getBytes()); + when(tok.getPassword()).thenReturn(nsId.getBytes()); + when(tok.getKind()).thenReturn(new Text(nsId)); + when(tok.getService()).thenReturn(new Text(nsId)); + when(lb.getBlockToken()).thenReturn(tok); + return lb; + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index 486d4a09b70ee..381203b2a9145 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -152,7 +152,7 @@ public void tearDown() { @Test public void testAddMountTable() throws Exception { - String nsId = "ns0"; + String nsId = "ns0,ns1"; String src = "/test-addmounttable"; String dest = "/addmounttable"; String[] argv = new String[] {"-add", src, nsId, dest}; @@ -166,26 +166,35 @@ public void testAddMountTable() throws Exception { MountTable mountTable = getResponse.getEntries().get(0); List destinations = mountTable.getDestinations(); - assertEquals(1, destinations.size()); + assertEquals(2, destinations.size()); assertEquals(src, mountTable.getSourcePath()); - assertEquals(nsId, destinations.get(0).getNameserviceId()); + assertEquals("ns0", destinations.get(0).getNameserviceId()); assertEquals(dest, destinations.get(0).getDest()); + assertEquals("ns1", destinations.get(1).getNameserviceId()); + assertEquals(dest, destinations.get(1).getDest()); assertFalse(mountTable.isReadOnly()); + 
assertFalse(mountTable.isFaultTolerant()); // test mount table update behavior dest = dest + "-new"; - argv = new String[] {"-add", src, nsId, dest, "-readonly"}; + argv = new String[] {"-add", src, nsId, dest, "-readonly", + "-faulttolerant", "-order", "HASH_ALL"}; assertEquals(0, ToolRunner.run(admin, argv)); stateStore.loadCache(MountTableStoreImpl.class, true); getResponse = client.getMountTableManager() .getMountTableEntries(getRequest); mountTable = getResponse.getEntries().get(0); - assertEquals(2, mountTable.getDestinations().size()); - assertEquals(nsId, mountTable.getDestinations().get(1).getNameserviceId()); - assertEquals(dest, mountTable.getDestinations().get(1).getDest()); + assertEquals(4, mountTable.getDestinations().size()); + RemoteLocation loc2 = mountTable.getDestinations().get(2); + assertEquals("ns0", loc2.getNameserviceId()); + assertEquals(dest, loc2.getDest()); + RemoteLocation loc3 = mountTable.getDestinations().get(3); + assertEquals("ns1", loc3.getNameserviceId()); + assertEquals(dest, loc3.getDest()); assertTrue(mountTable.isReadOnly()); + assertTrue(mountTable.isFaultTolerant()); } @Test @@ -211,6 +220,7 @@ public void testAddMountTableNotNormalized() throws Exception { assertEquals(nsId, destinations.get(0).getNameserviceId()); assertEquals(dest, destinations.get(0).getDest()); assertFalse(mountTable.isReadOnly()); + assertFalse(mountTable.isFaultTolerant()); // test mount table update behavior dest = dest + "-new"; @@ -516,17 +526,19 @@ public void testInvalidArgumentMessage() throws Exception { System.setOut(new PrintStream(out)); String[] argv = new String[] {"-add", src, nsId}; assertEquals(-1, ToolRunner.run(admin, argv)); - assertTrue(out.toString().contains( + assertTrue("Wrong message: " + out, out.toString().contains( "\t[-add " - + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + + "[-readonly] [-faulttolerant] " + + "[-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]")); out.reset(); argv = new String[] {"-update", src, nsId}; assertEquals(-1, ToolRunner.run(admin, argv)); - assertTrue(out.toString().contains( + assertTrue("Wrong message: " + out, out.toString().contains( "\t[-update " - + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + + "[-readonly] [-faulttolerant] " + + "[-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]")); out.reset(); @@ -567,10 +579,11 @@ public void testInvalidArgumentMessage() throws Exception { assertEquals(-1, ToolRunner.run(admin, argv)); String expected = "Usage: hdfs dfsrouteradmin :\n" + "\t[-add " - + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]\n" + "\t[-update " - + " " + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + + " " + + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]\n" + "\t[-rm ]\n" + "\t[-ls ]\n" + "\t[-getDestination ]\n" @@ -579,7 +592,7 @@ public void testInvalidArgumentMessage() throws Exception { + "\t[-safemode enter | leave | get]\n" + "\t[-nameservice enable | disable ]\n" + "\t[-getDisabledNameservices]"; - assertTrue(out.toString(), out.toString().contains(expected)); + assertTrue("Wrong message: " + out, out.toString().contains(expected)); out.reset(); } @@ -1159,4 +1172,28 @@ public void testGetDestination() throws Exception { argv = new String[] {"-getDestination /file1.txt /file2.txt"}; assertEquals(-1, ToolRunner.run(admin, argv)); } + + @Test + public void testErrorFaultTolerant() throws Exception { + + 
System.setErr(new PrintStream(err)); + String[] argv = new String[] {"-add", "/mntft", "ns01", "/tmp", + "-faulttolerant"}; + assertEquals(-1, ToolRunner.run(admin, argv)); + assertTrue(err.toString(), err.toString().contains( + "Invalid entry, fault tolerance requires multiple destinations")); + err.reset(); + + System.setErr(new PrintStream(err)); + argv = new String[] {"-add", "/mntft", "ns0,ns1", "/tmp", + "-order", "HASH", "-faulttolerant"}; + assertEquals(-1, ToolRunner.run(admin, argv)); + assertTrue(err.toString(), err.toString().contains( + "Invalid entry, fault tolerance only supported for ALL order")); + err.reset(); + + argv = new String[] {"-add", "/mntft", "ns0,ns1", "/tmp", + "-order", "HASH_ALL", "-faulttolerant"}; + assertEquals(0, ToolRunner.run(admin, argv)); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java new file mode 100644 index 0000000000000..c8f96c659cd9c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java @@ -0,0 +1,654 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static java.util.Arrays.asList; +import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.server.federation.MockNamenode; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; +import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; +import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Test the handling of fault tolerant mount points in the Router. + */ +public class TestRouterFaultTolerant { + + private static final Logger LOG = + LoggerFactory.getLogger(TestRouterFaultTolerant.class); + + /** Number of files to create for testing. */ + private static final int NUM_FILES = 10; + /** Number of Routers for test. 
*/ + private static final int NUM_ROUTERS = 2; + + + /** Namenodes for the test per name service id (subcluster). */ + private Map namenodes = new HashMap<>(); + /** Routers for the test. */ + private List routers = new ArrayList<>(); + + /** Run test tasks in parallel. */ + private ExecutorService service; + + + @Before + public void setup() throws Exception { + LOG.info("Start the Namenodes"); + Configuration nnConf = new HdfsConfiguration(); + nnConf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 10); + for (final String nsId : asList("ns0", "ns1")) { + MockNamenode nn = new MockNamenode(nsId, nnConf); + nn.transitionToActive(); + nn.addFileSystemMock(); + namenodes.put(nsId, nn); + } + + LOG.info("Start the Routers"); + Configuration routerConf = new RouterConfigBuilder() + .stateStore() + .admin() + .rpc() + .build(); + routerConf.set(RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "0.0.0.0:0"); + routerConf.set(RBFConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY, "0.0.0.0:0"); + routerConf.set(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, "0.0.0.0:0"); + // Speedup time outs + routerConf.setTimeDuration( + RBFConfigKeys.DFS_ROUTER_CLIENT_CONNECT_TIMEOUT, + 500, TimeUnit.MILLISECONDS); + + Configuration stateStoreConf = getStateStoreConfiguration(); + stateStoreConf.setClass( + RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS, + MembershipNamenodeResolver.class, ActiveNamenodeResolver.class); + stateStoreConf.setClass( + RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS, + MultipleDestinationMountTableResolver.class, + FileSubclusterResolver.class); + routerConf.addResource(stateStoreConf); + + for (int i = 0; i < NUM_ROUTERS; i++) { + // router0 doesn't allow partial listing + routerConf.setBoolean( + RBFConfigKeys.DFS_ROUTER_ALLOW_PARTIAL_LIST, i != 0); + + final Router router = new Router(); + router.init(routerConf); + router.start(); + routers.add(router); + } + + LOG.info("Registering the subclusters in the Routers"); + registerSubclusters(Collections.singleton("ns1")); + + LOG.info("Stop ns1 to simulate an unavailable subcluster"); + namenodes.get("ns1").stop(); + + service = Executors.newFixedThreadPool(10); + } + + /** + * Register the subclusters in all Routers. + * @param unavailableSubclusters Set of unavailable subclusters. + * @throws IOException If it cannot register a subcluster. 
+ */ + private void registerSubclusters(Set unavailableSubclusters) + throws IOException { + for (final Router router : routers) { + MembershipNamenodeResolver resolver = + (MembershipNamenodeResolver) router.getNamenodeResolver(); + for (final MockNamenode nn : namenodes.values()) { + String nsId = nn.getNameserviceId(); + String rpcAddress = "localhost:" + nn.getRPCPort(); + String httpAddress = "localhost:" + nn.getHTTPPort(); + NamenodeStatusReport report = new NamenodeStatusReport( + nsId, null, rpcAddress, rpcAddress, rpcAddress, httpAddress); + if (unavailableSubclusters.contains(nsId)) { + LOG.info("Register {} as UNAVAILABLE", nsId); + report.setRegistrationValid(false); + } else { + LOG.info("Register {} as ACTIVE", nsId); + report.setRegistrationValid(true); + } + report.setNamespaceInfo(new NamespaceInfo(0, nsId, nsId, 0)); + resolver.registerNamenode(report); + } + resolver.loadCache(true); + } + } + + @After + public void cleanup() throws Exception { + LOG.info("Stopping the cluster"); + for (final MockNamenode nn : namenodes.values()) { + nn.stop(); + } + namenodes.clear(); + + routers.forEach(router -> router.stop()); + routers.clear(); + + if (service != null) { + service.shutdown(); + service = null; + } + } + + /** + * Add a mount table entry in some name services and wait until it is + * available. + * @param mountPoint Name of the mount point. + * @param order Order of the mount table entry. + * @param nsIds Name service identifiers. + * @throws Exception If the entry could not be created. + */ + private void createMountTableEntry( + final String mountPoint, final DestinationOrder order, + Collection nsIds) throws Exception { + Router router = getRandomRouter(); + RouterClient admin = getAdminClient(router); + MountTableManager mountTable = admin.getMountTableManager(); + Map destMap = new HashMap<>(); + for (String nsId : nsIds) { + destMap.put(nsId, mountPoint); + } + MountTable newEntry = MountTable.newInstance(mountPoint, destMap); + newEntry.setDestOrder(order); + AddMountTableEntryRequest addRequest = + AddMountTableEntryRequest.newInstance(newEntry); + AddMountTableEntryResponse addResponse = + mountTable.addMountTableEntry(addRequest); + boolean created = addResponse.getStatus(); + assertTrue(created); + + refreshRoutersCaches(); + + // Check for the path + GetMountTableEntriesRequest getRequest = + GetMountTableEntriesRequest.newInstance(mountPoint); + GetMountTableEntriesResponse getResponse = + mountTable.getMountTableEntries(getRequest); + List entries = getResponse.getEntries(); + assertEquals("Too many entries: " + entries, 1, entries.size()); + assertEquals(mountPoint, entries.get(0).getSourcePath()); + } + + /** + * Update a mount table entry to be fault tolerant. + * @param mountPoint Mount point to update. + * @throws IOException If it cannot update the mount point. 
+ */ + private void updateMountPointFaultTolerant(final String mountPoint) + throws IOException { + Router router = getRandomRouter(); + RouterClient admin = getAdminClient(router); + MountTableManager mountTable = admin.getMountTableManager(); + GetMountTableEntriesRequest getRequest = + GetMountTableEntriesRequest.newInstance(mountPoint); + GetMountTableEntriesResponse entries = + mountTable.getMountTableEntries(getRequest); + MountTable updateEntry = entries.getEntries().get(0); + updateEntry.setFaultTolerant(true); + UpdateMountTableEntryRequest updateRequest = + UpdateMountTableEntryRequest.newInstance(updateEntry); + UpdateMountTableEntryResponse updateResponse = + mountTable.updateMountTableEntry(updateRequest); + assertTrue(updateResponse.getStatus()); + + refreshRoutersCaches(); + } + + /** + * Refresh the caches of all Routers (to get the mount table). + */ + private void refreshRoutersCaches() { + for (final Router router : routers) { + StateStoreService stateStore = router.getStateStore(); + stateStore.refreshCaches(true); + } + } + + /** + * Test the behavior of the Router when one of the subclusters in a mount + * point fails. In particular, it checks if it can write files or not. + * Related to {@link TestRouterRpcMultiDestination#testSubclusterDown()}. + */ + @Test + public void testWriteWithFailedSubcluster() throws Exception { + + // Run the actual tests with each approach + final List> tasks = new ArrayList<>(); + final List orders = asList( + DestinationOrder.HASH_ALL, + DestinationOrder.SPACE, + DestinationOrder.RANDOM, + DestinationOrder.HASH); + for (DestinationOrder order : orders) { + tasks.add(() -> { + testWriteWithFailedSubcluster(order); + return true; + }); + } + TaskResults results = collectResults("Full tests", tasks); + assertEquals(orders.size(), results.getSuccess()); + } + + /** + * Test the behavior of the Router when one of the subclusters in a mount + * point fails. It assumes that ns1 is already down. + * @param order Destination order of the mount point. + * @throws Exception If we cannot run the test. + */ + private void testWriteWithFailedSubcluster(final DestinationOrder order) + throws Exception { + + final FileSystem router0Fs = getFileSystem(routers.get(0)); + final FileSystem router1Fs = getFileSystem(routers.get(1)); + final FileSystem ns0Fs = getFileSystem(namenodes.get("ns0").getRPCPort()); + + final String mountPoint = "/" + order + "-failsubcluster"; + final Path mountPath = new Path(mountPoint); + LOG.info("Setup {} with order {}", mountPoint, order); + createMountTableEntry(mountPoint, order, namenodes.keySet()); + + + LOG.info("Write in {} should succeed writing in ns0 and fail for ns1", + mountPath); + checkDirectoriesFaultTolerant( + mountPath, order, router0Fs, router1Fs, ns0Fs, false); + checkFilesFaultTolerant( + mountPath, order, router0Fs, router1Fs, ns0Fs, false); + + LOG.info("Make {} fault tolerant and everything succeeds", mountPath); + IOException ioe = null; + try { + updateMountPointFaultTolerant(mountPoint); + } catch (IOException e) { + ioe = e; + } + if (DestinationOrder.FOLDER_ALL.contains(order)) { + assertNull(ioe); + checkDirectoriesFaultTolerant( + mountPath, order, router0Fs, router1Fs, ns0Fs, true); + checkFilesFaultTolerant( + mountPath, order, router0Fs, router1Fs, ns0Fs, true); + } else { + assertTrue(ioe.getMessage().startsWith( + "Invalid entry, fault tolerance only supported for ALL order")); + } + } + + /** + * Check directory creation on a mount point. 
+ * If it is fault tolerant, it should be able to write everything. + * If it is not fault tolerant, it should fail to write some. + */ + private void checkDirectoriesFaultTolerant( + Path mountPoint, DestinationOrder order, + FileSystem router0Fs, FileSystem router1Fs, FileSystem ns0Fs, + boolean faultTolerant) throws Exception { + + final FileStatus[] dirs0 = listStatus(router1Fs, mountPoint); + + LOG.info("Create directories in {}", mountPoint); + final List> tasks = new ArrayList<>(); + for (int i = 0; i < NUM_FILES; i++) { + final Path dir = new Path(mountPoint, + String.format("dir-%s-%03d", faultTolerant, i)); + FileSystem fs = getRandomRouterFileSystem(); + tasks.add(getDirCreateTask(fs, dir)); + } + TaskResults results = collectResults("Create dir " + mountPoint, tasks); + + LOG.info("Check directories results for {}: {}", mountPoint, results); + if (faultTolerant || DestinationOrder.FOLDER_ALL.contains(order)) { + assertEquals(NUM_FILES, results.getSuccess()); + assertEquals(0, results.getFailure()); + } else { + assertBothResults("check dir " + mountPoint, NUM_FILES, results); + } + + LOG.info("Check directories listing for {}", mountPoint); + tasks.add(getListFailTask(router0Fs, mountPoint)); + int filesExpected = dirs0.length + results.getSuccess(); + tasks.add(getListSuccessTask(router1Fs, mountPoint, filesExpected)); + assertEquals(2, collectResults("List " + mountPoint, tasks).getSuccess()); + } + + /** + * Check file creation on a mount point. + * If it is fault tolerant, it should be able to write everything. + * If it is not fault tolerant, it should fail to write some of the files. + */ + private void checkFilesFaultTolerant( + Path mountPoint, DestinationOrder order, + FileSystem router0Fs, FileSystem router1Fs, FileSystem ns0Fs, + boolean faultTolerant) throws Exception { + + // Get one of the existing sub directories + final FileStatus[] dirs0 = listStatus(router1Fs, mountPoint); + final Path dir0 = Path.getPathWithoutSchemeAndAuthority( + dirs0[0].getPath()); + + LOG.info("Create files in {}", dir0); + final List> tasks = new ArrayList<>(); + for (int i = 0; i < NUM_FILES; i++) { + final String newFile = String.format("%s/file-%03d.txt", dir0, i); + FileSystem fs = getRandomRouterFileSystem(); + tasks.add(getFileCreateTask(fs, newFile, ns0Fs)); + } + TaskResults results = collectResults("Create file " + dir0, tasks); + + LOG.info("Check files results for {}: {}", dir0, results); + if (faultTolerant || !DestinationOrder.FOLDER_ALL.contains(order)) { + assertEquals(NUM_FILES, results.getSuccess()); + assertEquals(0, results.getFailure()); + } else { + assertBothResults("check files " + dir0, NUM_FILES, results); + } + + LOG.info("Check files listing for {}", dir0); + tasks.add(getListFailTask(router0Fs, dir0)); + tasks.add(getListSuccessTask(router1Fs, dir0, results.getSuccess())); + assertEquals(2, collectResults("List " + dir0, tasks).getSuccess()); + } + + /** + * Get the string representation for the files. + * @param files Files to check. + * @return String representation. + */ + private static String toString(final FileStatus[] files) { + final StringBuilder sb = new StringBuilder(); + sb.append("["); + for (final FileStatus file : files) { + if (sb.length() > 1) { + sb.append(", "); + } + sb.append(Path.getPathWithoutSchemeAndAuthority(file.getPath())); + } + sb.append("]"); + return sb.toString(); + } + + /** + * List the files in a path. + * @param fs File system to check. + * @param path Path to list. + * @return List of files. 
+ * @throws IOException If we cannot list. + */ + private FileStatus[] listStatus(final FileSystem fs, final Path path) + throws IOException { + FileStatus[] files = new FileStatus[] {}; + try { + files = fs.listStatus(path); + } catch (FileNotFoundException fnfe) { + LOG.debug("File not found: {}", fnfe.getMessage()); + } + return files; + } + + /** + * Task that creates a file and checks if it is available. + * @param file File to create. + * @param checkFs File system for checking if the file is properly created. + * @return Result of creating the file. + */ + private static Callable getFileCreateTask( + final FileSystem fs, final String file, FileSystem checkFs) { + return () -> { + try { + Path path = new Path(file); + FSDataOutputStream os = fs.create(path); + // We don't write because we have no mock Datanodes + os.close(); + FileStatus fileStatus = checkFs.getFileStatus(path); + assertTrue("File not created properly: " + fileStatus, + fileStatus.getLen() > 0); + return true; + } catch (RemoteException re) { + return false; + } + }; + } + + /** + * Task that creates a directory. + * @param dir Directory to create. + * @return Result of creating the directory.. + */ + private static Callable getDirCreateTask( + final FileSystem fs, final Path dir) { + return () -> { + try { + fs.mkdirs(dir); + return true; + } catch (RemoteException re) { + return false; + } + }; + } + + /** + * Task that lists a directory and expects to fail. + * @param fs File system to check. + * @param path Path to try to list. + * @return If the listing failed as expected. + */ + private static Callable getListFailTask(FileSystem fs, Path path) { + return () -> { + try { + fs.listStatus(path); + return false; + } catch (RemoteException re) { + return true; + } + }; + } + + /** + * Task that lists a directory and succeeds. + * @param fs File system to check. + * @param path Path to list. + * @param expected Number of files to expect to find. + * @return If the listing succeeds. + */ + private static Callable getListSuccessTask( + FileSystem fs, Path path, int expected) { + return () -> { + final FileStatus[] dirs = fs.listStatus(path); + assertEquals(toString(dirs), expected, dirs.length); + return true; + }; + } + + /** + * Invoke a set of tasks and collect their outputs. + * The tasks should do assertions. + * + * @param service Execution Service to run the tasks. + * @param tasks Tasks to run. + * @throws Exception If it cannot collect the results. + */ + private TaskResults collectResults(final String tag, + final Collection> tasks) throws Exception { + final TaskResults results = new TaskResults(); + service.invokeAll(tasks).forEach(task -> { + try { + boolean succeeded = task.get(); + if (succeeded) { + LOG.info("Got success for {}", tag); + results.incrSuccess(); + } else { + LOG.info("Got failure for {}", tag); + results.incrFailure(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + }); + tasks.clear(); + return results; + } + + /** + * Class to summarize the results of running a task. 
+ */ + static class TaskResults { + private final AtomicInteger success = new AtomicInteger(0); + private final AtomicInteger failure = new AtomicInteger(0); + public void incrSuccess() { + success.incrementAndGet(); + } + public void incrFailure() { + failure.incrementAndGet(); + } + public int getSuccess() { + return success.get(); + } + public int getFailure() { + return failure.get(); + } + public int getTotal() { + return success.get() + failure.get(); + } + @Override + public String toString() { + return new StringBuilder() + .append("Success=").append(getSuccess()) + .append(" Failure=").append(getFailure()) + .toString(); + } + } + + /** + * Asserts that the results are the expected amount and it has both success + * and failure. + * @param msg Message to show when the assertion fails. + * @param expected Expected number of results. + * @param actual Actual results. + */ + private static void assertBothResults(String msg, + int expected, TaskResults actual) { + assertEquals(msg, expected, actual.getTotal()); + assertTrue("Expected some success for " + msg, actual.getSuccess() > 0); + assertTrue("Expected some failure for " + msg, actual.getFailure() > 0); + } + + /** + * Get a random Router from the cluster. + * @return Random Router. + */ + private Router getRandomRouter() { + Random rnd = new Random(); + int index = rnd.nextInt(routers.size()); + return routers.get(index); + } + + /** + * Get a file system from one of the Routers as a random user to allow better + * concurrency in the Router. + * @return File system from a random user. + * @throws Exception If we cannot create the file system. + */ + private FileSystem getRandomRouterFileSystem() throws Exception { + final UserGroupInformation userUgi = + UserGroupInformation.createUserForTesting( + "user-" + UUID.randomUUID(), new String[]{"group"}); + Router router = getRandomRouter(); + return userUgi.doAs( + (PrivilegedExceptionAction) () -> getFileSystem(router)); + } + + private static FileSystem getFileSystem(int rpcPort) throws IOException { + Configuration conf = new HdfsConfiguration(); + URI uri = URI.create("hdfs://localhost:" + rpcPort); + return DistributedFileSystem.get(uri, conf); + } + + private static FileSystem getFileSystem(final Router router) + throws IOException { + InetSocketAddress rpcAddress = router.getRpcServerAddress(); + int rpcPort = rpcAddress.getPort(); + return getFileSystem(rpcPort); + } + + private static RouterClient getAdminClient( + final Router router) throws IOException { + Configuration conf = new HdfsConfiguration(); + InetSocketAddress routerSocket = router.getAdminServerAddress(); + return new RouterClient(routerSocket, conf); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java index 1224fa2ddcd66..8fa3506f73cce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java @@ -74,7 +74,7 @@ public void setup() throws Exception { for (String nsId : nsIds) { nns.put(nsId, new HashMap<>()); for (String nnId : asList("nn0", "nn1")) { - nns.get(nsId).put(nnId, new MockNamenode()); + nns.get(nsId).put(nnId, new MockNamenode(nsId)); } } diff 
--git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java index 2ec5d62fd5e90..98f9ebcf71a58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState; import org.apache.hadoop.hdfs.server.federation.store.records.MembershipStats; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; /** @@ -93,7 +94,7 @@ public static Configuration getStateStoreConfiguration( conf.setClass(FEDERATION_STORE_DRIVER_CLASS, clazz, StateStoreDriver.class); - if (clazz.isAssignableFrom(StateStoreFileBaseImpl.class)) { + if (StateStoreFileBaseImpl.class.isAssignableFrom(clazz)) { setFileConfiguration(conf); } return conf; @@ -178,8 +179,7 @@ public static void deleteStateStore( * @param conf Configuration to extend. */ public static void setFileConfiguration(Configuration conf) { - String workingPath = System.getProperty("user.dir"); - String stateStorePath = workingPath + "/statestore"; + String stateStorePath = GenericTestUtils.getRandomizedTempPath(); conf.set(FEDERATION_STORE_FILE_DIRECTORY, stateStorePath); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMountTable.java index d30d6baea44e5..6e5bd9ca85ffb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMountTable.java @@ -78,15 +78,17 @@ public void testStateStoreDisconnected() throws Exception { assertFalse(getStateStore().isDriverReady()); // Test APIs that access the store to check they throw the correct exception + MountTable entry = MountTable.newInstance( + "/mnt", Collections.singletonMap("ns0", "/tmp")); AddMountTableEntryRequest addRequest = - AddMountTableEntryRequest.newInstance(); + AddMountTableEntryRequest.newInstance(entry); verifyException(mountStore, "addMountTableEntry", StateStoreUnavailableException.class, new Class[] {AddMountTableEntryRequest.class}, new Object[] {addRequest}); UpdateMountTableEntryRequest updateRequest = - UpdateMountTableEntryRequest.newInstance(); + UpdateMountTableEntryRequest.newInstance(entry); verifyException(mountStore, "updateMountTableEntry", StateStoreUnavailableException.class, new Class[] {UpdateMountTableEntryRequest.class}, diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java index 055527384eb3a..339a9776ea452 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -164,6 +165,24 @@ public void testReadOnly() throws IOException { assertTrue(record2.isReadOnly()); } + @Test + public void testFaultTolerant() throws IOException { + + Map dest = new LinkedHashMap<>(); + dest.put(DST_NS_0, DST_PATH_0); + dest.put(DST_NS_1, DST_PATH_1); + MountTable record0 = MountTable.newInstance(SRC, dest); + assertFalse(record0.isFaultTolerant()); + + MountTable record1 = MountTable.newInstance(SRC, dest); + assertFalse(record1.isFaultTolerant()); + assertEquals(record0, record1); + + record1.setFaultTolerant(true); + assertTrue(record1.isFaultTolerant()); + assertNotEquals(record0, record1); + } + @Test public void testOrder() throws IOException { testOrder(DestinationOrder.HASH); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md index 7ae31c83985c9..452b2773698ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md @@ -428,8 +428,8 @@ Runs the DFS router. See [Router](../hadoop-hdfs-rbf/HDFSRouterFederation.html#R Usage: hdfs dfsrouteradmin - [-add [-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] -owner -group -mode ] - [-update [-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] -owner -group -mode ] + [-add [-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] -owner -group -mode ] + [-update [-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] -owner -group -mode ] [-rm ] [-ls ] [-getDestination ] From dd8c2b92df2d42fe8ee07032988fe1fb68161004 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Fri, 5 Apr 2019 08:11:16 +0530 Subject: [PATCH 0315/1308] HDFS-13853. RBF: RouterAdmin update cmd is overwriting the entry not updating the existing. Contributed by Ayush Saxena. 
--- .../hdfs/tools/federation/RouterAdmin.java | 219 ++++++++++-------- .../federation/router/TestRouterAdminCLI.java | 130 +++++++++-- .../src/site/markdown/HDFSCommands.md | 4 +- 3 files changed, 232 insertions(+), 121 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 61da7e926d940..9d03a4485be20 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -22,8 +22,10 @@ import java.util.Arrays; import java.util.Collection; import java.util.LinkedHashMap; +import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; @@ -138,9 +140,10 @@ private String getUsage(String cmd) { + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]"; } else if (cmd.equals("-update")) { - return "\t[-update " - + " " - + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + return "\t[-update " + + " [ ] " + + "[-readonly true|false] [-faulttolerant true|false]" + + " [-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]"; } else if (cmd.equals("-rm")) { return "\t[-rm ]"; @@ -294,6 +297,8 @@ public int run(String[] argv) throws Exception { } else if ("-update".equals(cmd)) { if (updateMount(argv, i)) { System.out.println("Successfully updated mount point " + argv[i]); + System.out.println( + "WARN: Changing order/destinations may lead to inconsistencies"); } else { exitCode = -1; } @@ -366,6 +371,10 @@ public int run(String[] argv) throws Exception { e.printStackTrace(); debugException = ex; } + } catch (IOException ioe) { + exitCode = -1; + System.err.println(cmd.substring(1) + ": " + ioe.getLocalizedMessage()); + printUsage(cmd); } catch (Exception e) { exitCode = -1; debugException = e; @@ -473,17 +482,7 @@ public boolean addMount(String mount, String[] nss, String dest, mount = normalizeFileSystemPath(mount); // Get the existing entry MountTableManager mountTable = client.getMountTableManager(); - GetMountTableEntriesRequest getRequest = - GetMountTableEntriesRequest.newInstance(mount); - GetMountTableEntriesResponse getResponse = - mountTable.getMountTableEntries(getRequest); - List results = getResponse.getEntries(); - MountTable existingEntry = null; - for (MountTable result : results) { - if (mount.equals(result.getSourcePath())) { - existingEntry = result; - } - } + MountTable existingEntry = getMountEntry(mount, mountTable); if (existingEntry == null) { // Create and add the entry if it doesn't exist @@ -579,100 +578,81 @@ public boolean addMount(String mount, String[] nss, String dest, * @throws IOException If there is an error. 
*/ public boolean updateMount(String[] parameters, int i) throws IOException { - // Mandatory parameters String mount = parameters[i++]; - String[] nss = parameters[i++].split(","); - String dest = parameters[i++]; - - // Optional parameters - boolean readOnly = false; - boolean faultTolerant = false; - String owner = null; - String group = null; - FsPermission mode = null; - DestinationOrder order = null; - while (i < parameters.length) { - if (parameters[i].equals("-readonly")) { - readOnly = true; - } else if (parameters[i].equals("-faulttolerant")) { - faultTolerant = true; - } else if (parameters[i].equals("-order")) { - i++; - try { - order = DestinationOrder.valueOf(parameters[i]); - } catch(Exception e) { - System.err.println("Cannot parse order: " + parameters[i]); - } - } else if (parameters[i].equals("-owner")) { - i++; - owner = parameters[i]; - } else if (parameters[i].equals("-group")) { - i++; - group = parameters[i]; - } else if (parameters[i].equals("-mode")) { - i++; - short modeValue = Short.parseShort(parameters[i], 8); - mode = new FsPermission(modeValue); - } else { - printUsage("-update"); - return false; - } - - i++; - } - - return updateMount(mount, nss, dest, readOnly, faultTolerant, order, - new ACLEntity(owner, group, mode)); - } - - /** - * Update a mount table entry. - * - * @param mount Mount point. - * @param nss Nameservices where this is mounted to. - * @param dest Destination path. - * @param readonly If the mount point is read only. - * @param order Order of the destination locations. - * @param aclInfo the ACL info for mount point. - * @return If the mount point was updated. - * @throws IOException Error updating the mount point. - */ - public boolean updateMount(String mount, String[] nss, String dest, - boolean readonly, boolean faultTolerant, - DestinationOrder order, ACLEntity aclInfo) - throws IOException { mount = normalizeFileSystemPath(mount); MountTableManager mountTable = client.getMountTableManager(); - - // Create a new entry - Map destMap = new LinkedHashMap<>(); - for (String ns : nss) { - destMap.put(ns, dest); - } - MountTable newEntry = MountTable.newInstance(mount, destMap); - - newEntry.setReadOnly(readonly); - newEntry.setFaultTolerant(faultTolerant); - - if (order != null) { - newEntry.setDestOrder(order); - } - - // Update ACL info of mount table entry - if (aclInfo.getOwner() != null) { - newEntry.setOwnerName(aclInfo.getOwner()); + MountTable existingEntry = getMountEntry(mount, mountTable); + if (existingEntry == null) { + throw new IOException(mount + " doesn't exist."); } + // Check if the destination needs to be updated. 
- if (aclInfo.getGroup() != null) { - newEntry.setGroupName(aclInfo.getGroup()); + if (!parameters[i].startsWith("-")) { + String[] nss = parameters[i++].split(","); + String dest = parameters[i++]; + Map destMap = new LinkedHashMap<>(); + for (String ns : nss) { + destMap.put(ns, dest); + } + final List locations = new LinkedList<>(); + for (Entry entry : destMap.entrySet()) { + String nsId = entry.getKey(); + String path = normalizeFileSystemPath(entry.getValue()); + RemoteLocation location = new RemoteLocation(nsId, path, mount); + locations.add(location); + } + existingEntry.setDestinations(locations); } - - if (aclInfo.getMode() != null) { - newEntry.setMode(aclInfo.getMode()); + try { + while (i < parameters.length) { + switch (parameters[i]) { + case "-readonly": + i++; + existingEntry.setReadOnly(getBooleanValue(parameters[i])); + break; + case "-faulttolerant": + i++; + existingEntry.setFaultTolerant(getBooleanValue(parameters[i])); + break; + case "-order": + i++; + try { + existingEntry.setDestOrder(DestinationOrder.valueOf(parameters[i])); + break; + } catch (Exception e) { + throw new Exception("Cannot parse order: " + parameters[i]); + } + case "-owner": + i++; + existingEntry.setOwnerName(parameters[i]); + break; + case "-group": + i++; + existingEntry.setGroupName(parameters[i]); + break; + case "-mode": + i++; + short modeValue = Short.parseShort(parameters[i], 8); + existingEntry.setMode(new FsPermission(modeValue)); + break; + default: + printUsage("-update"); + return false; + } + i++; + } + } catch (IllegalArgumentException iae) { + throw iae; + } catch (Exception e) { + String msg = "Unable to parse arguments: " + e.getMessage(); + if (e instanceof ArrayIndexOutOfBoundsException) { + msg = "Unable to parse arguments: no value provided for " + + parameters[i - 1]; + } + throw new IOException(msg); } - UpdateMountTableEntryRequest updateRequest = - UpdateMountTableEntryRequest.newInstance(newEntry); + UpdateMountTableEntryRequest.newInstance(existingEntry); UpdateMountTableEntryResponse updateResponse = mountTable.updateMountTableEntry(updateRequest); boolean updated = updateResponse.getStatus(); @@ -682,6 +662,45 @@ public boolean updateMount(String mount, String[] nss, String dest, return updated; } + /** + * Parse string to boolean. + * @param value the string to be parsed. + * @return parsed boolean value. + * @throws Exception if other than true|false is provided. + */ + private boolean getBooleanValue(String value) throws Exception { + if (value.equalsIgnoreCase("true")) { + return true; + } else if (value.equalsIgnoreCase("false")) { + return false; + } + throw new IllegalArgumentException("Invalid argument: " + value + + ". Please specify either true or false."); + } + + /** + * Gets the mount table entry. + * @param mount name of the mount entry. + * @param mountTable the mount table. + * @return corresponding mount entry. + * @throws IOException in case of failure to retrieve mount entry. + */ + private MountTable getMountEntry(String mount, MountTableManager mountTable) + throws IOException { + GetMountTableEntriesRequest getRequest = + GetMountTableEntriesRequest.newInstance(mount); + GetMountTableEntriesResponse getResponse = + mountTable.getMountTableEntries(getRequest); + List results = getResponse.getEntries(); + MountTable existingEntry = null; + for (MountTable result : results) { + if (mount.equals(result.getSourcePath())) { + existingEntry = result; + } + } + return existingEntry; + } + /** * Remove mount point. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index 381203b2a9145..ce260ec0976fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -536,8 +536,8 @@ public void testInvalidArgumentMessage() throws Exception { argv = new String[] {"-update", src, nsId}; assertEquals(-1, ToolRunner.run(admin, argv)); assertTrue("Wrong message: " + out, out.toString().contains( - "\t[-update " - + "[-readonly] [-faulttolerant] " + "\t[-update [ ] " + + "[-readonly true|false] [-faulttolerant true|false] " + "[-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]")); out.reset(); @@ -581,9 +581,9 @@ public void testInvalidArgumentMessage() throws Exception { + "\t[-add " + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]\n" - + "\t[-update " - + " " - + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + + "\t[-update [ " + + "] [-readonly true|false]" + + " [-faulttolerant true|false] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + "-owner -group -mode ]\n" + "\t[-rm ]\n" + "\t[-ls ]\n" + "\t[-getDestination ]\n" @@ -902,23 +902,15 @@ public Boolean get() { @Test public void testUpdateNonExistingMountTable() throws Exception { - System.setOut(new PrintStream(out)); + System.setErr(new PrintStream(err)); String nsId = "ns0"; String src = "/test-updateNonExistingMounttable"; String dest = "/updateNonExistingMounttable"; String[] argv = new String[] {"-update", src, nsId, dest}; - assertEquals(0, ToolRunner.run(admin, argv)); - - stateStore.loadCache(MountTableStoreImpl.class, true); - GetMountTableEntriesRequest getRequest = - GetMountTableEntriesRequest.newInstance(src); - GetMountTableEntriesResponse getResponse = - client.getMountTableManager().getMountTableEntries(getRequest); - // Ensure the destination updated successfully - MountTable mountTable = getResponse.getEntries().get(0); - assertEquals(src, mountTable.getSourcePath()); - assertEquals(nsId, mountTable.getDestinations().get(0).getNameserviceId()); - assertEquals(dest, mountTable.getDestinations().get(0).getDest()); + // Update shall fail if the mount entry doesn't exist. 
+ assertEquals(-1, ToolRunner.run(admin, argv)); + assertTrue(err.toString(), err.toString() + .contains("update: /test-updateNonExistingMounttable doesn't exist.")); } @Test @@ -997,6 +989,106 @@ public void testUpdateDestinationForExistingMountTableNotNormalized() throws assertEquals(newDest, mountTable.getDestinations().get(0).getDest()); } + @Test + public void testUpdateChangeAttributes() throws Exception { + // Add a mount table firstly + String nsId = "ns0"; + String src = "/mount"; + String dest = "/dest"; + String[] argv = new String[] {"-add", src, nsId, dest, "-readonly", + "-order", "HASH_ALL"}; + assertEquals(0, ToolRunner.run(admin, argv)); + + stateStore.loadCache(MountTableStoreImpl.class, true); + GetMountTableEntriesRequest getRequest = + GetMountTableEntriesRequest.newInstance(src); + GetMountTableEntriesResponse getResponse = + client.getMountTableManager().getMountTableEntries(getRequest); + // Ensure mount table added successfully + MountTable mountTable = getResponse.getEntries().get(0); + assertEquals(src, mountTable.getSourcePath()); + + // Update the destination + String newNsId = "ns0"; + String newDest = "/newDestination"; + argv = new String[] {"-update", src, newNsId, newDest}; + assertEquals(0, ToolRunner.run(admin, argv)); + + stateStore.loadCache(MountTableStoreImpl.class, true); + getResponse = + client.getMountTableManager().getMountTableEntries(getRequest); + // Ensure the destination updated successfully and other attributes are + // preserved. + mountTable = getResponse.getEntries().get(0); + assertEquals(src, mountTable.getSourcePath()); + assertEquals(newNsId, + mountTable.getDestinations().get(0).getNameserviceId()); + assertEquals(newDest, mountTable.getDestinations().get(0).getDest()); + assertTrue(mountTable.isReadOnly()); + assertEquals("HASH_ALL", mountTable.getDestOrder().toString()); + + // Update the attribute. + argv = new String[] {"-update", src, "-readonly", "false"}; + assertEquals(0, ToolRunner.run(admin, argv)); + + stateStore.loadCache(MountTableStoreImpl.class, true); + getResponse = + client.getMountTableManager().getMountTableEntries(getRequest); + + // Ensure the attribute updated successfully and destination and other + // attributes are preserved. + mountTable = getResponse.getEntries().get(0); + assertEquals(src, mountTable.getSourcePath()); + assertEquals(newNsId, + mountTable.getDestinations().get(0).getNameserviceId()); + assertEquals(newDest, mountTable.getDestinations().get(0).getDest()); + assertFalse(mountTable.isReadOnly()); + assertEquals("HASH_ALL", mountTable.getDestOrder().toString()); + + } + + @Test + public void testUpdateErrorCase() throws Exception { + // Add a mount table firstly + String nsId = "ns0"; + String src = "/mount"; + String dest = "/dest"; + String[] argv = new String[] {"-add", src, nsId, dest, "-readonly", + "-order", "HASH_ALL"}; + assertEquals(0, ToolRunner.run(admin, argv)); + stateStore.loadCache(MountTableStoreImpl.class, true); + + // Check update for non-existent mount entry. + argv = new String[] {"-update", "/noMount", "-readonly", "false"}; + System.setErr(new PrintStream(err)); + assertEquals(-1, ToolRunner.run(admin, argv)); + assertTrue(err.toString(), + err.toString().contains("update: /noMount doesn't exist.")); + err.reset(); + + // Check update if non true/false value is passed for readonly. 
+ argv = new String[] {"-update", src, "-readonly", "check"};
+ assertEquals(-1, ToolRunner.run(admin, argv));
+ assertTrue(err.toString(), err.toString().contains("update: " + "Invalid argument: check. Please specify either true or false."));
+ err.reset();
+
+ // Check update when no value is passed for faulttolerant.
+ argv = new String[] {"-update", src, "ns1", "/tmp", "-faulttolerant"};
+ assertEquals(-1, ToolRunner.run(admin, argv));
+ assertTrue(err.toString(), + err.toString().contains("update: Unable to parse arguments:" + " no value provided for -faulttolerant"));
+ err.reset();
+
+ // Check update with invalid order.
+ argv = new String[] {"-update", src, "ns1", "/tmp", "-order", "Invalid"};
+ assertEquals(-1, ToolRunner.run(admin, argv));
+ assertTrue(err.toString(), err.toString().contains( + "update: Unable to parse arguments: Cannot parse order: Invalid"));
+ err.reset();
+ }
+
@Test public void testUpdateReadonlyUserGroupPermissionMountable() throws Exception {
@@ -1022,7 +1114,7 @@ public void testUpdateReadonlyUserGroupPermissionMountable() // Update the readonly, owner, group and permission String testOwner = "test_owner"; String testGroup = "test_group";
- argv = new String[] {"-update", src, nsId, dest, "-readonly",
+ argv = new String[] {"-update", src, nsId, dest, "-readonly", "true",
"-owner", testOwner, "-group", testGroup, "-mode", "0455"}; assertEquals(0, ToolRunner.run(admin, argv));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 452b2773698ee..fd77edf754d7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -429,7 +429,7 @@ Usage: hdfs dfsrouteradmin
[-add [-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] -owner -group -mode ]
- [-update [-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] -owner -group -mode ]
+ [-update [ ] [-readonly true|false] [-faulttolerant true|false] [-order HASH|LOCAL|RANDOM|HASH_ALL] -owner -group -mode ]
[-rm ]
[-ls ]
[-getDestination ]
@@ -444,7 +444,7 @@ Usage:
| COMMAND\_OPTION | Description |
|:---- |:---- |
| `-add` *source* *nameservices* *destination* | Add a mount table entry or update if it exists. |
-| `-update` *source* *nameservices* *destination* | Update a mount table entry or create one if it does not exist. |
+| `-update` *source* *nameservices* *destination* | Update the attributes of a mount table entry. |
| `-rm` *source* | Remove mount point of specified path. |
| `-ls` *path* | List mount points under specified path. |
| `-getDestination` *path* | Get the subcluster where a file is or should be created. |
From 0f9b8d7a753ad41b7ee7dfe3afaf34bddcbd94a8 Mon Sep 17 00:00:00 2001
From: Ayush Saxena
Date: Mon, 8 Apr 2019 21:43:59 +0530
Subject: [PATCH 0316/1308] HDFS-14369. RBF: Fix trailing / for webhdfs. Contributed by Akira Ajisaka.
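A short sketch of the normalization that this patch introduces (mirroring RouterAdmin#normalizeFileSystemPath in the diff below): duplicated slashes are collapsed and a trailing slash is dropped, so lookups such as getMountPoint("///user//a/") resolve to the /user/a entry. The class name here is made up for the example.

    import java.util.regex.Pattern;

    public final class SlashNormalizer {
      /** Pre-compiled pattern to detect duplicated slashes. */
      private static final Pattern SLASHES = Pattern.compile("/+");

      /** Collapse duplicated slashes and drop a trailing one (except for "/"). */
      static String normalize(String str) {
        String path = SLASHES.matcher(str).replaceAll("/");
        if (path.length() > 1 && path.endsWith("/")) {
          path = path.substring(0, path.length() - 1);
        }
        return path;
      }

      public static void main(String[] args) {
        System.out.println(normalize("///user//a/"));       // prints /user/a
        System.out.println(normalize("//tesfile1.txt///"));  // prints /tesfile1.txt
        System.out.println(normalize("/"));                  // prints /
      }
    }

Doing this in the mount table resolver means WebHDFS requests that carry trailing or duplicated slashes still resolve to the right mount entry, which is the failure HDFS-14369 addresses.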
--- .../resolver/MountTableResolver.java | 14 +- .../hdfs/tools/federation/RouterAdmin.java | 16 ++- .../resolver/TestMountTableResolver.java | 123 ++++++++++++++++-- .../router/TestRouterMountTable.java | 32 +++++ 4 files changed, 160 insertions(+), 25 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java index da585515c35f3..03b051db34e9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.hdfs.tools.federation.RouterAdmin; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -399,12 +400,13 @@ public PathLocation call() throws Exception { /** * Build the path location to insert into the cache atomically. It must hold * the read lock. - * @param path Path to check/insert. + * @param str Path to check/insert. * @return New remote location. * @throws IOException If it cannot find the location. */ - public PathLocation lookupLocation(final String path) throws IOException { + public PathLocation lookupLocation(final String str) throws IOException { PathLocation ret = null; + final String path = RouterAdmin.normalizeFileSystemPath(str); MountTable entry = findDeepest(path); if (entry != null) { ret = buildLocation(path, entry); @@ -432,12 +434,13 @@ public PathLocation lookupLocation(final String path) throws IOException { */ public MountTable getMountPoint(final String path) throws IOException { verifyMountTable(); - return findDeepest(path); + return findDeepest(RouterAdmin.normalizeFileSystemPath(path)); } @Override - public List getMountPoints(final String path) throws IOException { + public List getMountPoints(final String str) throws IOException { verifyMountTable(); + final String path = RouterAdmin.normalizeFileSystemPath(str); Set children = new TreeSet<>(); readLock.lock(); @@ -493,8 +496,7 @@ public List getMountPoints(final String path) throws IOException { */ public List getMounts(final String path) throws IOException { verifyMountTable(); - - return getTreeValues(path, false); + return getTreeValues(RouterAdmin.normalizeFileSystemPath(path), false); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 9d03a4485be20..8f6d917d3a6ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -26,12 +26,12 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.regex.Pattern; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import 
org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -93,6 +93,9 @@ public class RouterAdmin extends Configured implements Tool { private RouterClient client; + /** Pre-compiled regular expressions to detect duplicated slashes. */ + private static final Pattern SLASHES = Pattern.compile("/+"); + public static void main(String[] argv) throws Exception { Configuration conf = new HdfsConfiguration(); RouterAdmin admin = new RouterAdmin(conf); @@ -1062,12 +1065,15 @@ public int genericRefresh(String[] argv, int i) throws IOException { /** * Normalize a path for that filesystem. * - * @param path Path to normalize. + * @param str Path to normalize. The path doesn't have scheme or authority. * @return Normalized path. */ - private static String normalizeFileSystemPath(final String path) { - Path normalizedPath = new Path(path); - return normalizedPath.toString(); + public static String normalizeFileSystemPath(final String str) { + String path = SLASHES.matcher(str).replaceAll("/"); + if (path.length() > 1 && path.endsWith("/")) { + path = path.substring(0, path.length()-1); + } + return path; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java index 14ccb6112b9a5..ada2815872755 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java @@ -184,6 +184,23 @@ public void testDestination() throws IOException { } + @Test + public void testDestinationOfConsecutiveSlash() throws IOException { + // Check files + assertEquals("1->/tesfile1.txt", + mountTable.getDestinationForPath("//tesfile1.txt///").toString()); + + assertEquals("3->/user/testfile2.txt", + mountTable.getDestinationForPath("/user///testfile2.txt").toString()); + + assertEquals("2->/user/test/testfile3.txt", + mountTable.getDestinationForPath("///user/a/testfile3.txt").toString()); + + assertEquals("3->/user/b/testfile4.txt", + mountTable.getDestinationForPath("/user/b/testfile4.txt//").toString()); + } + + @Test public void testDefaultNameServiceEnable() throws IOException { assertTrue(mountTable.isDefaultNSEnable()); @@ -232,62 +249,120 @@ public void testGetMountPoint() throws IOException { // Check get the mount table entry for a path MountTable mtEntry; mtEntry = mountTable.getMountPoint("/"); - assertTrue(mtEntry.getSourcePath().equals("/")); + assertEquals("/", mtEntry.getSourcePath()); mtEntry = mountTable.getMountPoint("/user"); - assertTrue(mtEntry.getSourcePath().equals("/user")); + assertEquals("/user", mtEntry.getSourcePath()); mtEntry = mountTable.getMountPoint("/user/a"); - assertTrue(mtEntry.getSourcePath().equals("/user/a")); + assertEquals("/user/a", mtEntry.getSourcePath()); mtEntry = mountTable.getMountPoint("/user/a/"); - assertTrue(mtEntry.getSourcePath().equals("/user/a")); + assertEquals("/user/a", mtEntry.getSourcePath()); mtEntry = mountTable.getMountPoint("/user/a/11"); - assertTrue(mtEntry.getSourcePath().equals("/user/a")); + assertEquals("/user/a", mtEntry.getSourcePath()); mtEntry = 
mountTable.getMountPoint("/user/a1"); - assertTrue(mtEntry.getSourcePath().equals("/user")); + assertEquals("/user", mtEntry.getSourcePath()); + } + + @Test + public void testGetMountPointOfConsecutiveSlashes() throws IOException { + // Check get the mount table entry for a path + MountTable mtEntry; + mtEntry = mountTable.getMountPoint("///"); + assertEquals("/", mtEntry.getSourcePath()); + + mtEntry = mountTable.getMountPoint("///user//"); + assertEquals("/user", mtEntry.getSourcePath()); + + mtEntry = mountTable.getMountPoint("/user///a"); + assertEquals("/user/a", mtEntry.getSourcePath()); + + mtEntry = mountTable.getMountPoint("/user/a////"); + assertEquals("/user/a", mtEntry.getSourcePath()); + + mtEntry = mountTable.getMountPoint("///user/a/11//"); + assertEquals("/user/a", mtEntry.getSourcePath()); + + mtEntry = mountTable.getMountPoint("/user///a1///"); + assertEquals("/user", mtEntry.getSourcePath()); + } + + @Test + public void testTrailingSlashInInputPath() throws IOException { + // Check mount points beneath the path with trailing slash. + getMountPoints(true); } @Test public void testGetMountPoints() throws IOException { + // Check mount points beneath the path without trailing slash. + getMountPoints(false); + } + private void getMountPoints(boolean trailingSlash) throws IOException { // Check getting all mount points (virtual and real) beneath a path List mounts = mountTable.getMountPoints("/"); assertEquals(5, mounts.size()); compareLists(mounts, new String[] {"tmp", "user", "usr", "readonly", "multi"}); - mounts = mountTable.getMountPoints("/user"); + String path = trailingSlash ? "/user/" : "/user"; + mounts = mountTable.getMountPoints(path); assertEquals(2, mounts.size()); compareLists(mounts, new String[] {"a", "b"}); - mounts = mountTable.getMountPoints("/user/a"); + path = trailingSlash ? "/user/a/" : "/user/a"; + mounts = mountTable.getMountPoints(path); assertEquals(1, mounts.size()); compareLists(mounts, new String[] {"demo"}); - mounts = mountTable.getMountPoints("/user/a/demo"); + path = trailingSlash ? "/user/a/demo/" : "/user/a/demo"; + mounts = mountTable.getMountPoints(path); assertEquals(1, mounts.size()); compareLists(mounts, new String[] {"test"}); - mounts = mountTable.getMountPoints("/user/a/demo/test"); + path = trailingSlash ? "/user/a/demo/test/" : "/user/a/demo/test"; + mounts = mountTable.getMountPoints(path); assertEquals(2, mounts.size()); compareLists(mounts, new String[] {"a", "b"}); - mounts = mountTable.getMountPoints("/tmp"); + path = trailingSlash ? "/tmp/" : "/tmp"; + mounts = mountTable.getMountPoints(path); assertEquals(0, mounts.size()); - mounts = mountTable.getMountPoints("/t"); + path = trailingSlash ? "/t/" : "/t"; + mounts = mountTable.getMountPoints(path); assertNull(mounts); - mounts = mountTable.getMountPoints("/unknownpath"); + path = trailingSlash ? "/unknownpath/" : "/unknownpath"; + mounts = mountTable.getMountPoints(path); assertNull(mounts); - mounts = mountTable.getMountPoints("/multi"); + path = trailingSlash ? 
"/multi/" : "/multi"; + mounts = mountTable.getMountPoints(path); assertEquals(0, mounts.size()); } + @Test + public void testSuccessiveSlashesInInputPath() throws IOException { + // Check getting all mount points (virtual and real) beneath a path + List mounts = mountTable.getMountPoints("///"); + assertEquals(5, mounts.size()); + compareLists(mounts, new String[] {"tmp", "user", "usr", + "readonly", "multi"}); + String path = "///user//"; + mounts = mountTable.getMountPoints(path); + assertEquals(2, mounts.size()); + compareLists(mounts, new String[] {"a", "b"}); + path = "/user///a"; + mounts = mountTable.getMountPoints(path); + assertEquals(1, mounts.size()); + compareLists(mounts, new String[] {"demo"}); + } + private void compareRecords(List list1, String[] list2) { assertEquals(list1.size(), list2.length); for (String item : list2) { @@ -334,6 +409,26 @@ public void testGetMounts() throws IOException { compareRecords(records, new String[] {"/multi"}); } + @Test + public void testGetMountsOfConsecutiveSlashes() throws IOException { + // Check listing the mount table records at or beneath a path + List records = mountTable.getMounts("///"); + assertEquals(10, records.size()); + compareRecords(records, new String[] {"/", "/tmp", "/user", "/usr/bin", + "user/a", "/user/a/demo/a", "/user/a/demo/b", "/user/b/file1.txt", + "readonly", "multi"}); + + records = mountTable.getMounts("/user///"); + assertEquals(5, records.size()); + compareRecords(records, new String[] {"/user", "/user/a/demo/a", + "/user/a/demo/b", "user/a", "/user/b/file1.txt"}); + + records = mountTable.getMounts("///user///a"); + assertEquals(3, records.size()); + compareRecords(records, + new String[] {"/user/a/demo/a", "/user/a/demo/b", "/user/a"}); + } + @Test public void testRemoveSubTree() throws UnsupportedOperationException, IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java index 4f6f702d9a1fa..24dfc3fd31392 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java @@ -442,4 +442,36 @@ public void testPathInException() throws Exception { "Directory/File does not exist /mount/file", () -> routerFs.setOwner(new Path("/mount/file"), "user", "group")); } + + /** + * Regression test for HDFS-14369. + * Verify that getListing works with the path with trailing slash. 
+ */ + @Test + public void testGetListingWithTrailingSlash() throws IOException { + try { + // Add mount table entry + MountTable addEntry = MountTable.newInstance("/testlist", + Collections.singletonMap("ns0", "/testlist")); + assertTrue(addMountTable(addEntry)); + addEntry = MountTable.newInstance("/testlist/tmp0", + Collections.singletonMap("ns0", "/testlist/tmp0")); + assertTrue(addMountTable(addEntry)); + addEntry = MountTable.newInstance("/testlist/tmp1", + Collections.singletonMap("ns1", "/testlist/tmp1")); + assertTrue(addMountTable(addEntry)); + + nnFs0.mkdirs(new Path("/testlist/tmp0")); + nnFs1.mkdirs(new Path("/testlist/tmp1")); + // Fetch listing + DirectoryListing list = routerProtocol.getListing( + "/testlist/", HdfsFileStatus.EMPTY_NAME, false); + HdfsFileStatus[] statuses = list.getPartialListing(); + // should return tmp0 and tmp1 + assertEquals(2, statuses.length); + } finally { + nnFs0.delete(new Path("/testlist/tmp0"), true); + nnFs1.delete(new Path("/testlist/tmp1"), true); + } + } } \ No newline at end of file From de7da9b69ed0f1f3036c31cb2c6072c02d5e76cc Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Tue, 16 Apr 2019 19:45:51 +0530 Subject: [PATCH 0317/1308] HDFS-14422. RBF: Router shouldn't allow READ operations in safe mode. Contributed by Inigo Goiri. --- .../resolver/MountTableResolver.java | 18 +++++++- .../federation/router/RouterRpcServer.java | 15 ++++++- .../federation/router/TestRouterSafemode.java | 44 +++++++++++++++++++ 3 files changed, 74 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java index 03b051db34e9d..8baa5e22a1d10 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java @@ -87,6 +87,8 @@ public class MountTableResolver /** If the tree has been initialized. */ private boolean init = false; + /** If the mount table is manually disabled*/ + private boolean disabled = false; /** Path -> Remote HDFS location. */ private final TreeMap tree = new TreeMap<>(); /** Path -> Remote location. */ @@ -391,7 +393,14 @@ public PathLocation call() throws Exception { }; return this.locationCache.get(path, meh); } catch (ExecutionException e) { - throw new IOException(e); + Throwable cause = e.getCause(); + final IOException ioe; + if (cause instanceof IOException) { + ioe = (IOException) cause; + } else { + ioe = new IOException(cause); + } + throw ioe; } finally { readLock.unlock(); } @@ -504,7 +513,7 @@ public List getMounts(final String path) throws IOException { * @throws StateStoreUnavailableException If it cannot connect to the store. 
*/ private void verifyMountTable() throws StateStoreUnavailableException { - if (!this.init) { + if (!this.init || disabled) { throw new StateStoreUnavailableException("Mount Table not initialized"); } } @@ -654,4 +663,9 @@ public boolean isDefaultNSEnable() { public void setDefaultNSEnable(boolean defaultNSRWEnable) { this.defaultNSEnable = defaultNSRWEnable; } + + @VisibleForTesting + public void setDisabled(boolean disable) { + this.disabled = disable; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index b934355dc9e5b..3a2f910da471c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -114,6 +114,7 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; import org.apache.hadoop.hdfs.server.federation.router.security.RouterSecurityManager; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; @@ -480,17 +481,26 @@ void checkOperation(OperationCategory op) // Store the category of the operation category for this thread opCategory.set(op); - // We allow unchecked and read operations + // We allow unchecked and read operations to try, fail later if (op == OperationCategory.UNCHECKED || op == OperationCategory.READ) { return; } + checkSafeMode(); + } + /** + * Check if the Router is in safe mode. + * @throws StandbyException If the Router is in safe mode and cannot serve + * client requests. 
+ */ + private void checkSafeMode() throws StandbyException { RouterSafemodeService safemodeService = router.getSafemodeService(); if (safemodeService != null && safemodeService.isInSafeMode()) { // Throw standby exception, router is not available if (rpcMonitor != null) { rpcMonitor.routerFailureSafemode(); } + OperationCategory op = opCategory.get(); throw new StandbyException("Router " + router.getRouterId() + " is in safe mode and cannot handle " + op + " requests"); } @@ -1469,6 +1479,9 @@ protected List getLocationsForPath(String path, if (this.rpcMonitor != null) { this.rpcMonitor.routerFailureStateStore(); } + if (ioe instanceof StateStoreUnavailableException) { + checkSafeMode(); + } throw ioe; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java index 9c1aeb2b3f06d..75104bd1933b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java @@ -34,9 +34,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException; +import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest; import org.apache.hadoop.hdfs.tools.federation.RouterAdmin; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.service.Service.STATE; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; import org.junit.After; @@ -234,4 +238,44 @@ private void verifyRouter(RouterServiceState status) throws IllegalStateException, IOException { assertEquals(status, router.getRouterState()); } + + @Test + public void testRouterNotInitMountTable() throws Exception { + + // Manually disable the mount table to trigger unavailable exceptions + MountTableResolver mountTable = + (MountTableResolver)router.getSubclusterResolver(); + mountTable.setDisabled(true); + + // Wait until it gets out of safe mode + int interval = 2 * (int)conf.getTimeDuration(DFS_ROUTER_SAFEMODE_EXTENSION, + TimeUnit.SECONDS.toMillis(2), TimeUnit.MILLISECONDS); + GenericTestUtils.waitFor( + () -> router.getRouterState() == RouterServiceState.RUNNING, + 100, interval); + + // Getting file info should fail + try { + router.getRpcServer().getFileInfo("/mnt/file.txt"); + fail("We should have thrown StateStoreUnavailableException"); + } catch (StateStoreUnavailableException e) { + assertEquals("Mount Table not initialized", e.getMessage()); + } + + // Enter safe mode + RouterAdminServer admin = router.getAdminServer(); + EnterSafeModeRequest request = EnterSafeModeRequest.newInstance(); + admin.enterSafeMode(request); + verifyRouter(RouterServiceState.SAFEMODE); + + // This time it should report safe mode + try { + router.getRpcServer().getFileInfo("/mnt/file.txt"); + fail("We should have thrown a safe mode exception"); + } catch (StandbyException e) { + String msg = e.getMessage(); + assertTrue("Wrong message: " + msg, + msg.endsWith("is in safe mode and cannot handle READ requests")); + } + } } From 
021a43b1a4bbc8a68c31461e206214a5eadc38dd Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Wed, 24 Apr 2019 19:35:03 +0530 Subject: [PATCH 0318/1308] HDFS-13972. RBF: Support for Delegation Token (WebHDFS). Contributed by CR Hota. --- .../hdfs/server/federation/router/Router.java | 11 +- .../federation/router/RouterRpcServer.java | 20 ++- .../router/RouterWebHdfsMethods.java | 159 ++++------------- .../security/RouterSecurityManager.java | 41 +++++ ...TestRouterHDFSContractDelegationToken.java | 6 +- .../TestRouterHttpDelegationToken.java | 163 ++++++++++++++++++ .../security/TestRouterSecurityManager.java | 86 ++++++++- 7 files changed, 346 insertions(+), 140 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterHttpDelegationToken.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 9e18ebfb4d80d..7f9c597c13a81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -37,6 +37,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.common.TokenVerifier; import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics; import org.apache.hadoop.hdfs.server.federation.metrics.NamenodeBeanMetrics; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; @@ -76,7 +78,8 @@ */ @InterfaceAudience.Private @InterfaceStability.Evolving -public class Router extends CompositeService { +public class Router extends CompositeService implements + TokenVerifier { private static final Logger LOG = LoggerFactory.getLogger(Router.class); @@ -470,6 +473,12 @@ public InetSocketAddress getHttpServerAddress() { return null; } + @Override + public void verifyToken(DelegationTokenIdentifier tokenId, byte[] password) + throws IOException { + getRpcServer().getRouterSecurityManager().verifyToken(tokenId, password); + } + ///////////////////////////////////////////////////////// // Namenode heartbeat monitors ///////////////////////////////////////////////////////// diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 3a2f910da471c..d35d1f0f61c37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -203,6 +203,9 @@ public class RouterRpcServer extends AbstractService private final RouterClientProtocol clientProto; /** Router security manager to handle token operations. */ private RouterSecurityManager securityManager = null; + /** Super user credentials that a thread may use. */ + private static final ThreadLocal CUR_USER = + new ThreadLocal<>(); /** * Construct a router RPC server. 
@@ -1514,10 +1517,25 @@ private boolean isPathReadOnly(final String path) { * @throws IOException If we cannot get the user information. */ public static UserGroupInformation getRemoteUser() throws IOException { - UserGroupInformation ugi = Server.getRemoteUser(); + UserGroupInformation ugi = CUR_USER.get(); + ugi = (ugi != null) ? ugi : Server.getRemoteUser(); return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser(); } + /** + * Set super user credentials if needed. + */ + static void setCurrentUser(UserGroupInformation ugi) { + CUR_USER.set(ugi); + } + + /** + * Reset to discard super user credentials. + */ + static void resetCurrentUser() { + CUR_USER.set(null); + } + /** * Merge the outputs from multiple namespaces. * diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java index a10764a8fe7ae..6bc6bcc17dcbc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.util.StringUtils.getTrimmedStringCollection; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -27,13 +26,10 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.common.JspHelper; -import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; -import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; -import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.federation.router.security.RouterSecurityManager; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; import javax.ws.rs.Path; import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; @@ -42,7 +38,6 @@ import com.sun.jersey.spi.container.ResourceFilters; import org.apache.hadoop.hdfs.web.JsonUtil; import org.apache.hadoop.hdfs.web.ParamFilter; -import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.resources.AccessTimeParam; import org.apache.hadoop.hdfs.web.resources.AclPermissionParam; @@ -91,6 +86,7 @@ import org.apache.hadoop.ipc.ExternalCall; import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.net.Node; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -99,12 +95,8 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.net.HttpURLConnection; import java.net.URI; import java.net.URISyntaxException; -import java.net.URL; -import java.net.URLDecoder; -import java.security.PrivilegedAction; import java.util.Collection; import java.util.HashSet; import java.util.List; @@ -224,7 +216,11 @@ protected Response put( 
case CREATE: { final Router router = getRouter(); - final URI uri = redirectURI(router, fullpath); + final URI uri = redirectURI(router, ugi, delegation, username, + doAsUser, fullpath, op.getValue(), -1L, + exclDatanodes.getValue(), permission, unmaskedPermission, + overwrite, bufferSize, replication, blockSize, createParent, + createFlagParam); if (!noredirectParam.getValue()) { return Response.temporaryRedirect(uri) .type(MediaType.APPLICATION_OCTET_STREAM).build(); @@ -366,6 +362,7 @@ protected Response get( return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); } } + case GETDELEGATIONTOKEN: case GET_BLOCK_LOCATIONS: case GETFILESTATUS: case LISTSTATUS: @@ -389,104 +386,6 @@ protected Response get( } } - /** - * Get the redirect URI from the Namenode responsible for a path. - * @param router Router to check. - * @param path Path to get location for. - * @return URI returned by the Namenode. - * @throws IOException If it cannot get the redirect URI. - */ - private URI redirectURI(final Router router, final String path) - throws IOException { - // Forward the request to the proper Namenode - final HttpURLConnection conn = forwardRequest(router, path); - try { - conn.setInstanceFollowRedirects(false); - conn.setDoOutput(true); - conn.connect(); - - // Read the reply from the Namenode - int responseCode = conn.getResponseCode(); - if (responseCode != HttpServletResponse.SC_TEMPORARY_REDIRECT) { - LOG.info("We expected a redirection from the Namenode, not {}", - responseCode); - return null; - } - - // Extract the redirect location and return it - String redirectLocation = conn.getHeaderField("Location"); - try { - // We modify the namenode location and the path - redirectLocation = redirectLocation - .replaceAll("(?<=[?&;])namenoderpcaddress=.*?(?=[&;])", - "namenoderpcaddress=" + router.getRouterId()) - .replaceAll("(?<=[/])webhdfs/v1/.*?(?=[?])", - "webhdfs/v1" + path); - return new URI(redirectLocation); - } catch (URISyntaxException e) { - LOG.error("Cannot parse redirect location {}", redirectLocation); - } - } finally { - if (conn != null) { - conn.disconnect(); - } - } - return null; - } - - /** - * Forwards a request to a subcluster. - * @param router Router to check. - * @param path Path in HDFS. - * @return Reply from the subcluster. 
- * @throws IOException - */ - private HttpURLConnection forwardRequest( - final Router router, final String path) throws IOException { - final Configuration conf = - (Configuration)getContext().getAttribute(JspHelper.CURRENT_CONF); - URLConnectionFactory connectionFactory = - URLConnectionFactory.newDefaultURLConnectionFactory(conf); - - // Find the namespace responsible for a path - final RouterRpcServer rpcServer = getRPCServer(router); - RemoteLocation createLoc = rpcServer.getCreateLocation(path); - String nsId = createLoc.getNameserviceId(); - String dest = createLoc.getDest(); - ActiveNamenodeResolver nnResolver = router.getNamenodeResolver(); - List namenodes = - nnResolver.getNamenodesForNameserviceId(nsId); - - // Go over the namenodes responsible for that namespace - for (FederationNamenodeContext namenode : namenodes) { - try { - // Generate the request for the namenode - String nnWebAddress = namenode.getWebAddress(); - String[] nnWebAddressSplit = nnWebAddress.split(":"); - String host = nnWebAddressSplit[0]; - int port = Integer.parseInt(nnWebAddressSplit[1]); - - // Avoid double-encoding here - query = URLDecoder.decode(query, "UTF-8"); - URI uri = new URI(getScheme(), null, host, port, - reqPath + dest, query, null); - URL url = uri.toURL(); - - // Send a request to the proper Namenode - final HttpURLConnection conn = - (HttpURLConnection)connectionFactory.openConnection(url); - conn.setRequestMethod(method); - - connectionFactory.destroy(); - return conn; - } catch (Exception e) { - LOG.error("Cannot redirect request to {}", namenode, e); - } - } - connectionFactory.destroy(); - return null; - } - /** * Get a URI to redirect an operation to. * @param router Router to check. @@ -526,7 +425,7 @@ private URI redirectURI(final Router router, final UserGroupInformation ugi, } else { // generate a token final Token t = generateDelegationToken( - router, ugi, request.getUserPrincipal().getName()); + ugi, ugi.getUserName()); delegationQuery = "&delegation=" + t.encodeToUrlString(); } @@ -552,19 +451,17 @@ private DatanodeInfo chooseDatanode(final Router router, // We need to get the DNs as a privileged user final RouterRpcServer rpcServer = getRPCServer(router); UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); + RouterRpcServer.setCurrentUser(loginUser); - DatanodeInfo[] dns = loginUser.doAs( - new PrivilegedAction() { - @Override - public DatanodeInfo[] run() { - try { - return rpcServer.getDatanodeReport(DatanodeReportType.LIVE); - } catch (IOException e) { - LOG.error("Cannot get the datanodes from the RPC server", e); - return null; - } - } - }); + DatanodeInfo[] dns = null; + try { + dns = rpcServer.getDatanodeReport(DatanodeReportType.LIVE); + } catch (IOException e) { + LOG.error("Cannot get the datanodes from the RPC server", e); + } finally { + // Reset ugi to remote user for remaining operations. + RouterRpcServer.resetCurrentUser(); + } HashSet excludes = new HashSet(); if (excludeDatanodes != null) { @@ -646,17 +543,19 @@ private static DatanodeInfo getRandomDatanode( } /** - * Generate the delegation tokens for this request. - * @param router Router. + * Generate the credentials for this request. * @param ugi User group information. * @param renewer Who is asking for the renewal. - * @return The delegation tokens. - * @throws IOException If it cannot create the tokens. + * @return Credentials holding delegation token. + * @throws IOException If it cannot create the credentials. 
*/ - private Token generateDelegationToken( - final Router router, final UserGroupInformation ugi, + @Override + public Credentials createCredentials( + final UserGroupInformation ugi, final String renewer) throws IOException { - throw new UnsupportedOperationException("TODO Generate token for ugi=" + - ugi + " request=" + request); + final Router router = (Router)getContext().getAttribute("name.node"); + final Credentials c = RouterSecurityManager.createCredentials(router, ugi, + renewer != null? renewer: ugi.getShortUserName()); + return c; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java index dcfaa444d4de4..c367ed8190756 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java @@ -24,8 +24,10 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; +import org.apache.hadoop.hdfs.server.federation.router.Router; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; @@ -36,6 +38,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.net.InetSocketAddress; import java.lang.reflect.Constructor; /** @@ -183,6 +186,12 @@ public Token getDelegationToken(Text renewer) return token; } + /** + * @param token token to renew + * @return new expiryTime of the token + * @throws SecretManager.InvalidToken if {@code token} is invalid + * @throws IOException on errors + */ public long renewDelegationToken(Token token) throws SecretManager.InvalidToken, IOException { LOG.debug("Renew delegation token"); @@ -211,6 +220,10 @@ public long renewDelegationToken(Token token) return expiryTime; } + /** + * @param token token to cancel + * @throws IOException on error + */ public void cancelDelegationToken(Token token) throws IOException { LOG.debug("Cancel delegation token"); @@ -233,6 +246,34 @@ public void cancelDelegationToken(Token token) } } + /** + * A utility method for creating credentials. + * Used by web hdfs to return url encoded token. + */ + public static Credentials createCredentials( + final Router router, final UserGroupInformation ugi, + final String renewer) throws IOException { + final Token token = + router.getRpcServer().getDelegationToken(new Text(renewer)); + if (token == null) { + return null; + } + final InetSocketAddress addr = router.getRpcServerAddress(); + SecurityUtil.setTokenService(token, addr); + final Credentials c = new Credentials(); + c.addToken(new Text(ugi.getShortUserName()), token); + return c; + } + + /** + * Delegation token verification. + * Used by web hdfs to verify url encoded token. 
+ */ + public void verifyToken(DelegationTokenIdentifier identifier, + byte[] password) throws SecretManager.InvalidToken { + this.dtSecretManager.verifyToken(identifier, password); + } + /** * Log status of delegation token related operation. * Extend in future to use audit logger instead of local logging. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java index e4c03e462e09b..062079f1760e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java @@ -83,9 +83,11 @@ public void testRouterDelegationToken() throws Exception { assertTrue(identifier.getMaxDate() >= identifier.getIssueDate()); // Renew delegation token - token.renew(initSecurity()); + long expiryTime = token.renew(initSecurity()); assertNotNull(token); - assertTrue(token.decodeIdentifier().getMaxDate() >= existingMaxTime); + assertEquals(existingMaxTime, token.decodeIdentifier().getMaxDate()); + // Expiry time after renewal should never exceed max time of the token. + assertTrue(expiryTime <= existingMaxTime); // Renewal should retain old master key id and sequence number identifier = token.decodeIdentifier(); assertEquals(identifier.getMasterKeyId(), masterKeyId); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterHttpDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterHttpDelegationToken.java new file mode 100644 index 0000000000000..409594e20dc8a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterHttpDelegationToken.java @@ -0,0 +1,163 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. 
+ */ + +package org.apache.hadoop.hdfs.server.federation.security; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.util.Enumeration; +import java.util.Properties; + +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.SWebHdfs; +import org.apache.hadoop.fs.contract.router.SecurityConfUtil; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; +import org.apache.hadoop.hdfs.server.federation.router.Router; +import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; +import org.apache.hadoop.security.token.SecretManager.InvalidToken; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + + +/** + * Test Delegation Tokens from the Router HTTP interface. + */ +public class TestRouterHttpDelegationToken { + + private Router router; + private WebHdfsFileSystem fs; + + /** + * Custom filter to be able to test auth methods and let the other ones go. + */ + public static final class NoAuthFilter extends AuthenticationFilter { + @Override + protected Properties getConfiguration(String configPrefix, + FilterConfig filterConfig) throws ServletException { + Properties props = new Properties(); + Enumeration names = filterConfig.getInitParameterNames(); + while (names.hasMoreElements()) { + String name = (String) names.nextElement(); + if (name.startsWith(configPrefix)) { + String value = filterConfig.getInitParameter(name); + props.put(name.substring(configPrefix.length()), value); + } + } + props.put(AuthenticationFilter.AUTH_TYPE, "simple"); + props.put(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true"); + return props; + } + } + + @Before + public void setup() throws Exception { + Configuration conf = SecurityConfUtil.initSecurity(); + conf.set(RBFConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY, "0.0.0.0:0"); + conf.set(RBFConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_KEY, "0.0.0.0:0"); + conf.set(RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "0.0.0.0:0"); + conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY, + NoAuthFilter.class.getName()); + + // Start routers with an RPC and HTTP service only + Configuration routerConf = new RouterConfigBuilder() + .rpc() + .http() + .build(); + + conf.addResource(routerConf); + router = new Router(); + router.init(conf); + router.start(); + + InetSocketAddress webAddress = router.getHttpServerAddress(); + URI webURI = new URI(SWebHdfs.SCHEME, null, + webAddress.getHostName(), webAddress.getPort(), null, null, null); + fs = (WebHdfsFileSystem)FileSystem.get(webURI, conf); + } + + @After + public void cleanup() throws Exception { + if (router != null) { + router.stop(); + router.close(); + } + } + + @Test + public void testGetDelegationToken() throws Exception { + final String renewer = 
"renewer0"; + Token token = fs.getDelegationToken(renewer); + assertNotNull(token); + + DelegationTokenIdentifier tokenId = + getTokenIdentifier(token.getIdentifier()); + assertEquals("router", tokenId.getOwner().toString()); + assertEquals(renewer, tokenId.getRenewer().toString()); + assertEquals("", tokenId.getRealUser().toString()); + assertEquals("SWEBHDFS delegation", token.getKind().toString()); + assertNotNull(token.getPassword()); + + InetSocketAddress webAddress = router.getHttpServerAddress(); + assertEquals(webAddress.getHostName() + ":" + webAddress.getPort(), + token.getService().toString()); + } + + @Test + public void testRenewDelegationToken() throws Exception { + Token token = fs.getDelegationToken("router"); + DelegationTokenIdentifier tokenId = + getTokenIdentifier(token.getIdentifier()); + + long t = fs.renewDelegationToken(token); + assertTrue(t + " should not be larger than " + tokenId.getMaxDate(), + t <= tokenId.getMaxDate()); + } + + @Test + public void testCancelDelegationToken() throws Exception { + Token token = fs.getDelegationToken("router"); + fs.cancelDelegationToken(token); + LambdaTestUtils.intercept(InvalidToken.class, + "Renewal request for unknown token", + () -> fs.renewDelegationToken(token)); + } + + private DelegationTokenIdentifier getTokenIdentifier(byte[] id) + throws IOException { + DelegationTokenIdentifier identifier = new DelegationTokenIdentifier(); + ByteArrayInputStream bais = new ByteArrayInputStream(id); + DataInputStream dais = new DataInputStream(bais); + identifier.readFields(dais); + return identifier; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java index fe6e0eea91c31..cc8cd1bf64817 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java @@ -18,9 +18,15 @@ package org.apache.hadoop.hdfs.server.federation.security; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.router.RouterHDFSContract; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; import org.apache.hadoop.hdfs.server.federation.router.security.RouterSecurityManager; +import org.apache.hadoop.hdfs.server.federation.router.Router; import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; @@ -31,7 +37,10 @@ import org.junit.Test; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertEquals; +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; +import org.hamcrest.core.StringContains; import java.io.IOException; import org.slf4j.Logger; @@ -64,21 +73,19 @@ public static void createMockSecretManager() throws IOException { @Test public void testDelegationTokens() throws IOException { - String[] groupsForTesting = new String[1]; - groupsForTesting[0] = "router_group"; + 
UserGroupInformation.reset(); UserGroupInformation.setLoginUser(UserGroupInformation - .createUserForTesting("router", groupsForTesting)); + .createUserForTesting("router", getUserGroupForTesting())); // Get a delegation token Token token = securityManager.getDelegationToken(new Text("some_renewer")); assertNotNull(token); - // Renew the delegation token UserGroupInformation.setLoginUser(UserGroupInformation - .createUserForTesting("some_renewer", groupsForTesting)); + .createUserForTesting("some_renewer", getUserGroupForTesting())); long updatedExpirationTime = securityManager.renewDelegationToken(token); - assertTrue(updatedExpirationTime >= token.decodeIdentifier().getMaxDate()); + assertTrue(updatedExpirationTime <= token.decodeIdentifier().getMaxDate()); // Cancel the delegation token securityManager.cancelDelegationToken(token); @@ -90,4 +97,71 @@ public void testDelegationTokens() throws IOException { // This throws an exception as token has been cancelled. securityManager.renewDelegationToken(token); } + + @Test + public void testVerifyToken() throws IOException { + UserGroupInformation.reset(); + UserGroupInformation.setLoginUser(UserGroupInformation + .createUserForTesting("router", getUserGroupForTesting())); + + // Get a delegation token + Token token = + securityManager.getDelegationToken(new Text("some_renewer")); + assertNotNull(token); + + // Verify the password in delegation token + securityManager.verifyToken(token.decodeIdentifier(), + token.getPassword()); + + // Verify an invalid password + String exceptionCause = "password doesn't match"; + exceptionRule.expect(SecretManager.InvalidToken.class); + exceptionRule.expectMessage( + StringContains.containsString(exceptionCause)); + + securityManager.verifyToken(token.decodeIdentifier(), new byte[10]); + } + + @Test + public void testCreateCredentials() throws Exception { + Configuration conf = initSecurity(); + + // Start routers with only an RPC service + Configuration routerConf = new RouterConfigBuilder() + .metrics() + .rpc() + .build(); + + conf.addResource(routerConf); + Router router = new Router(); + router.init(conf); + router.start(); + + UserGroupInformation ugi = + UserGroupInformation.createUserForTesting( + "router", getUserGroupForTesting()); + Credentials creds = RouterSecurityManager.createCredentials( + router, ugi, "some_renewer"); + for (Token token : creds.getAllTokens()) { + assertNotNull(token); + // Verify properties of the token + assertEquals("HDFS_DELEGATION_TOKEN", token.getKind().toString()); + DelegationTokenIdentifier identifier = (DelegationTokenIdentifier) + token.decodeIdentifier(); + assertNotNull(identifier); + String owner = identifier.getOwner().toString(); + // Windows will not reverse name lookup "127.0.0.1" to "localhost". + String host = Path.WINDOWS ? "127.0.0.1" : "localhost"; + String expectedOwner = "router/"+ host + "@EXAMPLE.COM"; + assertEquals(expectedOwner, owner); + assertEquals("some_renewer", identifier.getRenewer().toString()); + } + RouterHDFSContract.destroyCluster(); + } + + + private static String[] getUserGroupForTesting() { + String[] groupsForTesting = {"router_group"}; + return groupsForTesting; + } } From b522b52bb1da1692d9851b0aa8f482e3c2777864 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Thu, 25 Apr 2019 20:35:41 +0530 Subject: [PATCH 0319/1308] HDFS-14457. RBF: Add order text SPACE in CLI command 'hdfs dfsrouteradmin'. Contributed by luhuachao. 
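For context, a minimal sketch of how a mount entry using the new SPACE destination order could be created through the router admin API; it mirrors the MountTable and AddMountTableEntryRequest calls exercised by the test utilities later in this series. The "/data" path and the "ns0"/"ns1" name services are illustrative assumptions, not part of this patch.

    // Sketch only: register a mount point whose destination is chosen by
    // available space. Assumes "adminClient" is a RouterClient obtained from a
    // running Router, as done by the test utilities later in this series.
    MountTableManager mountTable = adminClient.getMountTableManager();
    Map<String, String> destinations = new HashMap<>();
    destinations.put("ns0", "/data");   // hypothetical subcluster -> path mapping
    destinations.put("ns1", "/data");
    MountTable entry = MountTable.newInstance("/data", destinations);
    entry.setDestOrder(DestinationOrder.SPACE);  // order value surfaced by this change
    AddMountTableEntryRequest request =
        AddMountTableEntryRequest.newInstance(entry);
    boolean added = mountTable.addMountTableEntry(request).getStatus();

The equivalent CLI form is the usage string updated below, i.e. passing -order SPACE to the -add or -update subcommand of hdfs dfsrouteradmin.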
--- .../hadoop/hdfs/tools/federation/RouterAdmin.java | 7 ++++--- .../server/federation/router/TestRouterAdminCLI.java | 12 ++++++++---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 8f6d917d3a6ef..4b5053447514f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -140,13 +140,14 @@ private String getUsage(String cmd) { } if (cmd.equals("-add")) { return "\t[-add " - + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + + "[-readonly] [-faulttolerant] " + + "[-order HASH|LOCAL|RANDOM|HASH_ALL|SPACE] " + "-owner -group -mode ]"; } else if (cmd.equals("-update")) { return "\t[-update " + " [ ] " - + "[-readonly true|false] [-faulttolerant true|false]" - + " [-order HASH|LOCAL|RANDOM|HASH_ALL] " + + "[-readonly true|false] [-faulttolerant true|false] " + + "[-order HASH|LOCAL|RANDOM|HASH_ALL|SPACE] " + "-owner -group -mode ]"; } else if (cmd.equals("-rm")) { return "\t[-rm ]"; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index ce260ec0976fa..5f94574178eda 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -243,6 +243,7 @@ public void testAddOrderMountTable() throws Exception { testAddOrderMountTable(DestinationOrder.LOCAL); testAddOrderMountTable(DestinationOrder.RANDOM); testAddOrderMountTable(DestinationOrder.HASH_ALL); + testAddOrderMountTable(DestinationOrder.SPACE); } @Test @@ -529,7 +530,7 @@ public void testInvalidArgumentMessage() throws Exception { assertTrue("Wrong message: " + out, out.toString().contains( "\t[-add " + "[-readonly] [-faulttolerant] " - + "[-order HASH|LOCAL|RANDOM|HASH_ALL] " + + "[-order HASH|LOCAL|RANDOM|HASH_ALL|SPACE] " + "-owner -group -mode ]")); out.reset(); @@ -538,7 +539,7 @@ public void testInvalidArgumentMessage() throws Exception { assertTrue("Wrong message: " + out, out.toString().contains( "\t[-update [ ] " + "[-readonly true|false] [-faulttolerant true|false] " - + "[-order HASH|LOCAL|RANDOM|HASH_ALL] " + + "[-order HASH|LOCAL|RANDOM|HASH_ALL|SPACE] " + "-owner -group -mode ]")); out.reset(); @@ -579,11 +580,13 @@ public void testInvalidArgumentMessage() throws Exception { assertEquals(-1, ToolRunner.run(admin, argv)); String expected = "Usage: hdfs dfsrouteradmin :\n" + "\t[-add " - + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + + "[-readonly] [-faulttolerant] " + + "[-order HASH|LOCAL|RANDOM|HASH_ALL|SPACE] " + "-owner -group -mode ]\n" + "\t[-update [ " + "] [-readonly true|false]" - + " [-faulttolerant true|false] [-order HASH|LOCAL|RANDOM|HASH_ALL] " + + " [-faulttolerant true|false] " + + "[-order HASH|LOCAL|RANDOM|HASH_ALL|SPACE] " + "-owner -group -mode ]\n" + "\t[-rm ]\n" + "\t[-ls ]\n" + "\t[-getDestination ]\n" @@ -1139,6 +1142,7 @@ public void 
testUpdateOrderMountTable() throws Exception { testUpdateOrderMountTable(DestinationOrder.LOCAL); testUpdateOrderMountTable(DestinationOrder.RANDOM); testUpdateOrderMountTable(DestinationOrder.HASH_ALL); + testUpdateOrderMountTable(DestinationOrder.SPACE); } @Test From 203664e6b258b642239651fa6a17fd2561b903d2 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Fri, 3 May 2019 04:54:09 +0530 Subject: [PATCH 0320/1308] HDFS-14454. RBF: getContentSummary() should allow non-existing folders. Contributed by Inigo Goiri. --- .../federation/router/RemoteResult.java | 84 ++++++++ .../router/RouterClientProtocol.java | 65 +++--- .../federation/router/RouterRpcClient.java | 79 +++++--- .../federation/FederationTestUtils.java | 128 ++++++++++++ .../hdfs/server/federation/MockNamenode.java | 109 ++++++++++ .../router/TestRouterFaultTolerant.java | 186 +++++++----------- .../router/TestRouterMissingFolderMulti.java | 182 +++++++++++++++++ 7 files changed, 670 insertions(+), 163 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteResult.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMissingFolderMulti.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteResult.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteResult.java new file mode 100644 index 0000000000000..2fbcf42612d53 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteResult.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import java.io.IOException; + +/** + * Result from a remote location. + * It includes the exception if there was any error. + * @param Type of the remote location. + * @param Type of the result. + */ +public class RemoteResult { + /** The remote location. */ + private final T loc; + /** The result from the remote location. */ + private final R result; + /** If the result is set; used for void types. */ + private final boolean resultSet; + /** The exception if we couldn't get the result. 
*/ + private final IOException ioe; + + public RemoteResult(T location, R r) { + this.loc = location; + this.result = r; + this.resultSet = true; + this.ioe = null; + } + + public RemoteResult(T location, IOException e) { + this.loc = location; + this.result = null; + this.resultSet = false; + this.ioe = e; + } + + public T getLocation() { + return loc; + } + + public boolean hasResult() { + return resultSet; + } + + public R getResult() { + return result; + } + + public boolean hasException() { + return getException() != null; + } + + public IOException getException() { + return ioe; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder() + .append("loc=").append(getLocation()); + if (hasResult()) { + sb.append(" result=").append(getResult()); + } + if (hasException()) { + sb.append(" exception=").append(getException()); + } + return sb.toString(); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 6039083a73535..f1f1c420b403a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -728,9 +728,9 @@ public DirectoryListing getListing(String src, byte[] startAfter, RemoteMethod method = new RemoteMethod("getListing", new Class[] {String.class, startAfter.getClass(), boolean.class}, new RemoteParam(), startAfter, needLocation); - Map listings = - rpcClient.invokeConcurrent(locations, method, - !this.allowPartialList, false, DirectoryListing.class); + final List> listings = + rpcClient.invokeConcurrent( + locations, method, false, -1, DirectoryListing.class); Map nnListing = new TreeMap<>(); int totalRemainingEntries = 0; @@ -739,13 +739,17 @@ public DirectoryListing getListing(String src, byte[] startAfter, if (listings != null) { // Check the subcluster listing with the smallest name String lastName = null; - for (Map.Entry entry : - listings.entrySet()) { - RemoteLocation location = entry.getKey(); - DirectoryListing listing = entry.getValue(); - if (listing == null) { - LOG.debug("Cannot get listing from {}", location); - } else { + for (RemoteResult result : listings) { + if (result.hasException()) { + IOException ioe = result.getException(); + if (ioe instanceof FileNotFoundException) { + RemoteLocation location = result.getLocation(); + LOG.debug("Cannot get listing from {}", location); + } else if (!allowPartialList) { + throw ioe; + } + } else if (result.getResult() != null) { + DirectoryListing listing = result.getResult(); totalRemainingEntries += listing.getRemainingEntries(); HdfsFileStatus[] partialListing = listing.getPartialListing(); int length = partialListing.length; @@ -760,13 +764,14 @@ public DirectoryListing getListing(String src, byte[] startAfter, } // Add existing entries - for (Object value : listings.values()) { - DirectoryListing listing = (DirectoryListing) value; + for (RemoteResult result : listings) { + DirectoryListing listing = result.getResult(); if (listing != null) { namenodeListingExists = true; for (HdfsFileStatus file : listing.getPartialListing()) { String filename = file.getLocalName(); - if (totalRemainingEntries > 0 && filename.compareTo(lastName) > 0) { + if 
(totalRemainingEntries > 0 && + filename.compareTo(lastName) > 0) { // Discarding entries further than the lastName remainingEntries++; } else { @@ -1110,19 +1115,26 @@ public ContentSummary getContentSummary(String path) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); // Get the summaries from regular files - Collection summaries = new LinkedList<>(); + final Collection summaries = new ArrayList<>(); + final List locations = + rpcServer.getLocationsForPath(path, false); + final RemoteMethod method = new RemoteMethod("getContentSummary", + new Class[] {String.class}, new RemoteParam()); + final List> results = + rpcClient.invokeConcurrent(locations, method, + false, -1, ContentSummary.class); FileNotFoundException notFoundException = null; - try { - final List locations = - rpcServer.getLocationsForPath(path, false); - RemoteMethod method = new RemoteMethod("getContentSummary", - new Class[] {String.class}, new RemoteParam()); - Map results = - rpcClient.invokeConcurrent(locations, method, - !this.allowPartialList, false, ContentSummary.class); - summaries.addAll(results.values()); - } catch (FileNotFoundException e) { - notFoundException = e; + for (RemoteResult result : results) { + if (result.hasException()) { + IOException ioe = result.getException(); + if (ioe instanceof FileNotFoundException) { + notFoundException = (FileNotFoundException)ioe; + } else if (!allowPartialList) { + throw ioe; + } + } else if (result.getResult() != null) { + summaries.add(result.getResult()); + } } // Add mount points at this level in the tree @@ -1131,7 +1143,8 @@ public ContentSummary getContentSummary(String path) throws IOException { for (String child : children) { Path childPath = new Path(path, child); try { - ContentSummary mountSummary = getContentSummary(childPath.toString()); + ContentSummary mountSummary = getContentSummary( + childPath.toString()); if (mountSummary != null) { summaries.add(mountSummary); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index 730952b9db691..19aa13ac7e4b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -1083,11 +1083,58 @@ public Map invokeConcurrent( * @throws IOException If requiredResponse=true and any of the calls throw an * exception. */ - @SuppressWarnings("unchecked") public Map invokeConcurrent( final Collection locations, final RemoteMethod method, boolean requireResponse, boolean standby, long timeOutMs, Class clazz) throws IOException { + final List> results = invokeConcurrent( + locations, method, standby, timeOutMs, clazz); + + final Map ret = new TreeMap<>(); + for (final RemoteResult result : results) { + // Response from all servers required, use this error. 
+ if (requireResponse && result.hasException()) { + throw result.getException(); + } + if (result.hasResult()) { + ret.put(result.getLocation(), result.getResult()); + } + } + + // Throw the exception for the first location if there are no results + if (ret.isEmpty()) { + final RemoteResult result = results.get(0); + if (result.hasException()) { + throw result.getException(); + } + } + + return ret; + } + + /** + * Invokes multiple concurrent proxy calls to different clients. Returns an + * array of results. + * + * Re-throws exceptions generated by the remote RPC call as either + * RemoteException or IOException. + * + * @param The type of the remote location. + * @param The type of the remote method return + * @param locations List of remote locations to call concurrently. + * @param method The remote method and parameters to invoke. + * @param standby If the requests should go to the standby namenodes too. + * @param timeOutMs Timeout for each individual call. + * @param clazz Type of the remote return type. + * @return Result of invoking the method per subcluster (list of results). + * This includes the exception for each remote location. + * @throws IOException If there are errors invoking the method. + */ + @SuppressWarnings("unchecked") + public List> + invokeConcurrent(final Collection locations, + final RemoteMethod method, boolean standby, long timeOutMs, + Class clazz) throws IOException { final UserGroupInformation ugi = RouterRpcServer.getRemoteUser(); final Method m = method.getMethod(); @@ -1103,8 +1150,9 @@ public Map invokeConcurrent( try { Class proto = method.getProtocol(); Object[] paramList = method.getParams(location); - Object result = invokeMethod(ugi, namenodes, proto, m, paramList); - return Collections.singletonMap(location, (R) result); + R result = (R) invokeMethod(ugi, namenodes, proto, m, paramList); + RemoteResult remoteResult = new RemoteResult<>(location, result); + return Collections.singletonList(remoteResult); } catch (IOException ioe) { // Localize the exception throw processException(ioe, location); @@ -1151,21 +1199,20 @@ public Map invokeConcurrent( } else { futures = executorService.invokeAll(callables); } - Map results = new TreeMap<>(); - Map exceptions = new TreeMap<>(); + List> results = new ArrayList<>(); for (int i=0; i future = futures.get(i); - Object result = future.get(); - results.put(location, (R) result); + R result = (R) future.get(); + results.add(new RemoteResult<>(location, result)); } catch (CancellationException ce) { T loc = orderedLocations.get(i); String msg = "Invocation to \"" + loc + "\" for \"" + method.getMethodName() + "\" timed out"; LOG.error(msg); IOException ioe = new SubClusterTimeoutException(msg); - exceptions.put(location, ioe); + results.add(new RemoteResult<>(location, ioe)); } catch (ExecutionException ex) { Throwable cause = ex.getCause(); LOG.debug("Canot execute {} in {}: {}", @@ -1180,22 +1227,8 @@ public Map invokeConcurrent( m.getName() + ": " + cause.getMessage(), cause); } - // Response from all servers required, use this error. 
- if (requireResponse) { - throw ioe; - } - // Store the exceptions - exceptions.put(location, ioe); - } - } - - // Throw the exception for the first location if there are no results - if (results.isEmpty()) { - T location = orderedLocations.get(0); - IOException ioe = exceptions.get(location); - if (ioe != null) { - throw ioe; + results.add(new RemoteResult<>(location, ioe)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java index 54342240f1532..fd5b23ba85de7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.spy; @@ -31,8 +32,14 @@ import java.lang.management.ManagementFactory; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; +import java.net.InetSocketAddress; +import java.net.URI; +import java.util.Collection; +import java.util.Collections; import java.util.Date; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Random; import javax.management.JMX; @@ -40,6 +47,7 @@ import javax.management.MalformedObjectNameException; import javax.management.ObjectName; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -48,12 +56,18 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; +import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; import org.apache.hadoop.hdfs.server.federation.router.ConnectionManager; +import org.apache.hadoop.hdfs.server.federation.router.Router; +import org.apache.hadoop.hdfs.server.federation.router.RouterClient; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -61,6 +75,12 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; import org.apache.hadoop.hdfs.server.federation.store.RouterStore; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; +import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; import org.apache.hadoop.hdfs.server.federation.store.records.RouterState; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.security.AccessControlException; @@ -412,4 +432,112 @@ public static void transitionClusterNSToActive( listNamenodeContext.get(index).getNamenodeId()); } } + + /** + * Get the file system for HDFS in an RPC port. + * @param rpcPort RPC port. + * @return HDFS file system. + * @throws IOException If it cannot create the file system. + */ + public static FileSystem getFileSystem(int rpcPort) throws IOException { + Configuration conf = new HdfsConfiguration(); + URI uri = URI.create("hdfs://localhost:" + rpcPort); + return DistributedFileSystem.get(uri, conf); + } + + /** + * Get the file system for HDFS for a Router. + * @param router Router. + * @return HDFS file system. + * @throws IOException If it cannot create the file system. + */ + public static FileSystem getFileSystem(final Router router) + throws IOException { + InetSocketAddress rpcAddress = router.getRpcServerAddress(); + int rpcPort = rpcAddress.getPort(); + return getFileSystem(rpcPort); + } + + /** + * Get the admin interface for a Router. + * @param router Router. + * @return Admin interface. + * @throws IOException If it cannot create the admin interface. + */ + public static RouterClient getAdminClient( + final Router router) throws IOException { + Configuration conf = new HdfsConfiguration(); + InetSocketAddress routerSocket = router.getAdminServerAddress(); + return new RouterClient(routerSocket, conf); + } + + /** + * Add a mount table entry in some name services and wait until it is + * available. + * @param router Router to change. + * @param mountPoint Name of the mount point. + * @param order Order of the mount table entry. + * @param nsIds Name service identifiers. + * @throws Exception If the entry could not be created. + */ + public static void createMountTableEntry( + final Router router, + final String mountPoint, final DestinationOrder order, + Collection nsIds) throws Exception { + createMountTableEntry( + Collections.singletonList(router), mountPoint, order, nsIds); + } + + /** + * Add a mount table entry in some name services and wait until it is + * available. + * @param routers List of routers. + * @param mountPoint Name of the mount point. + * @param order Order of the mount table entry. + * @param nsIds Name service identifiers. + * @throws Exception If the entry could not be created. 
+ */ + public static void createMountTableEntry( + final List routers, + final String mountPoint, + final DestinationOrder order, + final Collection nsIds) throws Exception { + Router router = routers.get(0); + RouterClient admin = getAdminClient(router); + MountTableManager mountTable = admin.getMountTableManager(); + Map destMap = new HashMap<>(); + for (String nsId : nsIds) { + destMap.put(nsId, mountPoint); + } + MountTable newEntry = MountTable.newInstance(mountPoint, destMap); + newEntry.setDestOrder(order); + AddMountTableEntryRequest addRequest = + AddMountTableEntryRequest.newInstance(newEntry); + AddMountTableEntryResponse addResponse = + mountTable.addMountTableEntry(addRequest); + boolean created = addResponse.getStatus(); + assertTrue(created); + + refreshRoutersCaches(routers); + + // Check for the path + GetMountTableEntriesRequest getRequest = + GetMountTableEntriesRequest.newInstance(mountPoint); + GetMountTableEntriesResponse getResponse = + mountTable.getMountTableEntries(getRequest); + List entries = getResponse.getEntries(); + assertEquals("Too many entries: " + entries, 1, entries.size()); + assertEquals(mountPoint, entries.get(0).getSourcePath()); + } + + /** + * Refresh the caches of a set of Routers. + * @param routers List of Routers. + */ + public static void refreshRoutersCaches(final List routers) { + for (final Router router : routers) { + StateStoreService stateStore = router.getStateStore(); + stateStore.refreshCaches(true); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java index d8dffeedd18e2..bfa56a2b5d2bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.federation; +import static java.util.Collections.emptySet; +import static java.util.Collections.singletonList; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; @@ -31,14 +33,19 @@ import java.net.ConnectException; import java.net.URI; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.Map.Entry; import java.util.SortedMap; import java.util.concurrent.ConcurrentSkipListMap; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceStatus; @@ -67,6 +74,9 @@ import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; +import org.apache.hadoop.hdfs.server.federation.router.Router; import 
org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; @@ -311,6 +321,9 @@ public void addFileSystemMock() throws IOException { when(l).thenAnswer(invocation -> { String src = getSrc(invocation); LOG.info("{} getListing({})", nsId, src); + if (fs.get(src) == null) { + throw new FileNotFoundException("File does not exist " + src); + } if (!src.endsWith("/")) { src += "/"; } @@ -338,6 +351,15 @@ public void addFileSystemMock() throws IOException { when(c).thenAnswer(invocation -> { String src = getSrc(invocation); LOG.info("{} create({})", nsId, src); + boolean createParent = (boolean)invocation.getArgument(4); + if (createParent) { + Path path = new Path(src).getParent(); + while (!path.isRoot()) { + LOG.info("{} create parent {}", nsId, path); + fs.put(path.toString(), "DIRECTORY"); + path = path.getParent(); + } + } fs.put(src, "FILE"); return getMockHdfsFileStatus(src, "FILE"); }); @@ -375,6 +397,15 @@ public void addFileSystemMock() throws IOException { when(m).thenAnswer(invocation -> { String src = getSrc(invocation); LOG.info("{} mkdirs({})", nsId, src); + boolean createParent = (boolean)invocation.getArgument(2); + if (createParent) { + Path path = new Path(src).getParent(); + while (!path.isRoot()) { + LOG.info("{} mkdir parent {}", nsId, path); + fs.put(path.toString(), "DIRECTORY"); + path = path.getParent(); + } + } fs.put(src, "DIRECTORY"); return true; }); @@ -386,6 +417,39 @@ public void addFileSystemMock() throws IOException { when(defaults.getKeyProviderUri()).thenReturn(nsId); return defaults; }); + when(mockNn.getContentSummary(anyString())).thenAnswer(invocation -> { + String src = getSrc(invocation); + LOG.info("{} getContentSummary({})", nsId, src); + if (fs.get(src) == null) { + throw new FileNotFoundException("File does not exist " + src); + } + if (!src.endsWith("/")) { + src += "/"; + } + Map files = + fs.subMap(src, src + Character.MAX_VALUE); + int numFiles = 0; + int numDirs = 0; + int length = 0; + for (Entry entry : files.entrySet()) { + String file = entry.getKey(); + if (file.substring(src.length()).indexOf('/') < 0) { + String type = entry.getValue(); + if ("DIRECTORY".equals(type)) { + numDirs++; + } else if ("FILE".equals(type)) { + numFiles++; + length += 100; + } + } + } + return new ContentSummary.Builder() + .fileCount(numFiles) + .directoryCount(numDirs) + .length(length) + .erasureCodingPolicy("") + .build(); + }); } private static String getSrc(InvocationOnMock invocation) { @@ -445,4 +509,49 @@ private static LocatedBlock getMockLocatedBlock(final String nsId) { when(lb.getBlockToken()).thenReturn(tok); return lb; } + + /** + * Register a set of NameNodes in a Router. + * @param router Router to register to. + * @param namenodes Set of NameNodes. + * @throws IOException If it cannot register them. + */ + public static void registerSubclusters(Router router, + Collection namenodes) throws IOException { + registerSubclusters(singletonList(router), namenodes, emptySet()); + } + + /** + * Register a set of NameNodes in a set of Routers. + * @param routers Set of Routers. + * @param namenodes Set of NameNodes. + * @param unavailableSubclusters Set of unavailable subclusters. + * @throws IOException If it cannot register them. 
+ */ + public static void registerSubclusters(List routers, + Collection namenodes, + Set unavailableSubclusters) throws IOException { + + for (final Router router : routers) { + MembershipNamenodeResolver resolver = + (MembershipNamenodeResolver) router.getNamenodeResolver(); + for (final MockNamenode nn : namenodes) { + String nsId = nn.getNameserviceId(); + String rpcAddress = "localhost:" + nn.getRPCPort(); + String httpAddress = "localhost:" + nn.getHTTPPort(); + NamenodeStatusReport report = new NamenodeStatusReport( + nsId, null, rpcAddress, rpcAddress, rpcAddress, httpAddress); + if (unavailableSubclusters.contains(nsId)) { + LOG.info("Register {} as UNAVAILABLE", nsId); + report.setRegistrationValid(false); + } else { + LOG.info("Register {} as ACTIVE", nsId); + report.setRegistrationValid(true); + } + report.setNamespaceInfo(new NamespaceInfo(0, nsId, nsId, 0)); + resolver.registerNamenode(report); + } + resolver.loadCache(true); + } + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java index c8f96c659cd9c..39d9561395f82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java @@ -18,6 +18,11 @@ package org.apache.hadoop.hdfs.server.federation.router; import static java.util.Arrays.asList; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createMountTableEntry; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.getAdminClient; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.getFileSystem; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.refreshRoutersCaches; +import static org.apache.hadoop.hdfs.server.federation.MockNamenode.registerSubclusters; import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; @@ -26,8 +31,8 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.URI; +import java.io.PrintWriter; +import java.io.StringWriter; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Collection; @@ -36,21 +41,21 @@ import java.util.List; import java.util.Map; import java.util.Random; -import java.util.Set; import java.util.UUID; import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.federation.MockNamenode; import 
org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; @@ -59,17 +64,12 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver; -import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; -import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; -import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; -import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; -import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.UserGroupInformation; import org.junit.After; @@ -150,7 +150,8 @@ public void setup() throws Exception { } LOG.info("Registering the subclusters in the Routers"); - registerSubclusters(Collections.singleton("ns1")); + registerSubclusters( + routers, namenodes.values(), Collections.singleton("ns1")); LOG.info("Stop ns1 to simulate an unavailable subcluster"); namenodes.get("ns1").stop(); @@ -158,36 +159,6 @@ public void setup() throws Exception { service = Executors.newFixedThreadPool(10); } - /** - * Register the subclusters in all Routers. - * @param unavailableSubclusters Set of unavailable subclusters. - * @throws IOException If it cannot register a subcluster. - */ - private void registerSubclusters(Set unavailableSubclusters) - throws IOException { - for (final Router router : routers) { - MembershipNamenodeResolver resolver = - (MembershipNamenodeResolver) router.getNamenodeResolver(); - for (final MockNamenode nn : namenodes.values()) { - String nsId = nn.getNameserviceId(); - String rpcAddress = "localhost:" + nn.getRPCPort(); - String httpAddress = "localhost:" + nn.getHTTPPort(); - NamenodeStatusReport report = new NamenodeStatusReport( - nsId, null, rpcAddress, rpcAddress, rpcAddress, httpAddress); - if (unavailableSubclusters.contains(nsId)) { - LOG.info("Register {} as UNAVAILABLE", nsId); - report.setRegistrationValid(false); - } else { - LOG.info("Register {} as ACTIVE", nsId); - report.setRegistrationValid(true); - } - report.setNamespaceInfo(new NamespaceInfo(0, nsId, nsId, 0)); - resolver.registerNamenode(report); - } - resolver.loadCache(true); - } - } - @After public void cleanup() throws Exception { LOG.info("Stopping the cluster"); @@ -205,45 +176,6 @@ public void cleanup() throws Exception { } } - /** - * Add a mount table entry in some name services and wait until it is - * available. - * @param mountPoint Name of the mount point. - * @param order Order of the mount table entry. - * @param nsIds Name service identifiers. - * @throws Exception If the entry could not be created. 
- */ - private void createMountTableEntry( - final String mountPoint, final DestinationOrder order, - Collection nsIds) throws Exception { - Router router = getRandomRouter(); - RouterClient admin = getAdminClient(router); - MountTableManager mountTable = admin.getMountTableManager(); - Map destMap = new HashMap<>(); - for (String nsId : nsIds) { - destMap.put(nsId, mountPoint); - } - MountTable newEntry = MountTable.newInstance(mountPoint, destMap); - newEntry.setDestOrder(order); - AddMountTableEntryRequest addRequest = - AddMountTableEntryRequest.newInstance(newEntry); - AddMountTableEntryResponse addResponse = - mountTable.addMountTableEntry(addRequest); - boolean created = addResponse.getStatus(); - assertTrue(created); - - refreshRoutersCaches(); - - // Check for the path - GetMountTableEntriesRequest getRequest = - GetMountTableEntriesRequest.newInstance(mountPoint); - GetMountTableEntriesResponse getResponse = - mountTable.getMountTableEntries(getRequest); - List entries = getResponse.getEntries(); - assertEquals("Too many entries: " + entries, 1, entries.size()); - assertEquals(mountPoint, entries.get(0).getSourcePath()); - } - /** * Update a mount table entry to be fault tolerant. * @param mountPoint Mount point to update. @@ -266,17 +198,7 @@ private void updateMountPointFaultTolerant(final String mountPoint) mountTable.updateMountTableEntry(updateRequest); assertTrue(updateResponse.getStatus()); - refreshRoutersCaches(); - } - - /** - * Refresh the caches of all Routers (to get the mount table). - */ - private void refreshRoutersCaches() { - for (final Router router : routers) { - StateStoreService stateStore = router.getStateStore(); - stateStore.refreshCaches(true); - } + refreshRoutersCaches(routers); } /** @@ -320,8 +242,8 @@ private void testWriteWithFailedSubcluster(final DestinationOrder order) final String mountPoint = "/" + order + "-failsubcluster"; final Path mountPath = new Path(mountPoint); LOG.info("Setup {} with order {}", mountPoint, order); - createMountTableEntry(mountPoint, order, namenodes.keySet()); - + createMountTableEntry( + getRandomRouter(), mountPoint, order, namenodes.keySet()); LOG.info("Write in {} should succeed writing in ns0 and fail for ns1", mountPath); @@ -383,7 +305,14 @@ private void checkDirectoriesFaultTolerant( tasks.add(getListFailTask(router0Fs, mountPoint)); int filesExpected = dirs0.length + results.getSuccess(); tasks.add(getListSuccessTask(router1Fs, mountPoint, filesExpected)); - assertEquals(2, collectResults("List " + mountPoint, tasks).getSuccess()); + results = collectResults("List " + mountPoint, tasks); + assertEquals("Failed listing", 2, results.getSuccess()); + + tasks.add(getContentSummaryFailTask(router0Fs, mountPoint)); + tasks.add(getContentSummarySuccessTask( + router1Fs, mountPoint, filesExpected)); + results = collectResults("Content summary " + mountPoint, tasks); + assertEquals("Failed content summary", 2, results.getSuccess()); } /** @@ -422,6 +351,12 @@ private void checkFilesFaultTolerant( tasks.add(getListFailTask(router0Fs, dir0)); tasks.add(getListSuccessTask(router1Fs, dir0, results.getSuccess())); assertEquals(2, collectResults("List " + dir0, tasks).getSuccess()); + + tasks.add(getContentSummaryFailTask(router0Fs, dir0)); + tasks.add(getContentSummarySuccessTask( + router1Fs, dir0, results.getSuccess())); + results = collectResults("Content summary " + dir0, tasks); + assertEquals(2, results.getSuccess()); } /** @@ -534,6 +469,42 @@ private static Callable getListSuccessTask( }; } + + /** + * Task that 
lists a directory and expects to fail. + * @param fs File system to check. + * @param path Path to try to list. + * @return If the listing failed as expected. + */ + private static Callable getContentSummaryFailTask( + FileSystem fs, Path path) { + return () -> { + try { + fs.getContentSummary(path); + return false; + } catch (RemoteException re) { + return true; + } + }; + } + + /** + * Task that lists a directory and succeeds. + * @param fs File system to check. + * @param path Path to list. + * @param expected Number of files to expect to find. + * @return If the listing succeeds. + */ + private static Callable getContentSummarySuccessTask( + FileSystem fs, Path path, int expected) { + return () -> { + ContentSummary summary = fs.getContentSummary(path); + assertEquals("Wrong summary for " + path, + expected, summary.getFileAndDirectoryCount()); + return true; + }; + } + /** * Invoke a set of tasks and collect their outputs. * The tasks should do assertions. @@ -556,7 +527,14 @@ private TaskResults collectResults(final String tag, results.incrFailure(); } } catch (Exception e) { - fail(e.getMessage()); + StringWriter stackTrace = new StringWriter(); + PrintWriter writer = new PrintWriter(stackTrace); + if (e instanceof ExecutionException) { + e.getCause().printStackTrace(writer); + } else { + e.printStackTrace(writer); + } + fail("Failed to run \"" + tag + "\": " + stackTrace); } }); tasks.clear(); @@ -631,24 +609,4 @@ private FileSystem getRandomRouterFileSystem() throws Exception { return userUgi.doAs( (PrivilegedExceptionAction) () -> getFileSystem(router)); } - - private static FileSystem getFileSystem(int rpcPort) throws IOException { - Configuration conf = new HdfsConfiguration(); - URI uri = URI.create("hdfs://localhost:" + rpcPort); - return DistributedFileSystem.get(uri, conf); - } - - private static FileSystem getFileSystem(final Router router) - throws IOException { - InetSocketAddress rpcAddress = router.getRpcServerAddress(); - int rpcPort = rpcAddress.getPort(); - return getFileSystem(rpcPort); - } - - private static RouterClient getAdminClient( - final Router router) throws IOException { - Configuration conf = new HdfsConfiguration(); - InetSocketAddress routerSocket = router.getAdminServerAddress(); - return new RouterClient(routerSocket, conf); - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMissingFolderMulti.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMissingFolderMulti.java new file mode 100644 index 0000000000000..8ce4eb6423190 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMissingFolderMulti.java @@ -0,0 +1,182 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static java.util.Arrays.asList; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createMountTableEntry; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.getFileSystem; +import static org.apache.hadoop.hdfs.server.federation.MockNamenode.registerSubclusters; +import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration; +import static org.junit.Assert.assertEquals; + +import java.io.FileNotFoundException; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.server.federation.MockNamenode; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; +import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Test the behavior when listing a mount point mapped to multiple subclusters + * and one of the subclusters is missing it. + */ +public class TestRouterMissingFolderMulti { + + private static final Logger LOG = + LoggerFactory.getLogger(TestRouterMissingFolderMulti.class); + + /** Number of files to create for testing. */ + private static final int NUM_FILES = 10; + + /** Namenodes for the test per name service id (subcluster). */ + private Map namenodes = new HashMap<>(); + /** Routers for the test. 
*/ + private Router router; + + + @Before + public void setup() throws Exception { + LOG.info("Start the Namenodes"); + Configuration nnConf = new HdfsConfiguration(); + nnConf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 10); + for (final String nsId : asList("ns0", "ns1")) { + MockNamenode nn = new MockNamenode(nsId, nnConf); + nn.transitionToActive(); + nn.addFileSystemMock(); + namenodes.put(nsId, nn); + } + + LOG.info("Start the Routers"); + Configuration routerConf = new RouterConfigBuilder() + .stateStore() + .admin() + .rpc() + .build(); + routerConf.set(RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "0.0.0.0:0"); + routerConf.set(RBFConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY, "0.0.0.0:0"); + routerConf.set(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, "0.0.0.0:0"); + + Configuration stateStoreConf = getStateStoreConfiguration(); + stateStoreConf.setClass( + RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS, + MembershipNamenodeResolver.class, ActiveNamenodeResolver.class); + stateStoreConf.setClass( + RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS, + MultipleDestinationMountTableResolver.class, + FileSubclusterResolver.class); + routerConf.addResource(stateStoreConf); + + routerConf.setBoolean(RBFConfigKeys.DFS_ROUTER_ALLOW_PARTIAL_LIST, false); + + router = new Router(); + router.init(routerConf); + router.start(); + + LOG.info("Registering the subclusters in the Routers"); + registerSubclusters(router, namenodes.values()); + } + + @After + public void cleanup() throws Exception { + LOG.info("Stopping the cluster"); + for (final MockNamenode nn : namenodes.values()) { + nn.stop(); + } + namenodes.clear(); + + if (router != null) { + router.stop(); + router = null; + } + } + + @Test + public void testSuccess() throws Exception { + FileSystem fs = getFileSystem(router); + String mountPoint = "/test-success"; + createMountTableEntry(router, mountPoint, + DestinationOrder.HASH_ALL, namenodes.keySet()); + Path folder = new Path(mountPoint, "folder-all"); + for (int i = 0; i < NUM_FILES; i++) { + Path file = new Path(folder, "file-" + i + ".txt"); + FSDataOutputStream os = fs.create(file); + os.close(); + } + FileStatus[] files = fs.listStatus(folder); + assertEquals(NUM_FILES, files.length); + ContentSummary contentSummary = fs.getContentSummary(folder); + assertEquals(NUM_FILES, contentSummary.getFileCount()); + } + + @Test + public void testFileNotFound() throws Exception { + FileSystem fs = getFileSystem(router); + String mountPoint = "/test-non-existing"; + createMountTableEntry(router, + mountPoint, DestinationOrder.HASH_ALL, namenodes.keySet()); + Path path = new Path(mountPoint, "folder-all"); + LambdaTestUtils.intercept(FileNotFoundException.class, + () -> fs.listStatus(path)); + LambdaTestUtils.intercept(FileNotFoundException.class, + () -> fs.getContentSummary(path)); + } + + @Test + public void testOneMissing() throws Exception { + FileSystem fs = getFileSystem(router); + String mountPoint = "/test-one-missing"; + createMountTableEntry(router, mountPoint, + DestinationOrder.HASH_ALL, namenodes.keySet()); + + // Create the folders directly in only one of the Namenodes + MockNamenode nn = namenodes.get("ns0"); + int nnRpcPort = nn.getRPCPort(); + FileSystem nnFs = getFileSystem(nnRpcPort); + Path folder = new Path(mountPoint, "folder-all"); + for (int i = 0; i < NUM_FILES; i++) { + Path file = new Path(folder, "file-" + i + ".txt"); + FSDataOutputStream os = nnFs.create(file); + os.close(); + } + + FileStatus[] files = fs.listStatus(folder); + assertEquals(NUM_FILES, 
files.length); + ContentSummary summary = fs.getContentSummary(folder); + assertEquals(NUM_FILES, summary.getFileAndDirectoryCount()); + } +} From 32841178ba62cc23c14fe1815f36eb51b915fac3 Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Mon, 13 May 2019 12:18:10 -0700 Subject: [PATCH 0321/1308] HDFS-14426. RBF: Add delegation token total count as one of the federation metrics. Contributed by Fengnan Li. --- .../federation/metrics/FederationMBean.java | 6 ++++++ .../federation/metrics/FederationMetrics.java | 11 ++++++++++ .../contract/router/RouterHDFSContract.java | 7 ++++++- ...TestRouterHDFSContractDelegationToken.java | 20 +++++++++++++++---- 4 files changed, 39 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java index 8f24fcbbd1317..e33a77e039d70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java @@ -244,4 +244,10 @@ public interface FederationMBean { * @return String label for the current router state. */ String getRouterStatus(); + + /** + * Get the current number of delegation tokens in memory. + * @return number of DTs + */ + long getCurrentTokensCount(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java index a39f17d2a48c5..a196098b9d462 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.router.Router; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; +import org.apache.hadoop.hdfs.server.federation.router.security.RouterSecurityManager; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; import org.apache.hadoop.hdfs.server.federation.store.MountTableStore; import org.apache.hadoop.hdfs.server.federation.store.RouterStore; @@ -604,6 +605,16 @@ public String getRouterStatus() { return this.router.getRouterState().toString(); } + @Override + public long getCurrentTokensCount() { + RouterSecurityManager mgr = + this.router.getRpcServer().getRouterSecurityManager(); + if (mgr != null && mgr.getSecretManager() != null) { + return mgr.getSecretManager().getCurrentTokensSize(); + } + return -1; + } + /** * Build a set of unique values found in all namespaces. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java index 46339a388b884..572da902fc3ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java @@ -47,8 +47,13 @@ public static void createCluster() throws IOException { } public static void createCluster(Configuration conf) throws IOException { + createCluster(true, 2, conf); + } + + public static void createCluster( + boolean ha, int numNameServices, Configuration conf) throws IOException { try { - cluster = new MiniRouterDFSCluster(true, 2, conf); + cluster = new MiniRouterDFSCluster(ha, numNameServices, conf); // Start NNs and DNs and wait until ready cluster.startCluster(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java index 062079f1760e3..137c2ab880255 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java @@ -18,11 +18,18 @@ package org.apache.hadoop.fs.contract.router; +import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; +import static org.apache.hadoop.hdfs.server.federation.metrics.TestFederationMetrics.FEDERATION_BEAN; + +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.AbstractFSContractTestBase; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.federation.FederationTestUtils; +import org.apache.hadoop.hdfs.server.federation.metrics.FederationMBean; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; import org.junit.AfterClass; @@ -31,9 +38,6 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import java.io.IOException; -import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity; - /** * Test to verify router contracts for delegation token operations. 
*/ @@ -42,7 +46,7 @@ public class TestRouterHDFSContractDelegationToken @BeforeClass public static void createCluster() throws Exception { - RouterHDFSContract.createCluster(initSecurity()); + RouterHDFSContract.createCluster(false, 1, initSecurity()); } @AfterClass @@ -60,6 +64,10 @@ protected AbstractFSContract createContract(Configuration conf) { @Test public void testRouterDelegationToken() throws Exception { + FederationMBean bean = FederationTestUtils.getBean( + FEDERATION_BEAN, FederationMBean.class); + // Initially there is no token in memory + assertEquals(0, bean.getCurrentTokensCount()); // Generate delegation token Token token = (Token) getFileSystem() @@ -81,6 +89,8 @@ public void testRouterDelegationToken() throws Exception { assertTrue(sequenceNumber > 0); long existingMaxTime = token.decodeIdentifier().getMaxDate(); assertTrue(identifier.getMaxDate() >= identifier.getIssueDate()); + // one token is expected after the generation + assertEquals(1, bean.getCurrentTokensCount()); // Renew delegation token long expiryTime = token.renew(initSecurity()); @@ -92,9 +102,11 @@ public void testRouterDelegationToken() throws Exception { identifier = token.decodeIdentifier(); assertEquals(identifier.getMasterKeyId(), masterKeyId); assertEquals(identifier.getSequenceNumber(), sequenceNumber); + assertEquals(1, bean.getCurrentTokensCount()); // Cancel delegation token token.cancel(initSecurity()); + assertEquals(0, bean.getCurrentTokensCount()); // Renew a cancelled token exceptionRule.expect(SecretManager.InvalidToken.class); From 62fa53a01dc7165d7965cdd4fddb444082f0602c Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Thu, 16 May 2019 00:45:20 +0530 Subject: [PATCH 0322/1308] HDFS-14210. RBF: ACL commands should work over all the destinations. Contributed by Ayush Saxena. 
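For illustration, a hedged client-side sketch of the behaviour this change enables (the Router endpoint, mount path and user name are placeholders, not part of the patch): an ACL set through the Router on a multi-destination mount point is now applied on every destination, in the same spirit as the new testACLMultipleDestinations test below.

    import java.net.URI;
    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.AclEntry;

    public class RouterAclOverAllDestinations {
      public static void main(String[] args) throws Exception {
        // Placeholder Router endpoint; any mount point resolved with HASH_ALL
        // (or another multi-destination order) behaves the same way.
        FileSystem routerFs = FileSystem.get(
            URI.create("hdfs://router-host:8888"), new Configuration());
        List<AclEntry> aclSpec = Collections.singletonList(
            AclEntry.parseAclEntry("default:user:testuser:rwx", true));
        // The Router now fans this call out to every destination of
        // /mount/dir/dir instead of stopping after the first subcluster.
        routerFs.setAcl(new Path("/mount/dir/dir"), aclSpec);
      }
    }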
--- .../router/RouterClientProtocol.java | 30 +++++++++++++--- ...MultipleDestinationMountTableResolver.java | 35 +++++++++++++++++++ 2 files changed, 60 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index f1f1c420b403a..4e273c2d41618 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -1313,7 +1313,11 @@ public void modifyAclEntries(String src, List aclSpec) RemoteMethod method = new RemoteMethod("modifyAclEntries", new Class[] {String.class, List.class}, new RemoteParam(), aclSpec); - rpcClient.invokeSequential(locations, method, null, null); + if (rpcServer.isInvokeConcurrent(src)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } } @Override @@ -1327,7 +1331,11 @@ public void removeAclEntries(String src, List aclSpec) RemoteMethod method = new RemoteMethod("removeAclEntries", new Class[] {String.class, List.class}, new RemoteParam(), aclSpec); - rpcClient.invokeSequential(locations, method, null, null); + if (rpcServer.isInvokeConcurrent(src)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } } @Override @@ -1339,7 +1347,11 @@ public void removeDefaultAcl(String src) throws IOException { rpcServer.getLocationsForPath(src, true); RemoteMethod method = new RemoteMethod("removeDefaultAcl", new Class[] {String.class}, new RemoteParam()); - rpcClient.invokeSequential(locations, method); + if (rpcServer.isInvokeConcurrent(src)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } } @Override @@ -1351,7 +1363,11 @@ public void removeAcl(String src) throws IOException { rpcServer.getLocationsForPath(src, true); RemoteMethod method = new RemoteMethod("removeAcl", new Class[] {String.class}, new RemoteParam()); - rpcClient.invokeSequential(locations, method); + if (rpcServer.isInvokeConcurrent(src)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } } @Override @@ -1364,7 +1380,11 @@ public void setAcl(String src, List aclSpec) throws IOException { RemoteMethod method = new RemoteMethod( "setAcl", new Class[] {String.class, List.class}, new RemoteParam(), aclSpec); - rpcClient.invokeSequential(locations, method); + if (rpcServer.isInvokeConcurrent(src)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java index 2cd11f080be65..72a243a459d6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java @@ -38,7 +38,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; @@ -84,6 +86,7 @@ public static void setUp() throws Exception { new RouterConfigBuilder().stateStore().admin().quota().rpc().build(); Configuration hdfsConf = new Configuration(false); + hdfsConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); cluster.addRouterOverrides(routerConf); cluster.addNamenodeOverrides(hdfsConf); @@ -406,6 +409,38 @@ private boolean addMountTable(final MountTable entry) throws IOException { return addResponse.getStatus(); } + @Test + public void testACLMultipleDestinations() throws Exception { + setupOrderMountPath(DestinationOrder.HASH_ALL); + Path mountPath = new Path("/mount/dir/dir"); + Path nsPath = new Path("/tmp/dir/dir"); + List aclSpec = Collections.singletonList( + AclEntry.parseAclEntry("default:USER:TestUser:rwx", true)); + routerFs.setAcl(mountPath, aclSpec); + assertEquals(5, nnFs0.getAclStatus(nsPath).getEntries().size()); + assertEquals(5, nnFs1.getAclStatus(nsPath).getEntries().size()); + aclSpec = Collections + .singletonList(AclEntry.parseAclEntry("USER:User:rwx::", true)); + + routerFs.modifyAclEntries(mountPath, aclSpec); + assertEquals(7, nnFs0.getAclStatus(nsPath).getEntries().size()); + assertEquals(7, nnFs1.getAclStatus(nsPath).getEntries().size()); + + routerFs.removeAclEntries(mountPath, aclSpec); + assertEquals(6, nnFs0.getAclStatus(nsPath).getEntries().size()); + assertEquals(6, nnFs1.getAclStatus(nsPath).getEntries().size()); + + routerFs.modifyAclEntries(mountPath, aclSpec); + routerFs.removeDefaultAcl(mountPath); + assertEquals(2, nnFs0.getAclStatus(nsPath).getEntries().size()); + assertEquals(2, nnFs1.getAclStatus(nsPath).getEntries().size()); + + routerFs.removeAcl(mountPath); + assertEquals(0, nnFs0.getAclStatus(nsPath).getEntries().size()); + assertEquals(0, nnFs1.getAclStatus(nsPath).getEntries().size()); + + } + @Test public void testGetDestinationHashAll() throws Exception { testGetDestination(DestinationOrder.HASH_ALL, From 3deb5d345f439cbebcad5296c69689e8334f59ce Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Wed, 15 May 2019 15:22:36 -0700 Subject: [PATCH 0323/1308] HDFS-14490. RBF: Remove unnecessary quota checks. Contributed by Ayush Saxena. 
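The change below threads an extra flag through RouterRpcServer#getLocationsForPath so that mount-point quota verification is skipped for operations that do not consume namespace or storage space. A hedged sketch of the resulting client-visible behaviour (the file system handle and paths are illustrative only; it mirrors the new test added to TestRouterQuota):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;

    public class QuotaUnrelatedOperations {
      // routerFs points at a Router; /quota is a mount entry whose namespace
      // quota is already exhausted (names are placeholders).
      static void demo(DistributedFileSystem routerFs) throws Exception {
        Path dir = new Path("/quota/1");
        try {
          routerFs.mkdirs(new Path("/quota/2")); // still verified against quota
        } catch (NSQuotaExceededException e) {
          // expected: creating a new name consumes namespace quota
        }
        // After this change the calls below no longer fail the quota check:
        routerFs.setStoragePolicy(dir, "COLD");
        routerFs.setPermission(dir, new FsPermission((short) 01777));
        routerFs.setTimes(dir, 1L, 1L);
        routerFs.listStatus(dir);
        routerFs.getContentSummary(dir);
      }
    }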
--- .../federation/router/ErasureCoding.java | 6 +- .../router/RouterClientProtocol.java | 61 ++++++++++--------- .../router/RouterStoragePolicy.java | 12 ++-- .../federation/router/TestRouterQuota.java | 33 ++++++++++ 4 files changed, 75 insertions(+), 37 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java index 97c5f6a601d16..e1d4844b9a3ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java @@ -140,7 +140,7 @@ public ErasureCodingPolicy getErasureCodingPolicy(String src) rpcServer.checkOperation(OperationCategory.READ); final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod remoteMethod = new RemoteMethod("getErasureCodingPolicy", new Class[] {String.class}, new RemoteParam()); ErasureCodingPolicy ret = rpcClient.invokeSequential( @@ -153,7 +153,7 @@ public void setErasureCodingPolicy(String src, String ecPolicyName) rpcServer.checkOperation(OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod remoteMethod = new RemoteMethod("setErasureCodingPolicy", new Class[] {String.class, String.class}, new RemoteParam(), ecPolicyName); @@ -168,7 +168,7 @@ public void unsetErasureCodingPolicy(String src) throws IOException { rpcServer.checkOperation(OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod remoteMethod = new RemoteMethod("unsetErasureCodingPolicy", new Class[] {String.class}, new RemoteParam()); if (rpcServer.isInvokeConcurrent(src)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 4e273c2d41618..0a14b99f51ec3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -207,7 +207,8 @@ public LocatedBlocks getBlockLocations(String src, final long offset, final long length) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); - List locations = rpcServer.getLocationsForPath(src, false); + List locations = + rpcServer.getLocationsForPath(src, false, false); RemoteMethod remoteMethod = new RemoteMethod("getBlockLocations", new Class[] {String.class, long.class, long.class}, new RemoteParam(), offset, length); @@ -351,7 +352,7 @@ public boolean recoverLease(String src, String clientName) rpcServer.checkOperation(NameNode.OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("recoverLease", new Class[] {String.class, String.class}, new RemoteParam(), clientName); @@ -395,7 +396,7 @@ public void setPermission(String src, FsPermission 
permissions) rpcServer.checkOperation(NameNode.OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("setPermission", new Class[] {String.class, FsPermission.class}, new RemoteParam(), permissions); @@ -412,7 +413,7 @@ public void setOwner(String src, String username, String groupname) rpcServer.checkOperation(NameNode.OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("setOwner", new Class[] {String.class, String.class, String.class}, new RemoteParam(), username, groupname); @@ -531,7 +532,7 @@ public long getPreferredBlockSize(String src) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("getPreferredBlockSize", new Class[] {String.class}, new RemoteParam()); return rpcClient.invokeSequential(locations, method, Long.class, null); @@ -724,7 +725,7 @@ public DirectoryListing getListing(String src, byte[] startAfter, // Locate the dir and fetch the listing final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("getListing", new Class[] {String.class, startAfter.getClass(), boolean.class}, new RemoteParam(), startAfter, needLocation); @@ -821,7 +822,7 @@ public HdfsFileStatus getFileInfo(String src) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); final List locations = - rpcServer.getLocationsForPath(src, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("getFileInfo", new Class[] {String.class}, new RemoteParam()); @@ -859,7 +860,7 @@ public boolean isFileClosed(String src) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); final List locations = - rpcServer.getLocationsForPath(src, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("isFileClosed", new Class[] {String.class}, new RemoteParam()); return rpcClient.invokeSequential(locations, method, Boolean.class, @@ -871,7 +872,7 @@ public HdfsFileStatus getFileLinkInfo(String src) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); final List locations = - rpcServer.getLocationsForPath(src, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("getFileLinkInfo", new Class[] {String.class}, new RemoteParam()); return rpcClient.invokeSequential(locations, method, HdfsFileStatus.class, @@ -883,7 +884,7 @@ public HdfsLocatedFileStatus getLocatedFileInfo(String src, boolean needBlockToken) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); final List locations = - rpcServer.getLocationsForPath(src, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("getLocatedFileInfo", new Class[] {String.class, boolean.class}, new RemoteParam(), needBlockToken); @@ -1092,7 +1093,7 @@ public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) rpcServer.checkOperation(NameNode.OperationCategory.READ); final List locations = - rpcServer.getLocationsForPath(path, false); + 
rpcServer.getLocationsForPath(path, false, false); RemoteMethod method = new RemoteMethod("listCorruptFileBlocks", new Class[] {String.class, String.class}, new RemoteParam(), cookie); @@ -1117,7 +1118,7 @@ public ContentSummary getContentSummary(String path) throws IOException { // Get the summaries from regular files final Collection summaries = new ArrayList<>(); final List locations = - rpcServer.getLocationsForPath(path, false); + rpcServer.getLocationsForPath(path, false, false); final RemoteMethod method = new RemoteMethod("getContentSummary", new Class[] {String.class}, new RemoteParam()); final List> results = @@ -1169,7 +1170,7 @@ public void fsync(String src, long fileId, String clientName, rpcServer.checkOperation(NameNode.OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("fsync", new Class[] {String.class, long.class, String.class, long.class }, new RemoteParam(), fileId, clientName, lastBlockLength); @@ -1181,7 +1182,7 @@ public void setTimes(String src, long mtime, long atime) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("setTimes", new Class[] {String.class, long.class, long.class}, new RemoteParam(), mtime, atime); @@ -1211,7 +1212,7 @@ public String getLinkTarget(String path) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); final List locations = - rpcServer.getLocationsForPath(path, true); + rpcServer.getLocationsForPath(path, true, false); RemoteMethod method = new RemoteMethod("getLinkTarget", new Class[] {String.class}, new RemoteParam()); return rpcClient.invokeSequential(locations, method, String.class, null); @@ -1309,7 +1310,7 @@ public void modifyAclEntries(String src, List aclSpec) // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("modifyAclEntries", new Class[] {String.class, List.class}, new RemoteParam(), aclSpec); @@ -1327,7 +1328,7 @@ public void removeAclEntries(String src, List aclSpec) // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("removeAclEntries", new Class[] {String.class, List.class}, new RemoteParam(), aclSpec); @@ -1344,7 +1345,7 @@ public void removeDefaultAcl(String src) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("removeDefaultAcl", new Class[] {String.class}, new RemoteParam()); if (rpcServer.isInvokeConcurrent(src)) { @@ -1360,7 +1361,7 @@ public void removeAcl(String src) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("removeAcl", new Class[] {String.class}, new RemoteParam()); if (rpcServer.isInvokeConcurrent(src)) { @@ -1376,7 +1377,7 @@ public void setAcl(String src, List aclSpec) throws IOException { // TODO handle virtual directories final List locations = - 
rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod( "setAcl", new Class[] {String.class, List.class}, new RemoteParam(), aclSpec); @@ -1393,7 +1394,7 @@ public AclStatus getAclStatus(String src) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("getAclStatus", new Class[] {String.class}, new RemoteParam()); return rpcClient.invokeSequential(locations, method, AclStatus.class, null); @@ -1406,7 +1407,7 @@ public void createEncryptionZone(String src, String keyName) // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("createEncryptionZone", new Class[] {String.class, String.class}, new RemoteParam(), keyName); @@ -1419,7 +1420,7 @@ public EncryptionZone getEZForPath(String src) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("getEZForPath", new Class[] {String.class}, new RemoteParam()); return rpcClient.invokeSequential( @@ -1453,7 +1454,7 @@ public void setXAttr(String src, XAttr xAttr, EnumSet flag) // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("setXAttr", new Class[] {String.class, XAttr.class, EnumSet.class}, new RemoteParam(), xAttr, flag); @@ -1472,7 +1473,7 @@ public List getXAttrs(String src, List xAttrs) // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("getXAttrs", new Class[] {String.class, List.class}, new RemoteParam(), xAttrs); return (List) rpcClient.invokeSequential( @@ -1486,7 +1487,7 @@ public List listXAttrs(String src) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("listXAttrs", new Class[] {String.class}, new RemoteParam()); return (List) rpcClient.invokeSequential( @@ -1499,7 +1500,7 @@ public void removeXAttr(String src, XAttr xAttr) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("removeXAttr", new Class[] {String.class, XAttr.class}, new RemoteParam(), xAttr); if (rpcServer.isInvokeConcurrent(src)) { @@ -1515,7 +1516,7 @@ public void checkAccess(String path, FsAction mode) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(path, true); + rpcServer.getLocationsForPath(path, true, false); RemoteMethod method = new RemoteMethod("checkAccess", new Class[] {String.class, FsAction.class}, new RemoteParam(), mode); @@ -1735,7 +1736,7 @@ private RemoteParam getRenameDestinations( throws IOException { final List dstLocations = - rpcServer.getLocationsForPath(dst, true); + rpcServer.getLocationsForPath(dst, true, false); final Map dstMap = new HashMap<>(); 
Iterator iterator = srcLocations.iterator(); @@ -2026,7 +2027,7 @@ boolean isMultiDestDirectory(String src) throws IOException { try { if (rpcServer.isPathAll(src)) { List locations; - locations = rpcServer.getLocationsForPath(src, false); + locations = rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("getFileInfo", new Class[] {String.class}, new RemoteParam()); HdfsFileStatus fileStatus = rpcClient.invokeSequential(locations, diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java index a4538b0e6bdc0..33203dcdb99c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java @@ -45,7 +45,8 @@ public void setStoragePolicy(String src, String policyName) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.WRITE); - List locations = rpcServer.getLocationsForPath(src, true); + List locations = + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("setStoragePolicy", new Class[] {String.class, String.class}, new RemoteParam(), @@ -67,7 +68,8 @@ public BlockStoragePolicy[] getStoragePolicies() throws IOException { public void unsetStoragePolicy(String src) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); - List locations = rpcServer.getLocationsForPath(src, true); + List locations = + rpcServer.getLocationsForPath(src, true, false); RemoteMethod method = new RemoteMethod("unsetStoragePolicy", new Class[] {String.class}, new RemoteParam()); @@ -82,7 +84,8 @@ public BlockStoragePolicy getStoragePolicy(String path) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ, true); - List locations = rpcServer.getLocationsForPath(path, false); + List locations = + rpcServer.getLocationsForPath(path, false, false); RemoteMethod method = new RemoteMethod("getStoragePolicy", new Class[] {String.class}, new RemoteParam()); @@ -92,7 +95,8 @@ public BlockStoragePolicy getStoragePolicy(String path) public void satisfyStoragePolicy(String path) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ, true); - List locations = rpcServer.getLocationsForPath(path, true); + List locations = + rpcServer.getLocationsForPath(path, true, false); RemoteMethod method = new RemoteMethod("satisfyStoragePolicy", new Class[] {String.class}, new RemoteParam()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java index abcbe8fdbdc82..81d5023cfc09e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static 
org.junit.Assert.assertNull; @@ -34,7 +35,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.QuotaUsage; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.ClientProtocol; @@ -792,4 +795,34 @@ public void testSetQuotaNotMountTable() throws Exception { assertEquals(nsQuota, quota1.getQuota()); assertEquals(ssQuota, quota1.getSpaceQuota()); } + + @Test + public void testNoQuotaaExceptionForUnrelatedOperations() throws Exception { + FileSystem nnFs = nnContext1.getFileSystem(); + DistributedFileSystem routerFs = + (DistributedFileSystem) routerContext.getFileSystem(); + Path path = new Path("/quota"); + nnFs.mkdirs(new Path("/dir")); + MountTable mountTable1 = MountTable.newInstance("/quota", + Collections.singletonMap("ns0", "/dir")); + mountTable1.setQuota(new RouterQuotaUsage.Builder().quota(0).build()); + addMountTable(mountTable1); + routerFs.mkdirs(new Path("/quota/1")); + routerContext.getRouter().getQuotaCacheUpdateService().periodicInvoke(); + + // Quota check for related operation. + intercept(NSQuotaExceededException.class, + "The NameSpace quota (directories and files) is exceeded", + () -> routerFs.mkdirs(new Path("/quota/2"))); + + //Quotas shouldn't be checked for unrelated operations. + routerFs.setStoragePolicy(path, "COLD"); + routerFs.setErasureCodingPolicy(path, "RS-6-3-1024k"); + routerFs.unsetErasureCodingPolicy(path); + routerFs.setPermission(path, new FsPermission((short) 01777)); + routerFs.setOwner(path, "user", "group"); + routerFs.setTimes(path, 1L, 1L); + routerFs.listStatus(path); + routerFs.getContentSummary(path); + } } From 393f15176dfd1d2ab765f9aac897a1e35693d3cf Mon Sep 17 00:00:00 2001 From: Giovanni Matteo Fumarola Date: Thu, 16 May 2019 11:05:29 -0700 Subject: [PATCH 0324/1308] HDFS-14447. RBF: Router should support RefreshUserMappingsProtocol. Contributed by Shen Yinjie. 
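With this change the Router RPC server also exposes RefreshUserMappingsProtocol, so the usual dfsadmin refresh commands can be pointed directly at a Router. A minimal sketch, assuming a Router listening on its RPC port (the address is a placeholder), in the same spirit as the new test below:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;

    public class RefreshMappingsViaRouter {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Point fs.defaultFS at the Router RPC endpoint (placeholder address).
        conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
            "hdfs://127.0.0.1:8888");
        DFSAdmin admin = new DFSAdmin(conf);
        // Both refresh operations are now served by the Router itself.
        admin.run(new String[] {"-refreshUserToGroupsMappings"});
        admin.run(new String[] {"-refreshSuperUserGroupsConfiguration"});
      }
    }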
--- .../federation/router/RouterRpcServer.java | 28 +- .../TestRefreshUserMappingsWithRouters.java | 386 ++++++++++++++++++ 2 files changed, 413 insertions(+), 1 deletion(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRefreshUserMappingsWithRouters.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index d35d1f0f61c37..559270f022eb7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -139,7 +139,13 @@ import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.Groups; +import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos; +import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolPB; +import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolServerSideTranslatorPB; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ReflectionUtils; @@ -158,7 +164,7 @@ * {@link org.apache.hadoop.hdfs.server.namenode.NameNode NameNode}. */ public class RouterRpcServer extends AbstractService - implements ClientProtocol, NamenodeProtocol { + implements ClientProtocol, NamenodeProtocol, RefreshUserMappingsProtocol { private static final Logger LOG = LoggerFactory.getLogger(RouterRpcServer.class); @@ -257,6 +263,12 @@ public RouterRpcServer(Configuration configuration, Router router, BlockingService nnPbService = NamenodeProtocolService .newReflectiveBlockingService(namenodeProtocolXlator); + RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator = + new RefreshUserMappingsProtocolServerSideTranslatorPB(this); + BlockingService refreshUserMappingService = + RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService. 
+ newReflectiveBlockingService(refreshUserMappingXlator); + InetSocketAddress confRpcAddress = conf.getSocketAddr( RBFConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY, RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, @@ -283,6 +295,8 @@ public RouterRpcServer(Configuration configuration, Router router, // Add all the RPC protocols that the Router implements DFSUtil.addPBProtocol( conf, NamenodeProtocolPB.class, nnPbService, this.rpcServer); + DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, + refreshUserMappingService, this.rpcServer); // Set service-level authorization security policy this.serviceAuthEnabled = conf.getBoolean( @@ -1661,4 +1675,16 @@ boolean isInvokeConcurrent(final String path) throws IOException { } return false; } + + @Override + public void refreshUserToGroupsMappings() throws IOException { + LOG.info("Refresh user groups mapping in Router."); + Groups.getUserToGroupsMappingService().refresh(); + } + + @Override + public void refreshSuperUserGroupsConfiguration() throws IOException { + LOG.info("Refresh superuser groups configuration in Router."); + ProxyUsers.refreshSuperUserGroupsConfiguration(); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRefreshUserMappingsWithRouters.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRefreshUserMappingsWithRouters.java new file mode 100644 index 0000000000000..597b8c2740753 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRefreshUserMappingsWithRouters.java @@ -0,0 +1,386 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.federation.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; +import org.apache.hadoop.hdfs.tools.DFSAdmin; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.GroupMappingServiceProvider; +import org.apache.hadoop.security.Groups; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.UnsupportedEncodingException; +import java.net.URL; +import java.net.URLDecoder; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests RefreshUserMappingsProtocol With Routers. + */ +public class TestRefreshUserMappingsWithRouters { + private static final Logger LOG = LoggerFactory.getLogger( + TestRefreshUserMappingsWithRouters.class); + + private MiniRouterDFSCluster cluster; + private Router router; + private Configuration conf; + private static final long GROUP_REFRESH_TIMEOUT_SEC = 1L; + private static final String ROUTER_NS = "rbfns"; + private static final String HDFS_SCHEMA = "hdfs://"; + private static final String LOOPBACK_ADDRESS = "127.0.0.1"; + private static final String HDFS_PREFIX = HDFS_SCHEMA + LOOPBACK_ADDRESS; + + private String tempResource = null; + + /** + * Mock class to get group mapping for fake users. 
+ */ + public static class MockUnixGroupsMapping + implements GroupMappingServiceProvider { + private static int i = 0; + + @Override + public List getGroups(String user) throws IOException { + LOG.info("Getting groups in MockUnixGroupsMapping"); + String g1 = user + (10 * i + 1); + String g2 = user + (10 * i + 2); + List l = new ArrayList(2); + l.add(g1); + l.add(g2); + i++; + return l; + } + + @Override + public void cacheGroupsRefresh() throws IOException { + LOG.info("Refreshing groups in MockUnixGroupsMapping"); + } + + @Override + public void cacheGroupsAdd(List groups) throws IOException { + } + } + + @Before + public void setUp() { + conf = new Configuration(false); + conf.setClass("hadoop.security.group.mapping", + TestRefreshUserMappingsWithRouters.MockUnixGroupsMapping.class, + GroupMappingServiceProvider.class); + conf.setLong("hadoop.security.groups.cache.secs", + GROUP_REFRESH_TIMEOUT_SEC); + conf = new RouterConfigBuilder(conf) + .rpc() + .admin() + .build(); + Groups.getUserToGroupsMappingService(conf); + } + + /** + * Setup a single router, and return this router's rpc address + * as fs.defaultFS for {@link DFSAdmin}. + * @return router's rpc address + * @throws Exception + */ + private String setUpSingleRouterAndReturnDefaultFs() { + router = new Router(); + conf.set(RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, + LOOPBACK_ADDRESS + ":" + NetUtils.getFreeSocketPort()); + router.init(conf); + router.start(); + String defaultFs = HDFS_PREFIX + ":" + + router.getRpcServerAddress().getPort(); + return defaultFs; + } + + /** + * Setup a multi-routers mini dfs cluster with two nameservices + * and four routers. + * For dfsadmin clients to use the federated namespace, we need to create a + * new namespace that points to the routers. + * For example, a cluster with 2 namespaces ns0, ns1, can add a new one to + * hdfs-site.xml called {@link #ROUTER_NS}, which points to four of the + * routers. With this setting dfsadmin client can interact with routers + * as a regular namespace and reconginze multi-routers. + * @return fs.defaultFS for multi-routers + * @throws Exception + */ + private String setUpMultiRoutersAndReturnDefaultFs() throws Exception { + //setup a miniroutercluster with 2 nameservices, 4 routers. + cluster = new MiniRouterDFSCluster(true, 2); + cluster.addRouterOverrides(conf); + cluster.startRouters(); + + //construct client conf. + conf.set(DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY, "ns0,ns1"); + conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns0,ns1,"+ ROUTER_NS); + conf.set(HdfsClientConfigKeys.Failover. + PROXY_PROVIDER_KEY_PREFIX +"." + ROUTER_NS, + ConfiguredFailoverProxyProvider.class.getCanonicalName()); + conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, + HDFS_SCHEMA + ROUTER_NS); + conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX+ + "."+ ROUTER_NS, "r1,r2"); + List routers = cluster.getRouters(); + for(int i = 0; i < routers.size(); i++) { + MiniRouterDFSCluster.RouterContext context = routers.get(i); + conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + + "." 
+ ROUTER_NS+".r" +(i+1), LOOPBACK_ADDRESS + + ":" +context.getRouter().getRpcServerAddress().getPort()); + } + return HDFS_SCHEMA + ROUTER_NS; + } + + @Test + public void testRefreshSuperUserGroupsConfigurationWithSingleRouter() + throws Exception { + testRefreshSuperUserGroupsConfigurationInternal( + setUpSingleRouterAndReturnDefaultFs()); + } + + @Test + public void testRefreshSuperUserGroupsConfigurationWithMultiRouters() + throws Exception { + testRefreshSuperUserGroupsConfigurationInternal( + setUpMultiRoutersAndReturnDefaultFs()); + } + + @Test + public void testGroupMappingRefreshWithSingleRouter() throws Exception { + testGroupMappingRefreshInternal( + setUpSingleRouterAndReturnDefaultFs()); + } + + + @Test + public void testGroupMappingRefreshWithMultiRouters() throws Exception { + testGroupMappingRefreshInternal( + setUpMultiRoutersAndReturnDefaultFs()); + } + + /** + * Test refreshSuperUserGroupsConfiguration action. + */ + private void testRefreshSuperUserGroupsConfigurationInternal( + String defaultFs) throws Exception { + final String superUser = "super_user"; + final List groupNames1 = new ArrayList<>(); + groupNames1.add("gr1"); + groupNames1.add("gr2"); + final List groupNames2 = new ArrayList<>(); + groupNames2.add("gr3"); + groupNames2.add("gr4"); + + //keys in conf + String userKeyGroups = DefaultImpersonationProvider.getTestProvider(). + getProxySuperuserGroupConfKey(superUser); + String userKeyHosts = DefaultImpersonationProvider.getTestProvider(). + getProxySuperuserIpConfKey(superUser); + + // superuser can proxy for this group + conf.set(userKeyGroups, "gr3,gr4,gr5"); + conf.set(userKeyHosts, LOOPBACK_ADDRESS); + ProxyUsers.refreshSuperUserGroupsConfiguration(conf); + + UserGroupInformation ugi1 = mock(UserGroupInformation.class); + UserGroupInformation ugi2 = mock(UserGroupInformation.class); + UserGroupInformation suUgi = mock(UserGroupInformation.class); + when(ugi1.getRealUser()).thenReturn(suUgi); + when(ugi2.getRealUser()).thenReturn(suUgi); + + // mock super user + when(suUgi.getShortUserName()).thenReturn(superUser); + when(suUgi.getUserName()).thenReturn(superUser+"L"); + + when(ugi1.getShortUserName()).thenReturn("user1"); + when(ugi2.getShortUserName()).thenReturn("user2"); + + when(ugi1.getUserName()).thenReturn("userL1"); + when(ugi2.getUserName()).thenReturn("userL2"); + + // set groups for users + when(ugi1.getGroups()).thenReturn(groupNames1); + when(ugi2.getGroups()).thenReturn(groupNames2); + + // check before refresh + LambdaTestUtils.intercept(AuthorizationException.class, + () -> ProxyUsers.authorize(ugi1, LOOPBACK_ADDRESS)); + try { + ProxyUsers.authorize(ugi2, LOOPBACK_ADDRESS); + LOG.info("auth for {} succeeded", ugi2.getUserName()); + // expected + } catch (AuthorizationException e) { + fail("first auth for " + ugi2.getShortUserName() + + " should've succeeded: " + e.getLocalizedMessage()); + } + + // refresh will look at configuration on the server side + // add additional resource with the new value + // so the server side will pick it up + String rsrc = "testGroupMappingRefresh_rsrc.xml"; + tempResource = addNewConfigResource(rsrc, userKeyGroups, "gr2", + userKeyHosts, LOOPBACK_ADDRESS); + + conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, defaultFs); + DFSAdmin admin = new DFSAdmin(conf); + String[] args = new String[]{"-refreshSuperUserGroupsConfiguration"}; + admin.run(args); + + LambdaTestUtils.intercept(AuthorizationException.class, + () -> ProxyUsers.authorize(ugi2, LOOPBACK_ADDRESS)); + try { + ProxyUsers.authorize(ugi1, 
LOOPBACK_ADDRESS); + LOG.info("auth for {} succeeded", ugi1.getUserName()); + // expected + } catch (AuthorizationException e) { + fail("second auth for " + ugi1.getShortUserName() + + " should've succeeded: " + e.getLocalizedMessage()); + } + } + + /** + * Test refreshUserToGroupsMappings action. + */ + private void testGroupMappingRefreshInternal(String defaultFs) + throws Exception { + Groups groups = Groups.getUserToGroupsMappingService(conf); + String user = "test_user123"; + + LOG.info("First attempt:"); + List<String> g1 = groups.getGroups(user); + LOG.info("Group 1 :{}", g1); + + LOG.info("Second attempt, should be the same:"); + List<String> g2 = groups.getGroups(user); + LOG.info("Group 2 :{}", g2); + for(int i = 0; i < g2.size(); i++) { + assertEquals("Should be same group ", g1.get(i), g2.get(i)); + } + + // set fs.defaultFS point to router(s). + conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, defaultFs); + // Test refresh command + DFSAdmin admin = new DFSAdmin(conf); + String[] args = new String[]{"-refreshUserToGroupsMappings"}; + admin.run(args); + + LOG.info("Third attempt(after refresh command), should be different:"); + List<String> g3 = groups.getGroups(user); + LOG.info("Group 3:{}", g3); + for(int i = 0; i < g3.size(); i++) { + assertNotEquals("Should be different group: " + + g1.get(i) + " and " + g3.get(i), g1.get(i), g3.get(i)); + } + + // Test timeout + LOG.info("Fourth attempt(after timeout), should be different:"); + GenericTestUtils.waitFor(() -> { + List<String> g4; + try { + g4 = groups.getGroups(user); + } catch (IOException e) { + LOG.debug("Failed to get groups for user:{}", user); + return false; + } + LOG.info("Group 4 : {}", g4); + // if g4 is the same as g3, wait and retry + return !g3.equals(g4); + }, 50, Math.toIntExact(TimeUnit.SECONDS.toMillis( + GROUP_REFRESH_TIMEOUT_SEC * 30))); + } + + public static String addNewConfigResource(String rsrcName, String keyGroup, + String groups, String keyHosts, String hosts) + throws FileNotFoundException, UnsupportedEncodingException { + // location for temp resource should be in CLASSPATH + Configuration conf = new Configuration(); + URL url = conf.getResource("hdfs-site.xml"); + + String urlPath = URLDecoder.decode(url.getPath(), "UTF-8"); + Path p = new Path(urlPath); + Path dir = p.getParent(); + String tmp = dir.toString() + "/" + rsrcName; + + StringBuilder newResource = new StringBuilder() + .append("<configuration>") + .append("<property>") + .append("<name>").append(keyGroup).append("</name>") + .append("<value>").append(groups).append("</value>") + .append("</property>") + .append("<property>") + .append("<name>").append(keyHosts).append("</name>") + .append("<value>").append(hosts).append("</value>") + .append("</property>") + .append("</configuration>"); + PrintWriter writer = new PrintWriter(new FileOutputStream(tmp)); + writer.println(newResource.toString()); + writer.close(); + Configuration.addDefaultResource(rsrcName); + return tmp; + } + + @After + public void tearDown() { + if (router != null) { + router.shutDown(); + router = null; + } + + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + + if (tempResource != null) { + File f = new File(tempResource); + f.delete(); + tempResource = null; + } + } +} From 2636a54ffd12b098e6b2744abd5b62732a9039aa Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Tue, 21 May 2019 22:48:53 +0530 Subject: [PATCH 0325/1308] HDFS-13995. RBF: Security documentation. Contributed by CR Hota.
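As a hedged illustration of the configuration keys documented below (the keytab path and principals are placeholder values; in a real deployment they belong in the Router's hdfs-site.xml rather than being set programmatically, and RouterConfigBuilder is only the test helper used elsewhere in this series):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
    import org.apache.hadoop.hdfs.server.federation.router.Router;

    public class SecureRouterStartupSketch {
      public static void main(String[] args) {
        Configuration conf = new RouterConfigBuilder(new Configuration())
            .rpc()
            .admin()
            .build();
        // Placeholder keytab and principals for a kerberized Router.
        conf.set("dfs.federation.router.keytab.file",
            "/etc/security/keytabs/router.service.keytab");
        conf.set("dfs.federation.router.kerberos.principal",
            "router/_HOST@EXAMPLE.COM");
        conf.set("dfs.federation.router.kerberos.internal.spnego.principal",
            "HTTP/_HOST@EXAMPLE.COM");
        Router router = new Router();
        router.init(conf);
        router.start();
      }
    }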
--- .../src/site/markdown/HDFSRouterFederation.md | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index 83cecda53d7b4..d9ae5af96c646 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -169,7 +169,15 @@ It is similar to the mount table in [ViewFs](../hadoop-hdfs/ViewFs.html) where i ### Security -Secure authentication and authorization are not supported yet, so the Router will not proxy to Hadoop clusters with security enabled. +The Router supports security similar to the [current security model](../hadoop-common/SecureMode.html) in HDFS. This feature is available for both RPC and Web based calls, and it allows the Router to proxy to underlying secure HDFS clusters. + +Similar to the Namenode, both Kerberos and token based authentication are supported for clients connecting to Routers. The Router internally relies on the existing security configuration in `core-site.xml` and `hdfs-site.xml` to support this feature. In addition, each Router needs to be configured with its own keytab and principal. + +For token based authentication, the Router issues delegation tokens to upstream clients without communicating with the downstream Namenodes. The Router uses its own credentials to securely proxy to the downstream Namenodes on behalf of the upstream real user. The Router principal has to be configured as a superuser in all secure downstream Namenodes; refer [here](../hadoop-common/Superusers.html) for how to configure the proxy user for a Namenode. Along with that, the user owning the Router daemons should be configured with the same identity as the Namenode process itself. Refer [here](../hadoop-hdfs/HdfsPermissionsGuide.html#The_Super-User) for details. +The Router relies on a state store to distribute tokens across all Routers. Apart from the default implementation provided, users can plug in their own state store implementation for token management. The default implementation relies on ZooKeeper, and since a large Router/ZooKeeper cluster could potentially hold millions of tokens, the `jute.maxbuffer` system property that ZooKeeper clients rely on should be configured appropriately in the Router daemons. + + +See the Apache JIRA ticket [HDFS-13532](https://issues.apache.org/jira/browse/HDFS-13532) for more information on this feature. Deployment @@ -444,6 +452,18 @@ Global quota supported in federation. | dfs.federation.router.quota.enable | `false` | If `true`, the quota system enabled in the Router. In that case, setting or clearing sub-cluster's quota directly is not recommended since Router Admin server will override sub-cluster's quota with global quota.| | dfs.federation.router.quota-cache.update.interval | 60s | How often the Router updates quota cache. This setting supports multiple time unit suffixes. If no suffix is specified then milliseconds is assumed. | +### Security + +Kerberos and delegation tokens supported in federation. + +| Property | Default | Description| +|:---- |:---- |:---- | +| dfs.federation.router.keytab.file | | The keytab file used by router to login as its service principal. The principal name is configured with 'dfs.federation.router.kerberos.principal'.| +| dfs.federation.router.kerberos.principal | | The Router service principal. This is typically set to router/_HOST@REALM.TLD. 
Each Router will substitute _HOST with its own fully qualified hostname at startup. The _HOST placeholder allows using the same configuration setting on all Routers in an HA setup. | +| dfs.federation.router.kerberos.principal.hostname | | The hostname for the Router containing this configuration file. Will be different for each machine. Defaults to current hostname. | +| dfs.federation.router.kerberos.internal.spnego.principal | `${dfs.web.authentication.kerberos.principal}` | The server principal used by the Router for web UI SPNEGO authentication when Kerberos security is enabled. This is typically set to HTTP/_HOST@REALM.TLD The SPNEGO server principal begins with the prefix HTTP/ by convention. If the value is '*', the web server will attempt to login with every principal specified in the keytab file 'dfs.web.authentication.kerberos.keytab'. | +| dfs.federation.router.secret.manager.class | `org.apache.hadoop.hdfs.server.federation.router.security.token.ZKDelegationTokenSecretManagerImpl` | Class to implement state store to delegation tokens. Default implementation uses zookeeper as the backend to store delegation tokens. | + Metrics ------- From 8e4267650fe52eb6b6d4466fc006e7af4a1326d0 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Fri, 24 May 2019 20:37:58 +0530 Subject: [PATCH 0326/1308] HDFS-14440. RBF: Optimize the file write process in case of multiple destinations. Contributed by Ayush Saxena. --- .../federation/router/RouterRpcServer.java | 46 ++++++++++--------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 559270f022eb7..e41ccc7c803ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -631,28 +631,11 @@ RemoteLocation getCreateLocation( RemoteLocation createLocation = locations.get(0); if (locations.size() > 1) { try { - // Check if this file already exists in other subclusters - LocatedBlocks existingLocation = getBlockLocations(src, 0, 1); + RemoteLocation existingLocation = getExistingLocation(src, locations); + // Forward to the existing location and let the NN handle the error if (existingLocation != null) { - // Forward to the existing location and let the NN handle the error - LocatedBlock existingLocationLastLocatedBlock = - existingLocation.getLastLocatedBlock(); - if (existingLocationLastLocatedBlock == null) { - // The block has no blocks yet, check for the meta data - for (RemoteLocation location : locations) { - RemoteMethod method = new RemoteMethod("getFileInfo", - new Class[] {String.class}, new RemoteParam()); - if (rpcClient.invokeSingle(location, method) != null) { - createLocation = location; - break; - } - } - } else { - ExtendedBlock existingLocationLastBlock = - existingLocationLastLocatedBlock.getBlock(); - String blockPoolId = existingLocationLastBlock.getBlockPoolId(); - createLocation = getLocationForPath(src, true, blockPoolId); - } + LOG.debug("{} already exists in {}.", src, existingLocation); + createLocation = existingLocation; } } catch (FileNotFoundException fne) { // Ignore if the file is not found @@ -661,6 +644,27 @@ RemoteLocation getCreateLocation( return createLocation; 
} + /** + * Gets the remote location where the file exists. + * @param src the name of file. + * @param locations all the remote locations. + * @return the remote location of the file if it exists, else null. + * @throws IOException in case of any exception. + */ + private RemoteLocation getExistingLocation(String src, + List locations) throws IOException { + RemoteMethod method = new RemoteMethod("getFileInfo", + new Class[] {String.class}, new RemoteParam()); + Map results = rpcClient.invokeConcurrent( + locations, method, false, false, HdfsFileStatus.class); + for (RemoteLocation loc : locations) { + if (results.get(loc) != null) { + return loc; + } + } + return null; + } + @Override // ClientProtocol public LastBlockWithStatus append(String src, final String clientName, final EnumSetWritable flag) throws IOException { From ffbb6b6557f4eb8587c7d57cda38f2a0de573f8b Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 29 May 2019 01:15:18 +0530 Subject: [PATCH 0327/1308] HDFS-13255. RBF: Fail when try to remove mount point paths. Contributed by Akira Ajisaka. --- .../federation/router/ErasureCoding.java | 6 +- .../hdfs/server/federation/router/Quota.java | 5 +- .../router/RouterClientProtocol.java | 32 +++---- .../federation/router/RouterRpcServer.java | 24 +++++- .../router/RouterStoragePolicy.java | 4 +- .../TestRouterHDFSContractRootDirectory.java | 10 +++ ...RouterHDFSContractRootDirectorySecure.java | 10 +++ ...estRouterWebHDFSContractRootDirectory.java | 10 +++ .../router/TestRouterMountTable.java | 84 +++++++++++++++++++ .../federation/router/TestRouterQuota.java | 54 +++++++----- 10 files changed, 192 insertions(+), 47 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java index e1d4844b9a3ab..2ef2a3f9e9262 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java @@ -140,7 +140,7 @@ public ErasureCodingPolicy getErasureCodingPolicy(String src) rpcServer.checkOperation(OperationCategory.READ); final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod remoteMethod = new RemoteMethod("getErasureCodingPolicy", new Class[] {String.class}, new RemoteParam()); ErasureCodingPolicy ret = rpcClient.invokeSequential( @@ -153,7 +153,7 @@ public void setErasureCodingPolicy(String src, String ecPolicyName) rpcServer.checkOperation(OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod remoteMethod = new RemoteMethod("setErasureCodingPolicy", new Class[] {String.class, String.class}, new RemoteParam(), ecPolicyName); @@ -168,7 +168,7 @@ public void unsetErasureCodingPolicy(String src) throws IOException { rpcServer.checkOperation(OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod remoteMethod = new RemoteMethod("unsetErasureCodingPolicy", new Class[] {String.class}, new RemoteParam()); if (rpcServer.isInvokeConcurrent(src)) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java index a6f5baba39316..0ac64a1137863 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java @@ -213,13 +213,14 @@ private List getQuotaRemoteLocations(String path) if (manager != null) { Set childrenPaths = manager.getPaths(path); for (String childPath : childrenPaths) { - locations.addAll(rpcServer.getLocationsForPath(childPath, true, false)); + locations.addAll( + rpcServer.getLocationsForPath(childPath, false, false)); } } if (locations.size() >= 1) { return locations; } else { - locations.addAll(rpcServer.getLocationsForPath(path, true, false)); + locations.addAll(rpcServer.getLocationsForPath(path, false, false)); return locations; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 0a14b99f51ec3..550f5e7e90bc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -396,7 +396,7 @@ public void setPermission(String src, FsPermission permissions) rpcServer.checkOperation(NameNode.OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("setPermission", new Class[] {String.class, FsPermission.class}, new RemoteParam(), permissions); @@ -413,7 +413,7 @@ public void setOwner(String src, String username, String groupname) rpcServer.checkOperation(NameNode.OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("setOwner", new Class[] {String.class, String.class, String.class}, new RemoteParam(), username, groupname); @@ -672,7 +672,7 @@ public boolean mkdirs(String src, FsPermission masked, boolean createParent) rpcServer.checkOperation(NameNode.OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true); + rpcServer.getLocationsForPath(src, false); RemoteMethod method = new RemoteMethod("mkdirs", new Class[] {String.class, FsPermission.class, boolean.class}, new RemoteParam(), masked, createParent); @@ -725,7 +725,7 @@ public DirectoryListing getListing(String src, byte[] startAfter, // Locate the dir and fetch the listing final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("getListing", new Class[] {String.class, startAfter.getClass(), boolean.class}, new RemoteParam(), startAfter, needLocation); @@ -1182,7 +1182,7 @@ public void setTimes(String src, long mtime, long atime) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.WRITE); final List locations = - rpcServer.getLocationsForPath(src, true, false); + 
rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("setTimes", new Class[] {String.class, long.class, long.class}, new RemoteParam(), mtime, atime); @@ -1212,7 +1212,7 @@ public String getLinkTarget(String path) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); final List locations = - rpcServer.getLocationsForPath(path, true, false); + rpcServer.getLocationsForPath(path, false, false); RemoteMethod method = new RemoteMethod("getLinkTarget", new Class[] {String.class}, new RemoteParam()); return rpcClient.invokeSequential(locations, method, String.class, null); @@ -1310,7 +1310,7 @@ public void modifyAclEntries(String src, List aclSpec) // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("modifyAclEntries", new Class[] {String.class, List.class}, new RemoteParam(), aclSpec); @@ -1328,7 +1328,7 @@ public void removeAclEntries(String src, List aclSpec) // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("removeAclEntries", new Class[] {String.class, List.class}, new RemoteParam(), aclSpec); @@ -1345,7 +1345,7 @@ public void removeDefaultAcl(String src) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("removeDefaultAcl", new Class[] {String.class}, new RemoteParam()); if (rpcServer.isInvokeConcurrent(src)) { @@ -1361,7 +1361,7 @@ public void removeAcl(String src) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("removeAcl", new Class[] {String.class}, new RemoteParam()); if (rpcServer.isInvokeConcurrent(src)) { @@ -1377,7 +1377,7 @@ public void setAcl(String src, List aclSpec) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod( "setAcl", new Class[] {String.class, List.class}, new RemoteParam(), aclSpec); @@ -1407,7 +1407,7 @@ public void createEncryptionZone(String src, String keyName) // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("createEncryptionZone", new Class[] {String.class, String.class}, new RemoteParam(), keyName); @@ -1454,7 +1454,7 @@ public void setXAttr(String src, XAttr xAttr, EnumSet flag) // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("setXAttr", new Class[] {String.class, XAttr.class, EnumSet.class}, new RemoteParam(), xAttr, flag); @@ -1500,7 +1500,7 @@ public void removeXAttr(String src, XAttr xAttr) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new 
RemoteMethod("removeXAttr", new Class[] {String.class, XAttr.class}, new RemoteParam(), xAttr); if (rpcServer.isInvokeConcurrent(src)) { @@ -1516,7 +1516,7 @@ public void checkAccess(String path, FsAction mode) throws IOException { // TODO handle virtual directories final List locations = - rpcServer.getLocationsForPath(path, true, false); + rpcServer.getLocationsForPath(path, false, false); RemoteMethod method = new RemoteMethod("checkAccess", new Class[] {String.class, FsAction.class}, new RemoteParam(), mode); @@ -1736,7 +1736,7 @@ private RemoteParam getRenameDestinations( throws IOException { final List dstLocations = - rpcServer.getLocationsForPath(dst, true, false); + rpcServer.getLocationsForPath(dst, false, false); final Map dstMap = new HashMap<>(); Iterator iterator = srcLocations.iterator(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index e41ccc7c803ac..32f3e6676986e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -1450,7 +1450,8 @@ protected List getLocationsForPath(String path, * Get the possible locations of a path in the federated cluster. * * @param path Path to check. - * @param failIfLocked Fail the request if locked (top mount point). + * @param failIfLocked Fail the request if there is any mount point under + * the path. * @param needQuotaVerify If need to do the quota verification. * @return Prioritized list of locations in the federated cluster. * @throws IOException If the location for this path cannot be determined. 
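For readers following the HDFS-13255 change above: with the stricter meaning of failIfLocked, a delete or rename that touches a mount point is now rejected at the Router itself instead of being forwarded to a subcluster. A minimal client-side sketch of the resulting behaviour follows; the Router address and the /mnt mount entry are assumptions for illustration only, and the exception text mirrors the message built in the next hunk.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.AccessControlException;

    public class MountPointDeleteSketch {
      public static void main(String[] args) throws Exception {
        // Assumed Router RPC endpoint (8888 is the hdfs-rbf-default.xml default port)
        // and an assumed mount table entry /mnt -> ns0:/mnt.
        FileSystem routerFs =
            FileSystem.get(new URI("hdfs://router:8888"), new Configuration());
        try {
          routerFs.delete(new Path("/mnt"), true);
        } catch (AccessControlException ace) {
          // Expected after this patch:
          // "The operation is not allowed because the path: /mnt is a mount point"
          System.out.println("Rejected by the Router: " + ace.getMessage());
        }
      }
    }

The regression tests added in TestRouterMountTable later in this patch exercise exactly this behaviour for both delete and rename.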
@@ -1458,6 +1459,27 @@ protected List getLocationsForPath(String path, protected List getLocationsForPath(String path, boolean failIfLocked, boolean needQuotaVerify) throws IOException { try { + if (failIfLocked) { + // check if there is any mount point under the path + final List mountPoints = + this.subclusterResolver.getMountPoints(path); + if (mountPoints != null) { + StringBuilder sb = new StringBuilder(); + sb.append("The operation is not allowed because "); + if (mountPoints.isEmpty()) { + sb.append("the path: ") + .append(path) + .append(" is a mount point"); + } else { + sb.append("there are mount points: ") + .append(String.join(",", mountPoints)) + .append(" under the path: ") + .append(path); + } + throw new AccessControlException(sb.toString()); + } + } + // Check the location for this path final PathLocation location = this.subclusterResolver.getDestinationForPath(path); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java index 33203dcdb99c0..05f983c397297 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java @@ -46,7 +46,7 @@ public void setStoragePolicy(String src, String policyName) rpcServer.checkOperation(NameNode.OperationCategory.WRITE); List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("setStoragePolicy", new Class[] {String.class, String.class}, new RemoteParam(), @@ -69,7 +69,7 @@ public void unsetStoragePolicy(String src) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); List locations = - rpcServer.getLocationsForPath(src, true, false); + rpcServer.getLocationsForPath(src, false, false); RemoteMethod method = new RemoteMethod("unsetStoragePolicy", new Class[] {String.class}, new RemoteParam()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java index cc603ddd7bb8c..21f5ee7bd758d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java @@ -61,4 +61,14 @@ public void testRmEmptyRootDirNonRecursive() throws IOException { public void testRecursiveRootListing() throws IOException { // It doesn't apply because we still have the mount points here } + + @Override + public void testRmRootRecursive() { + // It doesn't apply because we still have the mount points here + } + + @Override + public void testRmEmptyRootDirRecursive() { + // It doesn't apply because we still have the mount points here + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java index 
32ec1617d1fec..faa08ba786179 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java @@ -60,4 +60,14 @@ public void testRmEmptyRootDirNonRecursive() throws IOException { public void testRecursiveRootListing() throws IOException { // It doesn't apply because we still have the mount points here } + + @Override + public void testRmRootRecursive() { + // It doesn't apply because we still have the mount points here + } + + @Override + public void testRmEmptyRootDirRecursive() { + // It doesn't apply because we still have the mount points here + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java index 0128b3010d4bb..dd2bbff7d8da2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java @@ -61,4 +61,14 @@ public void testRmEmptyRootDirNonRecursive() throws IOException { public void testRecursiveRootListing() throws IOException { // It doesn't apply because we still have the mount points here } + + @Override + public void testRmRootRecursive() { + // It doesn't apply because we still have the mount points here + } + + @Override + public void testRmEmptyRootDirRecursive() { + // It doesn't apply because we still have the mount points here + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java index 24dfc3fd31392..b745ecdcee5ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.federation.router; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -51,6 +52,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Time; import org.junit.After; @@ -474,4 +476,86 @@ public void testGetListingWithTrailingSlash() throws IOException { nnFs1.delete(new Path("/testlist/tmp1"), true); } } + + /** + * Regression test for HDFS-13255. + * Verify that delete fails if the path is a mount point or + * there are any mount point under the path. 
+ */ + @Test + public void testDeleteMountPoint() throws Exception { + try { + MountTable addEntry = MountTable.newInstance("/testdelete/subdir", + Collections.singletonMap("ns0", "/testdelete/subdir")); + assertTrue(addMountTable(addEntry)); + nnFs0.mkdirs(new Path("/testdelete/subdir")); + LambdaTestUtils.intercept(AccessControlException.class, + "The operation is not allowed because there are mount points: " + + "subdir under the path: /testdelete", + () -> routerFs.delete(new Path("/testdelete"), true)); + LambdaTestUtils.intercept(AccessControlException.class, + "The operation is not allowed because there are mount points: " + + "subdir under the path: /testdelete", + () -> routerFs.delete(new Path("/testdelete"), false)); + LambdaTestUtils.intercept(AccessControlException.class, + "The operation is not allowed because the path: " + + "/testdelete/subdir is a mount point", + () -> routerFs.delete(new Path("/testdelete/subdir"), true)); + LambdaTestUtils.intercept(AccessControlException.class, + "The operation is not allowed because the path: " + + "/testdelete/subdir is a mount point", + () -> routerFs.delete(new Path("/testdelete/subdir"), false)); + } finally { + nnFs0.delete(new Path("/testdelete"), true); + } + } + + /** + * Regression test for HDFS-13255. + * Verify that rename fails if the src path is a mount point or + * there are any mount point under the path. + */ + @Test + public void testRenameMountPoint() throws Exception { + try { + MountTable addEntry = MountTable.newInstance("/testrename1/sub", + Collections.singletonMap("ns0", "/testrename1/sub")); + assertTrue(addMountTable(addEntry)); + addEntry = MountTable.newInstance("/testrename2/sub", + Collections.singletonMap("ns0", "/testrename2/sub")); + assertTrue(addMountTable(addEntry)); + nnFs0.mkdirs(new Path("/testrename1/sub/sub")); + nnFs0.mkdirs(new Path("/testrename2")); + + // Success: rename a directory to a mount point + assertTrue(nnFs0.exists(new Path("/testrename1/sub/sub"))); + assertFalse(nnFs0.exists(new Path("/testrename2/sub"))); + assertTrue(routerFs.rename(new Path("/testrename1/sub/sub"), + new Path("/testrename2"))); + assertFalse(nnFs0.exists(new Path("/testrename1/sub/sub"))); + assertTrue(nnFs0.exists(new Path("/testrename2/sub"))); + + // Fail: the target already exists + nnFs0.mkdirs(new Path("/testrename1/sub/sub")); + assertFalse(routerFs.rename(new Path("/testrename1/sub/sub"), + new Path("/testrename2"))); + + // Fail: The src is a mount point + LambdaTestUtils.intercept(AccessControlException.class, + "The operation is not allowed because the path: " + + "/testrename1/sub is a mount point", + () -> routerFs.rename(new Path("/testrename1/sub"), + new Path("/testrename2/sub"))); + + // Fail: There is a mount point under the src + LambdaTestUtils.intercept(AccessControlException.class, + "The operation is not allowed because there are mount points: " + + "sub under the path: /testrename1", + () -> routerFs.rename(new Path("/testrename1"), + new Path("/testrename2/sub"))); + } finally { + nnFs0.delete(new Path("/testrename1"), true); + nnFs0.delete(new Path("/testrename2"), true); + } + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java index 81d5023cfc09e..6dc98b85ac60f 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java @@ -19,12 +19,14 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; import java.util.List; @@ -141,29 +143,35 @@ public void testNamespaceQuotaExceed() throws Exception { addMountTable(mountTable2); final FileSystem routerFs = routerContext.getFileSystem(); - GenericTestUtils.waitFor(new Supplier() { - - @Override - public Boolean get() { - boolean isNsQuotaViolated = false; - try { - // create new directory to trigger NSQuotaExceededException - routerFs.mkdirs(new Path("/nsquota/" + UUID.randomUUID())); - routerFs.mkdirs(new Path("/nsquota/subdir/" + UUID.randomUUID())); - } catch (NSQuotaExceededException e) { - isNsQuotaViolated = true; - } catch (IOException ignored) { - } - return isNsQuotaViolated; + final List created = new ArrayList<>(); + GenericTestUtils.waitFor(() -> { + boolean isNsQuotaViolated = false; + try { + // create new directory to trigger NSQuotaExceededException + Path p = new Path("/nsquota/" + UUID.randomUUID()); + routerFs.mkdirs(p); + created.add(p); + p = new Path("/nsquota/subdir/" + UUID.randomUUID()); + routerFs.mkdirs(p); + created.add(p); + } catch (NSQuotaExceededException e) { + isNsQuotaViolated = true; + } catch (IOException ignored) { } + return isNsQuotaViolated; }, 5000, 60000); + // mkdir in real FileSystem should be okay nnFs1.mkdirs(new Path("/testdir1/" + UUID.randomUUID())); nnFs2.mkdirs(new Path("/testdir2/" + UUID.randomUUID())); - // delete/rename call should be still okay - routerFs.delete(new Path("/nsquota"), true); - routerFs.rename(new Path("/nsquota/subdir"), new Path("/nsquota/subdir")); + // rename/delete call should be still okay + assertFalse(created.isEmpty()); + for(Path src: created) { + final Path dst = new Path(src.toString()+"-renamed"); + routerFs.rename(src, dst); + routerFs.delete(dst, true); + } } @Test @@ -376,7 +384,7 @@ public void testStaleQuotaRemoving() throws Exception { /** * Remove a mount table entry to the mount table through the admin API. - * @param entry Mount table entry to remove. + * @param path Mount table entry to remove. * @return If it was successfully removed. * @throws IOException Problems removing entries. */ @@ -677,8 +685,8 @@ public void testQuotaRefreshWhenDestinationNotPresent() throws Exception { assertEquals(BLOCK_SIZE, mountQuota2.getSpaceConsumed()); FileSystem routerFs = routerContext.getFileSystem(); - // Remove destination directory for the mount entry - routerFs.delete(new Path("/setdir1"), true); + // Remove file in setdir1. The target directory still exists. 
+ routerFs.delete(new Path("/setdir1/file1"), true); // Create file routerClient.create("/setdir2/file3", true).close(); @@ -699,9 +707,9 @@ public void testQuotaRefreshWhenDestinationNotPresent() throws Exception { updatedMountTable = getMountTable("/setdir2"); mountQuota2 = updatedMountTable.getQuota(); - // If destination is not present the quota usage should be reset to 0 - assertEquals(0, cacheQuota1.getFileAndDirectoryCount()); - assertEquals(0, mountQuota1.getFileAndDirectoryCount()); + // The quota usage should be reset. + assertEquals(1, cacheQuota1.getFileAndDirectoryCount()); + assertEquals(1, mountQuota1.getFileAndDirectoryCount()); assertEquals(0, cacheQuota1.getSpaceConsumed()); assertEquals(0, mountQuota1.getSpaceConsumed()); From 9b197c289384d2cd3879f9a464b35ae80aecdf39 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 29 May 2019 08:31:47 +0530 Subject: [PATCH 0328/1308] HDFS-13909. RBF: Add Cache pools and directives related ClientProtocol APIs. Contributed by Ayush Saxena. --- .../federation/router/RemoteMethod.java | 13 +- .../federation/router/RouterCacheAdmin.java | 173 ++++++++++++++++++ .../router/RouterClientProtocol.java | 26 +-- .../federation/router/TestRouterRpc.java | 62 +++++++ 4 files changed, 259 insertions(+), 15 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterCacheAdmin.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java index f7ba8123d5f77..6f1121ef9fd66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java @@ -21,6 +21,8 @@ import java.lang.reflect.Method; import java.util.Arrays; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -198,9 +200,16 @@ public Object[] getParams(RemoteLocationContext context) { for (int i = 0; i < this.params.length; i++) { Object currentObj = this.params[i]; if (currentObj instanceof RemoteParam) { - // Map the parameter using the context RemoteParam paramGetter = (RemoteParam) currentObj; - objList[i] = paramGetter.getParameterForContext(context); + // Map the parameter using the context + if (this.types[i] == CacheDirectiveInfo.class) { + CacheDirectiveInfo path = + (CacheDirectiveInfo) paramGetter.getParameterForContext(context); + objList[i] = new CacheDirectiveInfo.Builder(path) + .setPath(new Path(context.getDest())).build(); + } else { + objList[i] = paramGetter.getParameterForContext(context); + } } else { objList[i] = currentObj; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterCacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterCacheAdmin.java new file mode 100644 index 0000000000000..e25d8b269dfb9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterCacheAdmin.java @@ -0,0 +1,173 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.fs.CacheFlag; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.namenode.NameNode; + +/** + * Module that implements all the RPC calls in + * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol} related to Cache Admin + * in the {@link RouterRpcServer}. + */ +public class RouterCacheAdmin { + + /** RPC server to receive client calls. */ + private final RouterRpcServer rpcServer; + /** RPC clients to connect to the Namenodes. */ + private final RouterRpcClient rpcClient; + /** Interface to identify the active NN for a nameservice or blockpool ID. 
*/ + private final ActiveNamenodeResolver namenodeResolver; + + public RouterCacheAdmin(RouterRpcServer server) { + this.rpcServer = server; + this.rpcClient = this.rpcServer.getRPCClient(); + this.namenodeResolver = this.rpcClient.getNamenodeResolver(); + } + + public long addCacheDirective(CacheDirectiveInfo path, + EnumSet flags) throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); + final List locations = + rpcServer.getLocationsForPath(path.getPath().toString(), true, false); + RemoteMethod method = new RemoteMethod("addCacheDirective", + new Class[] {CacheDirectiveInfo.class, EnumSet.class}, + new RemoteParam(getRemoteMap(path, locations)), flags); + Map response = + rpcClient.invokeConcurrent(locations, method, false, false, long.class); + return response.values().iterator().next(); + } + + public void modifyCacheDirective(CacheDirectiveInfo directive, + EnumSet flags) throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); + Path p = directive.getPath(); + if (p != null) { + final List locations = rpcServer + .getLocationsForPath(directive.getPath().toString(), true, false); + RemoteMethod method = new RemoteMethod("modifyCacheDirective", + new Class[] {CacheDirectiveInfo.class, EnumSet.class}, + new RemoteParam(getRemoteMap(directive, locations)), flags); + rpcClient.invokeConcurrent(locations, method); + return; + } + RemoteMethod method = new RemoteMethod("modifyCacheDirective", + new Class[] {CacheDirectiveInfo.class, EnumSet.class}, directive, + flags); + Set nss = namenodeResolver.getNamespaces(); + rpcClient.invokeConcurrent(nss, method, false, false); + } + + public void removeCacheDirective(long id) throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); + RemoteMethod method = new RemoteMethod("removeCacheDirective", + new Class[] {long.class}, id); + Set nss = namenodeResolver.getNamespaces(); + rpcClient.invokeConcurrent(nss, method, false, false); + } + + public BatchedEntries listCacheDirectives(long prevId, + CacheDirectiveInfo filter) throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.READ, true); + if (filter.getPath() != null) { + final List locations = rpcServer + .getLocationsForPath(filter.getPath().toString(), true, false); + RemoteMethod method = new RemoteMethod("listCacheDirectives", + new Class[] {long.class, CacheDirectiveInfo.class}, prevId, + new RemoteParam(getRemoteMap(filter, locations))); + Map response = rpcClient.invokeConcurrent( + locations, method, false, false, BatchedEntries.class); + return response.values().iterator().next(); + } + RemoteMethod method = new RemoteMethod("listCacheDirectives", + new Class[] {long.class, CacheDirectiveInfo.class}, prevId, + filter); + Set nss = namenodeResolver.getNamespaces(); + Map results = rpcClient + .invokeConcurrent(nss, method, true, false, BatchedEntries.class); + return results.values().iterator().next(); + } + + public void addCachePool(CachePoolInfo info) throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); + RemoteMethod method = new RemoteMethod("addCachePool", + new Class[] {CachePoolInfo.class}, info); + Set nss = namenodeResolver.getNamespaces(); + rpcClient.invokeConcurrent(nss, method, true, false); + } + + public void modifyCachePool(CachePoolInfo info) throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); + RemoteMethod method = new RemoteMethod("modifyCachePool", + new Class[] 
{CachePoolInfo.class}, info); + Set nss = namenodeResolver.getNamespaces(); + rpcClient.invokeConcurrent(nss, method, true, false); + } + + public void removeCachePool(String cachePoolName) throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true); + RemoteMethod method = new RemoteMethod("removeCachePool", + new Class[] {String.class}, cachePoolName); + Set nss = namenodeResolver.getNamespaces(); + rpcClient.invokeConcurrent(nss, method, true, false); + } + + public BatchedEntries listCachePools(String prevKey) + throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.READ, true); + RemoteMethod method = new RemoteMethod("listCachePools", + new Class[] {String.class}, prevKey); + Set nss = namenodeResolver.getNamespaces(); + Map results = rpcClient + .invokeConcurrent(nss, method, true, false, BatchedEntries.class); + return results.values().iterator().next(); + } + + /** + * Returns a map with the CacheDirectiveInfo mapped to each location. + * @param path CacheDirectiveInfo to be mapped to the locations. + * @param locations the locations to map. + * @return map with CacheDirectiveInfo mapped to the locations. + */ + private Map getRemoteMap( + CacheDirectiveInfo path, final List locations) { + final Map dstMap = new HashMap<>(); + Iterator iterator = locations.iterator(); + while (iterator.hasNext()) { + dstMap.put(iterator.next(), path); + } + return dstMap; + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 550f5e7e90bc3..66718fb12e15f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -132,6 +132,8 @@ public class RouterClientProtocol implements ClientProtocol { private final String superGroup; /** Erasure coding calls. */ private final ErasureCoding erasureCoding; + /** Cache Admin calls. */ + private final RouterCacheAdmin routerCacheAdmin; /** StoragePolicy calls. **/ private final RouterStoragePolicy storagePolicy; /** Router security manager to handle token operations. 
*/ @@ -164,6 +166,7 @@ public class RouterClientProtocol implements ClientProtocol { DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT); this.erasureCoding = new ErasureCoding(rpcServer); this.storagePolicy = new RouterStoragePolicy(rpcServer); + this.routerCacheAdmin = new RouterCacheAdmin(rpcServer); this.securityManager = rpcServer.getRouterSecurityManager(); } @@ -1259,48 +1262,45 @@ public SnapshotDiffReportListing getSnapshotDiffReportListing( @Override public long addCacheDirective(CacheDirectiveInfo path, EnumSet flags) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); - return 0; + return routerCacheAdmin.addCacheDirective(path, flags); } @Override public void modifyCacheDirective(CacheDirectiveInfo directive, EnumSet flags) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + routerCacheAdmin.modifyCacheDirective(directive, flags); } @Override public void removeCacheDirective(long id) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + routerCacheAdmin.removeCacheDirective(id); } @Override - public BatchedEntries listCacheDirectives( - long prevId, CacheDirectiveInfo filter) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.READ, false); - return null; + public BatchedEntries listCacheDirectives(long prevId, + CacheDirectiveInfo filter) throws IOException { + return routerCacheAdmin.listCacheDirectives(prevId, filter); } @Override public void addCachePool(CachePoolInfo info) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + routerCacheAdmin.addCachePool(info); } @Override public void modifyCachePool(CachePoolInfo info) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + routerCacheAdmin.modifyCachePool(info); } @Override public void removeCachePool(String cachePoolName) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + routerCacheAdmin.removeCachePool(cachePoolName); } @Override public BatchedEntries listCachePools(String prevKey) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.READ, false); - return null; + return routerCacheAdmin.listCachePools(prevKey); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index d9430767753bb..2f7eb6e917895 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -61,13 +61,18 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; +import 
org.apache.hadoop.hdfs.protocol.CachePoolEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; @@ -1439,6 +1444,63 @@ public Boolean get() { cluster.waitNamenodeRegistration(); } + @Test + public void testCacheAdmin() throws Exception { + DistributedFileSystem routerDFS = (DistributedFileSystem) routerFS; + // Verify cache directive commands. + CachePoolInfo cpInfo = new CachePoolInfo("Check"); + cpInfo.setOwnerName("Owner"); + + // Add a cache pool. + routerProtocol.addCachePool(cpInfo); + RemoteIterator iter = routerDFS.listCachePools(); + assertTrue(iter.hasNext()); + + // Modify a cache pool. + CachePoolInfo info = iter.next().getInfo(); + assertEquals("Owner", info.getOwnerName()); + cpInfo.setOwnerName("new Owner"); + routerProtocol.modifyCachePool(cpInfo); + iter = routerDFS.listCachePools(); + assertTrue(iter.hasNext()); + info = iter.next().getInfo(); + assertEquals("new Owner", info.getOwnerName()); + + // Remove a cache pool. + routerProtocol.removeCachePool("Check"); + iter = routerDFS.listCachePools(); + assertFalse(iter.hasNext()); + + // Verify cache directive commands. + cpInfo.setOwnerName("Owner"); + routerProtocol.addCachePool(cpInfo); + routerDFS.mkdirs(new Path("/ns1/dir")); + + // Add a cache directive. + CacheDirectiveInfo cacheDir = new CacheDirectiveInfo.Builder() + .setPath(new Path("/ns1/dir")) + .setReplication((short) 1) + .setPool("Check") + .build(); + long id = routerDFS.addCacheDirective(cacheDir); + CacheDirectiveInfo filter = + new CacheDirectiveInfo.Builder().setPath(new Path("/ns1/dir")).build(); + assertTrue(routerDFS.listCacheDirectives(filter).hasNext()); + + // List cache directive. + assertEquals("Check", + routerDFS.listCacheDirectives(filter).next().getInfo().getPool()); + cacheDir = new CacheDirectiveInfo.Builder().setReplication((short) 2) + .setId(id).setPath(new Path("/ns1/dir")).build(); + + // Modify cache directive. + routerDFS.modifyCacheDirective(cacheDir); + assertEquals((short) 2, (short) routerDFS.listCacheDirectives(filter).next() + .getInfo().getReplication()); + routerDFS.removeCacheDirective(id); + assertFalse(routerDFS.listCacheDirectives(filter).hasNext()); + } + /** * Check the erasure coding policies in the Router and the Namenode. * @return The erasure coding policies. From d240eec1364f2e11286dd00d10f35bcbbca19ec4 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 29 May 2019 23:31:21 +0530 Subject: [PATCH 0329/1308] HDFS-14516. RBF: Create hdfs-rbf-site.xml for RBF specific properties. Contributed by Takanobu Asanuma. 
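To make the intent of this patch concrete: properties under the dfs.federation.router.* prefix, which previously had to sit in hdfs-site.xml, can now be kept in a dedicated hdfs-rbf-site.xml that RBFConfigKeys registers as a default configuration resource. A small illustrative snippet is shown below; the two property names come from hdfs-rbf-default.xml, while the values are placeholders rather than recommendations.

    <?xml version="1.0" encoding="UTF-8"?>
    <configuration>
      <property>
        <name>dfs.federation.router.default.nameserviceId</name>
        <value>ns0</value>
      </property>
      <property>
        <name>dfs.federation.router.rpc-address</name>
        <value>0.0.0.0:8888</value>
      </property>
    </configuration>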
--- .../src/main/conf/hdfs-rbf-site.xml | 20 +++++++++++++++++++ .../federation/router/RBFConfigKeys.java | 7 +++++++ .../src/main/resources/hdfs-rbf-default.xml | 4 ++-- .../src/site/markdown/HDFSRouterFederation.md | 2 +- 4 files changed, 30 insertions(+), 3 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/conf/hdfs-rbf-site.xml diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/conf/hdfs-rbf-site.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/conf/hdfs-rbf-site.xml new file mode 100644 index 0000000000000..094ce6675ce60 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/conf/hdfs-rbf-site.xml @@ -0,0 +1,20 @@ + + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java index 153cd6414051d..6f9c888ec1773 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.federation.router; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; @@ -39,6 +40,12 @@ @InterfaceAudience.Private public class RBFConfigKeys extends CommonConfigurationKeysPublic { + public static final String HDFS_RBF_SITE_XML = "hdfs-rbf-site.xml"; + + static { + Configuration.addDefaultResource(HDFS_RBF_SITE_XML); + } + // HDFS Router-based federation public static final String FEDERATION_ROUTER_PREFIX = "dfs.federation.router."; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index e23f863e8a633..641d273ce4aa5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -19,8 +19,8 @@ --> - - + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index d9ae5af96c646..d4f10a15638d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -349,7 +349,7 @@ This federated namespace can also be set as the default one at **core-site.xml** Router configuration -------------------- -One can add the configurations for Router-based federation to **hdfs-site.xml**. +One can add the configurations for Router-based federation to **hdfs-rbf-site.xml**. The main options are documented in [hdfs-rbf-default.xml](../hadoop-hdfs-rbf/hdfs-rbf-default.xml). The configuration values are described in this section. From b6fff8c81e3d2245c93aa2369b42eedab0252ab9 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Thu, 30 May 2019 19:58:19 +0530 Subject: [PATCH 0330/1308] HDFS-13787. RBF: Add Snapshot related ClientProtocol APIs. Contributed by Inigo Goiri. 
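HDFS-13787 wires the snapshot portion of ClientProtocol through the Router, so snapshot management no longer has to bypass RBF and talk to a NameNode directly. The sketch below shows the client-side calls that this patch makes work through a Router; it assumes fs.defaultFS points at a Router endpoint and that /data resolves through the mount table, both of which are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

    public class RouterSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Assumed: fs.defaultFS = hdfs://router:8888 (a Router, not a NameNode).
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        Path dir = new Path("/data");

        dfs.allowSnapshot(dir);                    // handled by the new RouterSnapshot module
        Path s1 = dfs.createSnapshot(dir, "s1");   // path comes back in the mount namespace
        // ... write or delete some files under /data ...
        Path s2 = dfs.createSnapshot(dir, "s2");

        SnapshotDiffReport diff = dfs.getSnapshotDiffReport(dir, "s1", "s2");
        System.out.println(diff);

        dfs.deleteSnapshot(dir, "s2");
        dfs.deleteSnapshot(dir, "s1");
        dfs.disallowSnapshot(dir);
      }
    }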
--- .../router/RouterClientProtocol.java | 30 +-- .../federation/router/RouterRpcClient.java | 9 + .../federation/router/RouterRpcServer.java | 16 +- .../federation/router/RouterSnapshot.java | 208 ++++++++++++++++++ .../federation/router/TestRouterRpc.java | 130 +++++++++++ 5 files changed, 372 insertions(+), 21 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 66718fb12e15f..9d336085edd3b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -136,6 +136,8 @@ public class RouterClientProtocol implements ClientProtocol { private final RouterCacheAdmin routerCacheAdmin; /** StoragePolicy calls. **/ private final RouterStoragePolicy storagePolicy; + /** Snapshot calls. */ + private final RouterSnapshot snapshotProto; /** Router security manager to handle token operations. */ private RouterSecurityManager securityManager = null; @@ -166,6 +168,7 @@ public class RouterClientProtocol implements ClientProtocol { DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT); this.erasureCoding = new ErasureCoding(rpcServer); this.storagePolicy = new RouterStoragePolicy(rpcServer); + this.snapshotProto = new RouterSnapshot(rpcServer); this.routerCacheAdmin = new RouterCacheAdmin(rpcServer); this.securityManager = rpcServer.getRouterSecurityManager(); } @@ -1221,42 +1224,42 @@ public String getLinkTarget(String path) throws IOException { return rpcClient.invokeSequential(locations, method, String.class, null); } - @Override // Client Protocol + @Override public void allowSnapshot(String snapshotRoot) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + snapshotProto.allowSnapshot(snapshotRoot); } - @Override // Client Protocol + @Override public void disallowSnapshot(String snapshot) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + snapshotProto.disallowSnapshot(snapshot); } @Override public void renameSnapshot(String snapshotRoot, String snapshotOldName, String snapshotNewName) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + snapshotProto.renameSnapshot( + snapshotRoot, snapshotOldName, snapshotNewName); } @Override public SnapshottableDirectoryStatus[] getSnapshottableDirListing() throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.READ, false); - return null; + return snapshotProto.getSnapshottableDirListing(); } @Override public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, String earlierSnapshotName, String laterSnapshotName) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.READ, false); - return null; + return snapshotProto.getSnapshotDiffReport( + snapshotRoot, earlierSnapshotName, laterSnapshotName); } @Override public SnapshotDiffReportListing getSnapshotDiffReportListing( String snapshotRoot, String earlierSnapshotName, String laterSnapshotName, byte[] startPath, int index) throws IOException { - 
rpcServer.checkOperation(NameNode.OperationCategory.READ, false); - return null; + return snapshotProto.getSnapshotDiffReportListing( + snapshotRoot, earlierSnapshotName, laterSnapshotName, startPath, index); } @Override @@ -1558,14 +1561,13 @@ public DataEncryptionKey getDataEncryptionKey() throws IOException { @Override public String createSnapshot(String snapshotRoot, String snapshotName) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE); - return null; + return snapshotProto.createSnapshot(snapshotRoot, snapshotName); } @Override public void deleteSnapshot(String snapshotRoot, String snapshotName) throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); + snapshotProto.deleteSnapshot(snapshotRoot, snapshotName); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index 19aa13ac7e4b9..03704381acacb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; @@ -882,6 +883,14 @@ private IOException processException( return newException; } + if (ioe instanceof SnapshotException) { + String newMsg = processExceptionMsg( + ioe.getMessage(), loc.getDest(), loc.getSrc()); + SnapshotException newException = new SnapshotException(newMsg); + newException.setStackTrace(ioe.getStackTrace()); + return newException; + } + return ioe; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 32f3e6676986e..6facd7ef2b147 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -1007,12 +1007,12 @@ public String getLinkTarget(String path) throws IOException { return clientProto.getLinkTarget(path); } - @Override // Client Protocol + @Override // ClientProtocol public void allowSnapshot(String snapshotRoot) throws IOException { clientProto.allowSnapshot(snapshotRoot); } - @Override // Client Protocol + @Override // ClientProtocol public void disallowSnapshot(String snapshot) throws IOException { clientProto.disallowSnapshot(snapshot); } @@ -1023,7 +1023,7 @@ public void renameSnapshot(String snapshotRoot, String snapshotOldName, clientProto.renameSnapshot(snapshotRoot, snapshotOldName, snapshotNewName); } - @Override // Client Protocol + @Override // ClientProtocol public SnapshottableDirectoryStatus[] getSnapshottableDirListing() throws 
IOException { return clientProto.getSnapshottableDirListing(); @@ -1584,14 +1584,16 @@ static void resetCurrentUser() { * @param clazz Class of the values. * @return Array with the outputs. */ - protected static T[] merge( + static T[] merge( Map map, Class clazz) { // Put all results into a set to avoid repeats Set ret = new LinkedHashSet<>(); for (T[] values : map.values()) { - for (T val : values) { - ret.add(val); + if (values != null) { + for (T val : values) { + ret.add(val); + } } } @@ -1605,7 +1607,7 @@ protected static T[] merge( * @param clazz Class of the values. * @return Array with the values in set. */ - private static T[] toArray(Collection set, Class clazz) { + static T[] toArray(Collection set, Class clazz) { @SuppressWarnings("unchecked") T[] combinedData = (T[]) Array.newInstance(clazz, set.size()); combinedData = set.toArray(combinedData); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java new file mode 100644 index 0000000000000..7b08092d6431a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java @@ -0,0 +1,208 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; + +/** + * Module that implements all the RPC calls related to snapshots in + * {@link ClientProtocol} in the {@link RouterRpcServer}. + */ +public class RouterSnapshot { + + /** RPC server to receive client calls. */ + private final RouterRpcServer rpcServer; + /** RPC clients to connect to the Namenodes. */ + private final RouterRpcClient rpcClient; + /** Find generic locations. */ + private final ActiveNamenodeResolver namenodeResolver; + + public RouterSnapshot(RouterRpcServer server) { + this.rpcServer = server; + this.rpcClient = this.rpcServer.getRPCClient(); + this.namenodeResolver = rpcServer.getNamenodeResolver(); + } + + public void allowSnapshot(String snapshotRoot) throws IOException { + rpcServer.checkOperation(OperationCategory.WRITE); + + final List locations = + rpcServer.getLocationsForPath(snapshotRoot, true, false); + RemoteMethod method = new RemoteMethod("allowSnapshot", + new Class[] {String.class}, new RemoteParam()); + + if (rpcServer.isInvokeConcurrent(snapshotRoot)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } + } + + public void disallowSnapshot(String snapshotRoot) throws IOException { + rpcServer.checkOperation(OperationCategory.WRITE); + + final List locations = + rpcServer.getLocationsForPath(snapshotRoot, true, false); + RemoteMethod method = new RemoteMethod("disallowSnapshot", + new Class[] {String.class}, new RemoteParam()); + if (rpcServer.isInvokeConcurrent(snapshotRoot)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } + } + + public String createSnapshot(String snapshotRoot, String snapshotName) + throws IOException { + rpcServer.checkOperation(OperationCategory.WRITE); + + final List locations = + rpcServer.getLocationsForPath(snapshotRoot, true, false); + RemoteMethod method = new RemoteMethod("createSnapshot", + new Class[] {String.class, String.class}, new RemoteParam(), + snapshotName); + + String result = null; + if (rpcServer.isInvokeConcurrent(snapshotRoot)) { + Map results = rpcClient.invokeConcurrent( + locations, method, String.class); + Entry firstelement = + results.entrySet().iterator().next(); + RemoteLocation loc = firstelement.getKey(); + result = firstelement.getValue(); + result = result.replaceFirst(loc.getDest(), loc.getSrc()); + } else { + 
result = rpcClient.invokeSequential( + locations, method, String.class, null); + RemoteLocation loc = locations.get(0); + result = result.replaceFirst(loc.getDest(), loc.getSrc()); + } + return result; + } + + public void deleteSnapshot(String snapshotRoot, String snapshotName) + throws IOException { + rpcServer.checkOperation(OperationCategory.WRITE); + + final List locations = + rpcServer.getLocationsForPath(snapshotRoot, true, false); + RemoteMethod method = new RemoteMethod("deleteSnapshot", + new Class[] {String.class, String.class}, + new RemoteParam(), snapshotName); + + if (rpcServer.isInvokeConcurrent(snapshotRoot)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } + } + + public void renameSnapshot(String snapshotRoot, String oldSnapshotName, + String newSnapshot) throws IOException { + rpcServer.checkOperation(OperationCategory.WRITE); + + final List locations = + rpcServer.getLocationsForPath(snapshotRoot, true, false); + RemoteMethod method = new RemoteMethod("renameSnapshot", + new Class[] {String.class, String.class, String.class}, + new RemoteParam(), oldSnapshotName, newSnapshot); + + if (rpcServer.isInvokeConcurrent(snapshotRoot)) { + rpcClient.invokeConcurrent(locations, method); + } else { + rpcClient.invokeSequential(locations, method); + } + } + + public SnapshottableDirectoryStatus[] getSnapshottableDirListing() + throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.READ); + + RemoteMethod method = new RemoteMethod("getSnapshottableDirListing"); + Set nss = namenodeResolver.getNamespaces(); + Map ret = + rpcClient.invokeConcurrent( + nss, method, true, false, SnapshottableDirectoryStatus[].class); + + return RouterRpcServer.merge(ret, SnapshottableDirectoryStatus.class); + } + + public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, + String earlierSnapshotName, String laterSnapshotName) + throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.READ); + + final List locations = + rpcServer.getLocationsForPath(snapshotRoot, true, false); + RemoteMethod remoteMethod = new RemoteMethod("getSnapshotDiffReport", + new Class[] {String.class, String.class, String.class}, + new RemoteParam(), earlierSnapshotName, laterSnapshotName); + + if (rpcServer.isInvokeConcurrent(snapshotRoot)) { + Map ret = rpcClient.invokeConcurrent( + locations, remoteMethod, true, false, SnapshotDiffReport.class); + return ret.values().iterator().next(); + } else { + return rpcClient.invokeSequential( + locations, remoteMethod, SnapshotDiffReport.class, null); + } + } + + public SnapshotDiffReportListing getSnapshotDiffReportListing( + String snapshotRoot, String earlierSnapshotName, String laterSnapshotName, + byte[] startPath, int index) throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.READ); + + final List locations = + rpcServer.getLocationsForPath(snapshotRoot, true, false); + Class[] params = new Class[] { + String.class, String.class, String.class, + byte[].class, int.class}; + RemoteMethod remoteMethod = new RemoteMethod( + "getSnapshotDiffReportListing", params, + new RemoteParam(), earlierSnapshotName, laterSnapshotName, + startPath, index); + + if (rpcServer.isInvokeConcurrent(snapshotRoot)) { + Map ret = + rpcClient.invokeConcurrent(locations, remoteMethod, false, false, + SnapshotDiffReportListing.class); + Collection listings = ret.values(); + SnapshotDiffReportListing listing0 = listings.iterator().next(); + return listing0; + } else { + 
return rpcClient.invokeSequential( + locations, remoteMethod, SnapshotDiffReportListing.class, null); + } + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index 2f7eb6e917895..e656e7aecc790 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -86,6 +86,10 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; +import org.apache.hadoop.hdfs.protocol.SnapshotException; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext; @@ -94,6 +98,11 @@ import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; import org.apache.hadoop.hdfs.server.federation.metrics.NamenodeBeanMetrics; import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import org.apache.hadoop.hdfs.server.namenode.FSDirectory; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; @@ -105,6 +114,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; import org.codehaus.jettison.json.JSONObject; import org.junit.AfterClass; import org.junit.Before; @@ -743,6 +753,126 @@ public void testProxyTruncateFile() throws Exception { new Object[] {badPath, (long) 0, "testclient"}); } + @Test + public void testAllowDisallowSnapshots() throws Exception { + + // Create a directory via the router at the root level + String dirPath = "/testdir"; + String filePath1 = "/sample"; + FsPermission permission = new FsPermission("705"); + routerProtocol.mkdirs(dirPath, permission, false); + createFile(routerFS, filePath1, 32); + + // Check that initially doesn't allow snapshots + NamenodeContext nnContext = cluster.getNamenodes().get(0); + NameNode nn = nnContext.getNamenode(); + FSNamesystem fsn = NameNodeAdapter.getNamesystem(nn); + FSDirectory fsdir = fsn.getFSDirectory(); + INodeDirectory dirNode = fsdir.getINode4Write(dirPath).asDirectory(); + assertFalse(dirNode.isSnapshottable()); + + // Allow snapshots and verify the folder allows them + routerProtocol.allowSnapshot("/testdir"); + dirNode = fsdir.getINode4Write(dirPath).asDirectory(); + assertTrue(dirNode.isSnapshottable()); + + // Disallow snapshot on dir and verify does not allow snapshots anymore + 
routerProtocol.disallowSnapshot("/testdir"); + dirNode = fsdir.getINode4Write(dirPath).asDirectory(); + assertFalse(dirNode.isSnapshottable()); + + // Cleanup + routerProtocol.delete(dirPath, true); + } + + @Test + public void testManageSnapshot() throws Exception { + + final String mountPoint = "/mntsnapshot"; + final String snapshotFolder = mountPoint + "/folder"; + LOG.info("Setup a mount point for snapshots: {}", mountPoint); + Router r = router.getRouter(); + MockResolver resolver = (MockResolver) r.getSubclusterResolver(); + String ns0 = cluster.getNameservices().get(0); + resolver.addLocation(mountPoint, ns0, "/"); + + FsPermission permission = new FsPermission("777"); + routerProtocol.mkdirs(mountPoint, permission, false); + routerProtocol.mkdirs(snapshotFolder, permission, false); + for (int i = 1; i <= 9; i++) { + String folderPath = snapshotFolder + "/subfolder" + i; + routerProtocol.mkdirs(folderPath, permission, false); + } + + LOG.info("Create the snapshot: {}", snapshotFolder); + routerProtocol.allowSnapshot(snapshotFolder); + String snapshotName = routerProtocol.createSnapshot( + snapshotFolder, "snap"); + assertEquals(snapshotFolder + "/.snapshot/snap", snapshotName); + assertTrue(verifyFileExists(routerFS, snapshotFolder + "/.snapshot/snap")); + + LOG.info("Rename the snapshot and check it changed"); + routerProtocol.renameSnapshot(snapshotFolder, "snap", "newsnap"); + assertFalse( + verifyFileExists(routerFS, snapshotFolder + "/.snapshot/snap")); + assertTrue( + verifyFileExists(routerFS, snapshotFolder + "/.snapshot/newsnap")); + LambdaTestUtils.intercept(SnapshotException.class, + "Cannot delete snapshot snap from path " + snapshotFolder + ":", + () -> routerFS.deleteSnapshot(new Path(snapshotFolder), "snap")); + + LOG.info("Delete the snapshot and check it is not there"); + routerProtocol.deleteSnapshot(snapshotFolder, "newsnap"); + assertFalse( + verifyFileExists(routerFS, snapshotFolder + "/.snapshot/newsnap")); + + // Cleanup + routerProtocol.delete(mountPoint, true); + } + + @Test + public void testGetSnapshotListing() throws IOException { + + // Create a directory via the router and allow snapshots + final String snapshotPath = "/testGetSnapshotListing"; + final String childDir = snapshotPath + "/subdir"; + FsPermission permission = new FsPermission("705"); + routerProtocol.mkdirs(snapshotPath, permission, false); + routerProtocol.allowSnapshot(snapshotPath); + + // Create two snapshots + final String snapshot1 = "snap1"; + final String snapshot2 = "snap2"; + routerProtocol.createSnapshot(snapshotPath, snapshot1); + routerProtocol.mkdirs(childDir, permission, false); + routerProtocol.createSnapshot(snapshotPath, snapshot2); + + // Check for listing through the Router + SnapshottableDirectoryStatus[] dirList = + routerProtocol.getSnapshottableDirListing(); + assertEquals(1, dirList.length); + SnapshottableDirectoryStatus snapshotDir0 = dirList[0]; + assertEquals(snapshotPath, snapshotDir0.getFullPath().toString()); + + // Check for difference report in two snapshot + SnapshotDiffReport diffReport = routerProtocol.getSnapshotDiffReport( + snapshotPath, snapshot1, snapshot2); + assertEquals(2, diffReport.getDiffList().size()); + + // Check for difference in two snapshot + byte[] startPath = {}; + SnapshotDiffReportListing diffReportListing = + routerProtocol.getSnapshotDiffReportListing( + snapshotPath, snapshot1, snapshot2, startPath, -1); + assertEquals(1, diffReportListing.getModifyList().size()); + assertEquals(1, diffReportListing.getCreateList().size()); + + 
// Cleanup + routerProtocol.deleteSnapshot(snapshotPath, snapshot1); + routerProtocol.deleteSnapshot(snapshotPath, snapshot2); + routerProtocol.disallowSnapshot(snapshotPath); + } + @Test public void testProxyGetBlockLocations() throws Exception { From b062dd462d11a44c881a65ee648f46c924ffd5d9 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Thu, 30 May 2019 16:29:18 -0700 Subject: [PATCH 0331/1308] HDFS-14475. RBF: Expose router security enabled status on the UI. Contributed by CR Hota. --- .../hdfs/server/federation/metrics/FederationMBean.java | 7 ++++++- .../hdfs/server/federation/metrics/FederationMetrics.java | 5 +++++ .../src/main/webapps/router/federationhealth.html | 2 +- .../server/federation/metrics/TestFederationMetrics.java | 2 ++ 4 files changed, 14 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java index e33a77e039d70..53b27038d4d4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java @@ -240,7 +240,6 @@ public interface FederationMBean { /** * Get the current state of the router. - * * @return String label for the current router state. */ String getRouterStatus(); @@ -250,4 +249,10 @@ public interface FederationMBean { * @return number of DTs */ long getCurrentTokensCount(); + + /** + * Get the security status of the router. + * @return Security status. + */ + boolean isSecurityEnabled(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java index a196098b9d462..c219a43ab6533 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java @@ -77,6 +77,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.RouterState; import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion; import org.apache.hadoop.metrics2.util.MBeans; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; import org.codehaus.jettison.json.JSONObject; @@ -615,6 +616,10 @@ public long getCurrentTokensCount() { return -1; } + public boolean isSecurityEnabled() { + return UserGroupInformation.isSecurityEnabled(); + } + /** * Build a set of unique values found in all namespaces. * diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html index cf8653bc8f728..99eb6ecc390fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html @@ -90,7 +90,7 @@

    {#federation}

    - Security is {#routerstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/routerstat}.

    + Security is {#federation}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/federation}.

    {#router}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/router}
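For context, the template line above toggles on the SecurityEnabled flag that this patch adds to FederationMBean and backs with UserGroupInformation.isSecurityEnabled(). The snippet below is only a hedged sketch of reading that same attribute over JMX from inside the Router JVM: the object name is assumed from the MBeans.register("Router", "FederationState", ...) call used by the metrics bean, and the RouterSecurityProbe class name is illustrative rather than part of this patch.

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public final class RouterSecurityProbe {
      public static void main(String[] args) throws Exception {
        // Platform MBean server of the Router process; a remote reader would
        // go through a JMXConnector instead.
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Assumed name, derived from MBeans.register("Router", "FederationState", bean).
        ObjectName federationState =
            new ObjectName("Hadoop:service=Router,name=FederationState");
        boolean securityEnabled =
            (Boolean) mbs.getAttribute(federationState, "SecurityEnabled");
        System.out.println("Security is " + (securityEnabled ? "on" : "off"));
      }
    }
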

    diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java index 2c147ebf1d1ab..8758aa34cbf0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; import java.io.IOException; import java.util.Iterator; @@ -318,5 +319,6 @@ private void validateClusterStatsBean(FederationMBean bean) assertTrue(bean.getCompileInfo().length() > 0); assertTrue(bean.getRouterStarted().length() > 0); assertTrue(bean.getHostAndPort().length() > 0); + assertFalse(bean.isSecurityEnabled()); } } From ddbe08db3323431693ceb91ca00bc138be7577b0 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Fri, 31 May 2019 17:35:28 +0530 Subject: [PATCH 0332/1308] HDFS-13955. RBF: Support secure Namenode in NamenodeHeartbeatService. Contributed by CR Hota. --- .../federation/router/FederationUtil.java | 14 ++++-- .../router/NamenodeHeartbeatService.java | 27 ++++++++-- .../router/TestRouterNamenodeMonitoring.java | 50 +++++++++++++++++++ 3 files changed, 85 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java index f0d9168a36162..45868a8222328 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.VersionInfo; import org.codehaus.jettison.json.JSONArray; import org.codehaus.jettison.json.JSONException; @@ -55,9 +57,12 @@ private FederationUtil() { * * @param beanQuery JMX bean. * @param webAddress Web address of the JMX endpoint. + * @param connectionFactory to open http/https connection. + * @param scheme to use for URL connection. 
* @return JSON with the JMX data */ - public static JSONArray getJmx(String beanQuery, String webAddress) { + public static JSONArray getJmx(String beanQuery, String webAddress, + URLConnectionFactory connectionFactory, String scheme) { JSONArray ret = null; BufferedReader reader = null; try { @@ -68,8 +73,11 @@ public static JSONArray getJmx(String beanQuery, String webAddress) { host = webAddressSplit[0]; port = Integer.parseInt(webAddressSplit[1]); } - URL jmxURL = new URL("http", host, port, "/jmx?qry=" + beanQuery); - URLConnection conn = jmxURL.openConnection(); + URL jmxURL = new URL(scheme, host, port, "/jmx?qry=" + beanQuery); + LOG.debug("JMX URL: {}", jmxURL); + // Create a URL connection + URLConnection conn = connectionFactory.openConnection( + jmxURL, UserGroupInformation.isSecurityEnabled()); conn.setConnectTimeout(5 * 1000); conn.setReadTimeout(5 * 1000); InputStream in = conn.getInputStream(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java index 82b5609a8667f..d50a5fcdf4ffc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.tools.DFSHAAdmin; import org.apache.hadoop.hdfs.tools.NNHAServiceTarget; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.codehaus.jettison.json.JSONArray; import org.codehaus.jettison.json.JSONObject; import org.slf4j.Logger; @@ -86,7 +87,10 @@ public class NamenodeHeartbeatService extends PeriodicService { private String lifelineAddress; /** HTTP address for the namenode. */ private String webAddress; - + /** Connection factory for JMX calls. */ + private URLConnectionFactory connectionFactory; + /** URL scheme to use for JMX calls. */ + private String scheme; /** * Create a new Namenode status updater. * @param resolver Namenode resolver service to handle NN registration. @@ -147,6 +151,12 @@ protected void serviceInit(Configuration configuration) throws Exception { DFSUtil.getNamenodeWebAddr(conf, nameserviceId, namenodeId); LOG.info("{} Web address: {}", nnDesc, webAddress); + this.connectionFactory = + URLConnectionFactory.newDefaultURLConnectionFactory(conf); + + this.scheme = + DFSUtil.getHttpPolicy(conf).isHttpEnabled() ? 
"http" : "https"; + this.setIntervalMs(conf.getLong( DFS_ROUTER_HEARTBEAT_INTERVAL_MS, DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT)); @@ -329,7 +339,8 @@ private void updateJMXParameters( try { // TODO part of this should be moved to its own utility String query = "Hadoop:service=NameNode,name=FSNamesystem*"; - JSONArray aux = FederationUtil.getJmx(query, address); + JSONArray aux = FederationUtil.getJmx( + query, address, connectionFactory, scheme); if (aux != null) { for (int i = 0; i < aux.length(); i++) { JSONObject jsonObject = aux.getJSONObject(i); @@ -364,4 +375,14 @@ private void updateJMXParameters( LOG.error("Cannot get stat from {} using JMX", getNamenodeDesc(), e); } } -} + + @Override + protected void serviceStop() throws Exception { + LOG.info("Stopping NamenodeHeartbeat service for, NS {} NN {} ", + this.nameserviceId, this.namenodeId); + if (this.connectionFactory != null) { + this.connectionFactory.destroy(); + } + super.serviceStop(); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java index 8fa3506f73cce..9fcfcb4ae3e77 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java @@ -20,6 +20,7 @@ import static java.util.Arrays.asList; import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; import java.util.ArrayList; import java.util.Collection; @@ -32,6 +33,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.server.federation.MockNamenode; import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; @@ -40,8 +42,10 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.log4j.Level; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -264,4 +268,50 @@ private static void assertNamenodeHeartbeatService( assertTrue(actualSet + " does not contain all " + expected, actualSet.containsAll(expected)); } + + @Test + public void testJmxUrlHTTP() { + verifyUrlSchemes(HttpConfig.Policy.HTTP_ONLY.name()); + } + + @Test + public void testJmxUrlHTTPs() { + verifyUrlSchemes(HttpConfig.Policy.HTTPS_ONLY.name()); + } + + private void verifyUrlSchemes(String scheme) { + + // Attach our own log appender so we can verify output + final LogVerificationAppender appender = + new LogVerificationAppender(); + final org.apache.log4j.Logger logger = + org.apache.log4j.Logger.getRootLogger(); + logger.addAppender(appender); + logger.setLevel(Level.DEBUG); + + // Setup 
and start the Router + Configuration conf = getNamenodesConfig(); + conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, scheme); + Configuration routerConf = new RouterConfigBuilder(conf) + .heartbeat(true) + .build(); + routerConf.set(RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "0.0.0.0:0"); + routerConf.set(RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE, "ns1.nn0"); + router = new Router(); + router.init(routerConf); + + // Test the heartbeat services of the Router + Collection heartbeatServices = + router.getNamenodeHeartbeatServices(); + for (NamenodeHeartbeatService heartbeatService : heartbeatServices) { + heartbeatService.getNamenodeStatusReport(); + } + if (HttpConfig.Policy.HTTPS_ONLY.name().equals(scheme)) { + assertEquals(1, appender.countLinesWithMessage("JMX URL: https://")); + assertEquals(0, appender.countLinesWithMessage("JMX URL: http://")); + } else { + assertEquals(1, appender.countLinesWithMessage("JMX URL: http://")); + assertEquals(0, appender.countLinesWithMessage("JMX URL: https://")); + } + } } From 6915d7e13c2afbb2738176ba55ea0774f25e1264 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sat, 1 Jun 2019 10:00:02 +0530 Subject: [PATCH 0333/1308] HDFS-13480. RBF: Separate namenodeHeartbeat and routerHeartbeat to different config key. Contributed by Ayush Saxena. --- .../federation/router/RBFConfigKeys.java | 2 + .../hdfs/server/federation/router/Router.java | 19 ++++++-- .../src/main/resources/hdfs-rbf-default.xml | 10 +++++ .../src/site/markdown/HDFSRouterFederation.md | 7 ++- .../server/federation/router/TestRouter.java | 43 +++++++++++++++++++ 5 files changed, 77 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java index 6f9c888ec1773..1daebdc77ec52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java @@ -91,6 +91,8 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic { public static final String DFS_ROUTER_HEARTBEAT_ENABLE = FEDERATION_ROUTER_PREFIX + "heartbeat.enable"; public static final boolean DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT = true; + public static final String DFS_ROUTER_NAMENODE_HEARTBEAT_ENABLE = + FEDERATION_ROUTER_PREFIX + "namenode.heartbeat.enable"; public static final String DFS_ROUTER_HEARTBEAT_INTERVAL_MS = FEDERATION_ROUTER_PREFIX + "heartbeat.interval"; public static final long DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT = diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 7f9c597c13a81..539c6c8d4ad91 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -205,9 +205,13 @@ protected void serviceInit(Configuration configuration) throws Exception { addService(this.httpServer); } - if (conf.getBoolean( + boolean isRouterHeartbeatEnabled = conf.getBoolean( RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE, - RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT)) { + 
RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT); + boolean isNamenodeHeartbeatEnable = conf.getBoolean( + RBFConfigKeys.DFS_ROUTER_NAMENODE_HEARTBEAT_ENABLE, + isRouterHeartbeatEnabled); + if (isNamenodeHeartbeatEnable) { // Create status updater for each monitored Namenode this.namenodeHeartbeatServices = createNamenodeHeartbeatServices(); @@ -219,7 +223,8 @@ protected void serviceInit(Configuration configuration) throws Exception { if (this.namenodeHeartbeatServices.isEmpty()) { LOG.error("Heartbeat is enabled but there are no namenodes to monitor"); } - + } + if (isRouterHeartbeatEnabled) { // Periodically update the router state this.routerHeartbeatService = new RouterHeartbeatService(this); addService(this.routerHeartbeatService); @@ -750,6 +755,14 @@ Collection getNamenodeHeartbeatServices() { return this.namenodeHeartbeatServices; } + /** + * Get this router heartbeat service. + */ + @VisibleForTesting + RouterHeartbeatService getRouterHeartbeatService() { + return this.routerHeartbeatService; + } + /** * Get the Router safe mode service. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index 641d273ce4aa5..3f743f9774e1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -371,6 +371,16 @@ + + dfs.federation.router.namenode.heartbeat.enable + true + + If true, get namenode heartbeats and send into the State Store. + If not explicitly specified takes the same value as for + dfs.federation.router.heartbeat.enable. + + + dfs.federation.router.store.router.expiration 5m diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index d4f10a15638d8..5e107d2b87e15 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -45,6 +45,7 @@ This approach has the same architecture as [YARN federation](../../hadoop-yarn/h ### Example flow The simplest configuration deploys a Router on each NameNode machine. +The Router monitors the local NameNode and its state and heartbeats to the State Store. The Router monitors the local NameNode and heartbeats the state to the State Store. When a regular DFS client contacts any of the Routers to access a file in the federated filesystem, the Router checks the Mount Table in the State Store (i.e., the local cache) to find out which subcluster contains the file. Then it checks the Membership table in the State Store (i.e., the local cache) for the NameNode responsible for the subcluster. @@ -69,6 +70,9 @@ To make sure that changes have been propagated to all Routers, each Router heart The communications between the Routers and the State Store are cached (with timed expiration for freshness). This improves the performance of the system. +#### Router heartbeat +The Router periodically heartbeats its state to the State Store. + #### NameNode heartbeat For this role, the Router periodically checks the state of a NameNode (usually on the same server) and reports their high availability (HA) state and load/space status to the State Store. Note that this is an optional role, as a Router can be independent of any subcluster. 
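As a quick illustration of how the pre-existing dfs.federation.router.heartbeat.enable switch and the new dfs.federation.router.namenode.heartbeat.enable switch interact, here is a small hedged sketch that resolves the NameNode-heartbeat flag the same way Router#serviceInit does in this patch (explicit value if present, otherwise the router-heartbeat value). The HeartbeatSwitchSketch class and the printed labels are illustrative only; the three RBFConfigKeys constants are the ones introduced or referenced above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;

    public final class HeartbeatSwitchSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // The pre-existing switch: the Router heartbeats its own state.
        conf.setBoolean(RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE, true);

        // Left unset, the NameNode-heartbeat switch inherits the value above,
        // mirroring the fallback added to Router#serviceInit.
        boolean nnHeartbeat = conf.getBoolean(
            RBFConfigKeys.DFS_ROUTER_NAMENODE_HEARTBEAT_ENABLE,
            conf.getBoolean(RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE,
                RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT));
        System.out.println("NN heartbeat (inherited): " + nnHeartbeat);  // true

        // Set explicitly, only the per-NameNode monitors are switched off.
        conf.setBoolean(RBFConfigKeys.DFS_ROUTER_NAMENODE_HEARTBEAT_ENABLE, false);
        System.out.println("NN heartbeat (explicit): " + conf.getBoolean(
            RBFConfigKeys.DFS_ROUTER_NAMENODE_HEARTBEAT_ENABLE, true));  // false
      }
    }

Keeping the two keys separate appears to be what lets a Router that is not co-located with any NameNode keep publishing its own state to the State Store without running NameNode monitors, in line with the "optional role" note in the documentation hunk above.
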
@@ -433,7 +437,8 @@ Monitor the namenodes in the subclusters for forwarding the client requests. | Property | Default | Description| |:---- |:---- |:---- | -| dfs.federation.router.heartbeat.enable | `true` | If `true`, the Router heartbeats into the State Store. | +| dfs.federation.router.heartbeat.enable | `true` | If `true`, the Router periodically heartbeats its state to the State Store. | +| dfs.federation.router.namenode.heartbeat.enable | | If `true`, the Router gets namenode heartbeats and send to the State Store. If not explicitly specified takes the same value as for `dfs.federation.router.heartbeat.enable`. | | dfs.federation.router.heartbeat.interval | 5000 | How often the Router should heartbeat into the State Store in milliseconds. | | dfs.federation.router.monitor.namenode | | The identifier of the namenodes to monitor and heartbeat. | | dfs.federation.router.monitor.localnamenode.enable | `true` | If `true`, the Router should monitor the namenode in the local machine. | diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java index f83cfda6015ea..5fdb2cbcaffe6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java @@ -21,11 +21,13 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; import java.io.IOException; import java.net.InetSocketAddress; +import java.util.Collection; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -217,4 +219,45 @@ public void testRouterMetricsWhenDisabled() throws Exception { router.stop(); router.close(); } + + @Test + public void testSwitchRouter() throws IOException { + assertRouterHeartbeater(true, true); + assertRouterHeartbeater(true, false); + assertRouterHeartbeater(false, true); + assertRouterHeartbeater(false, false); + } + + /** + * Execute the test by specify the routerHeartbeat and nnHeartbeat switch. + * + * @param expectedRouterHeartbeat expect the routerHeartbeat enable state. + * @param expectedNNHeartbeat expect the nnHeartbeat enable state. 
+ */ + private void assertRouterHeartbeater(boolean expectedRouterHeartbeat, + boolean expectedNNHeartbeat) throws IOException { + final Router router = new Router(); + Configuration baseCfg = new RouterConfigBuilder(conf).rpc().build(); + baseCfg.setBoolean(RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE, + expectedRouterHeartbeat); + baseCfg.setBoolean(RBFConfigKeys.DFS_ROUTER_NAMENODE_HEARTBEAT_ENABLE, + expectedNNHeartbeat); + router.init(baseCfg); + RouterHeartbeatService routerHeartbeatService = + router.getRouterHeartbeatService(); + if (expectedRouterHeartbeat) { + assertNotNull(routerHeartbeatService); + } else { + assertNull(routerHeartbeatService); + } + Collection namenodeHeartbeatServices = + router.getNamenodeHeartbeatServices(); + if (expectedNNHeartbeat) { + assertNotNull(namenodeHeartbeatServices); + } else { + assertNull(namenodeHeartbeatServices); + } + router.close(); + } + } From ade8d3b60ecdab55bd61a71905ea3dbba0922f3e Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Tue, 4 Jun 2019 08:40:31 +0530 Subject: [PATCH 0334/1308] HDFS-14508. RBF: Clean-up and refactor UI components. Contributed by Takanobu Asanuma. --- .../federation/metrics/FederationMBean.java | 29 ++++- .../metrics/NamenodeBeanMetrics.java | 73 +++++------- ...FederationMetrics.java => RBFMetrics.java} | 56 ++++++++-- .../federation/metrics/RouterMBean.java | 104 ++++++++++++++++++ .../hdfs/server/federation/router/Router.java | 6 +- .../router/RouterMetricsService.java | 14 +-- .../main/webapps/router/federationhealth.html | 8 +- .../main/webapps/router/federationhealth.js | 3 +- ...TestRouterHDFSContractDelegationToken.java | 8 +- ...rationMetrics.java => TestRBFMetrics.java} | 38 ++++--- .../router/TestDisableNameservices.java | 4 +- .../federation/router/TestRouterAdminCLI.java | 6 +- 12 files changed, 251 insertions(+), 98 deletions(-) rename hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/{FederationMetrics.java => RBFMetrics.java} (93%) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RouterMBean.java rename hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/{TestFederationMetrics.java => TestRBFMetrics.java} (92%) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java index 53b27038d4d4f..5fa4755868bd7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java @@ -193,66 +193,87 @@ public interface FederationMBean { /** * When the router started. * @return Date as a string the router started. + * @deprecated Use {@link RouterMBean#getRouterStarted()} instead. */ + @Deprecated String getRouterStarted(); /** * Get the version of the router. * @return Version of the router. + * @deprecated Use {@link RouterMBean#getVersion()} instead. */ + @Deprecated String getVersion(); /** * Get the compilation date of the router. * @return Compilation date of the router. + * @deprecated Use {@link RouterMBean#getCompiledDate()} instead. */ + @Deprecated String getCompiledDate(); /** * Get the compilation info of the router. 
* @return Compilation info of the router. + * @deprecated Use {@link RouterMBean#getCompileInfo()} instead. */ + @Deprecated String getCompileInfo(); /** * Get the host and port of the router. * @return Host and port of the router. + * @deprecated Use {@link RouterMBean#getHostAndPort()} instead. */ + @Deprecated String getHostAndPort(); /** * Get the identifier of the router. * @return Identifier of the router. + * @deprecated Use {@link RouterMBean#getRouterId()} instead. */ + @Deprecated String getRouterId(); /** - * Get the host and port of the router. - * @return Host and port of the router. + * Gets the cluster ids of the namenodes. + * @return the cluster ids of the namenodes. + * @deprecated Use {@link RouterMBean#getClusterId()} instead. */ String getClusterId(); /** - * Get the host and port of the router. - * @return Host and port of the router. + * Gets the block pool ids of the namenodes. + * @return the block pool ids of the namenodes. + * @deprecated Use {@link RouterMBean#getBlockPoolId()} instead. */ + @Deprecated String getBlockPoolId(); /** * Get the current state of the router. * @return String label for the current router state. + * @deprecated Use {@link RouterMBean#getRouterStatus()} instead. */ + @Deprecated String getRouterStatus(); /** * Get the current number of delegation tokens in memory. * @return number of DTs + * @deprecated Use {@link RouterMBean#getCurrentTokensCount()} instead. */ + @Deprecated long getCurrentTokensCount(); /** * Get the security status of the router. * @return Security status. + * @deprecated Use {@link RouterMBean#isSecurityEnabled()} instead. */ + @Deprecated boolean isSecurityEnabled(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java index 50ec175fa9aba..6d26aa0945a9e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.router.Router; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; -import org.apache.hadoop.hdfs.server.federation.router.RouterServiceState; import org.apache.hadoop.hdfs.server.federation.router.SubClusterTimeoutException; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; @@ -169,8 +168,8 @@ public void close() { } } - private FederationMetrics getFederationMetrics() throws IOException { - FederationMetrics metrics = getRouter().getMetrics(); + private RBFMetrics getRBFMetrics() throws IOException { + RBFMetrics metrics = getRouter().getMetrics(); if (metrics == null) { throw new IOException("Federated metrics is not initialized"); } @@ -194,7 +193,7 @@ public String getSoftwareVersion() { @Override public long getUsed() { try { - return getFederationMetrics().getUsedCapacity(); + return getRBFMetrics().getUsedCapacity(); } catch (IOException e) { LOG.debug("Failed to get the used capacity", e.getMessage()); } @@ -204,7 +203,7 @@ public long getUsed() { @Override public long getFree() { try { - return getFederationMetrics().getRemainingCapacity(); 
+ return getRBFMetrics().getRemainingCapacity(); } catch (IOException e) { LOG.debug("Failed to get remaining capacity", e.getMessage()); } @@ -214,7 +213,7 @@ public long getFree() { @Override public long getTotal() { try { - return getFederationMetrics().getTotalCapacity(); + return getRBFMetrics().getTotalCapacity(); } catch (IOException e) { LOG.debug("Failed to Get total capacity", e.getMessage()); } @@ -224,7 +223,7 @@ public long getTotal() { @Override public long getProvidedCapacity() { try { - return getFederationMetrics().getProvidedSpace(); + return getRBFMetrics().getProvidedSpace(); } catch (IOException e) { LOG.debug("Failed to get provided capacity", e.getMessage()); } @@ -234,29 +233,11 @@ public long getProvidedCapacity() { @Override public String getSafemode() { try { - if (getRouter().isRouterState(RouterServiceState.SAFEMODE)) { - return "Safe mode is ON. " + this.getSafeModeTip(); - } + return getRBFMetrics().getSafemode(); } catch (IOException e) { return "Failed to get safemode status. Please check router" + "log for more detail."; } - return ""; - } - - private String getSafeModeTip() throws IOException { - Router rt = getRouter(); - String cmd = "Use \"hdfs dfsrouteradmin -safemode leave\" " - + "to turn safe mode off."; - if (rt.isRouterState(RouterServiceState.INITIALIZING) - || rt.isRouterState(RouterServiceState.UNINITIALIZED)) { - return "Router is in" + rt.getRouterState() - + "mode, the router will immediately return to " - + "normal mode after some time. " + cmd; - } else if (rt.isRouterState(RouterServiceState.SAFEMODE)) { - return "It was turned on manually. " + cmd; - } - return ""; } @Override @@ -309,7 +290,7 @@ public float getPercentBlockPoolUsed() { @Override public long getTotalBlocks() { try { - return getFederationMetrics().getNumBlocks(); + return getRBFMetrics().getNumBlocks(); } catch (IOException e) { LOG.debug("Failed to get number of blocks", e.getMessage()); } @@ -319,7 +300,7 @@ public long getTotalBlocks() { @Override public long getNumberOfMissingBlocks() { try { - return getFederationMetrics().getNumOfMissingBlocks(); + return getRBFMetrics().getNumOfMissingBlocks(); } catch (IOException e) { LOG.debug("Failed to get number of missing blocks", e.getMessage()); } @@ -330,7 +311,7 @@ public long getNumberOfMissingBlocks() { @Deprecated public long getPendingReplicationBlocks() { try { - return getFederationMetrics().getNumOfBlocksPendingReplication(); + return getRBFMetrics().getNumOfBlocksPendingReplication(); } catch (IOException e) { LOG.debug("Failed to get number of blocks pending replica", e.getMessage()); @@ -341,7 +322,7 @@ public long getPendingReplicationBlocks() { @Override public long getPendingReconstructionBlocks() { try { - return getFederationMetrics().getNumOfBlocksPendingReplication(); + return getRBFMetrics().getNumOfBlocksPendingReplication(); } catch (IOException e) { LOG.debug("Failed to get number of blocks pending replica", e.getMessage()); @@ -353,7 +334,7 @@ public long getPendingReconstructionBlocks() { @Deprecated public long getUnderReplicatedBlocks() { try { - return getFederationMetrics().getNumOfBlocksUnderReplicated(); + return getRBFMetrics().getNumOfBlocksUnderReplicated(); } catch (IOException e) { LOG.debug("Failed to get number of blocks under replicated", e.getMessage()); @@ -364,7 +345,7 @@ public long getUnderReplicatedBlocks() { @Override public long getLowRedundancyBlocks() { try { - return getFederationMetrics().getNumOfBlocksUnderReplicated(); + return 
getRBFMetrics().getNumOfBlocksUnderReplicated(); } catch (IOException e) { LOG.debug("Failed to get number of blocks under replicated", e.getMessage()); @@ -375,7 +356,7 @@ public long getLowRedundancyBlocks() { @Override public long getPendingDeletionBlocks() { try { - return getFederationMetrics().getNumOfBlocksPendingDeletion(); + return getRBFMetrics().getNumOfBlocksPendingDeletion(); } catch (IOException e) { LOG.debug("Failed to get number of blocks pending deletion", e.getMessage()); @@ -620,7 +601,7 @@ public long getProvidedCapacityTotal() { @Override public long getFilesTotal() { try { - return getFederationMetrics().getNumFiles(); + return getRBFMetrics().getNumFiles(); } catch (IOException e) { LOG.debug("Failed to get number of files", e.getMessage()); } @@ -635,7 +616,7 @@ public int getTotalLoad() { @Override public int getNumLiveDataNodes() { try { - return getFederationMetrics().getNumLiveNodes(); + return getRBFMetrics().getNumLiveNodes(); } catch (IOException e) { LOG.debug("Failed to get number of live nodes", e.getMessage()); } @@ -645,7 +626,7 @@ public int getNumLiveDataNodes() { @Override public int getNumDeadDataNodes() { try { - return getFederationMetrics().getNumDeadNodes(); + return getRBFMetrics().getNumDeadNodes(); } catch (IOException e) { LOG.debug("Failed to get number of dead nodes", e.getMessage()); } @@ -655,7 +636,7 @@ public int getNumDeadDataNodes() { @Override public int getNumStaleDataNodes() { try { - return getFederationMetrics().getNumStaleNodes(); + return getRBFMetrics().getNumStaleNodes(); } catch (IOException e) { LOG.debug("Failed to get number of stale nodes", e.getMessage()); } @@ -665,7 +646,7 @@ public int getNumStaleDataNodes() { @Override public int getNumDecomLiveDataNodes() { try { - return getFederationMetrics().getNumDecomLiveNodes(); + return getRBFMetrics().getNumDecomLiveNodes(); } catch (IOException e) { LOG.debug("Failed to get the number of live decommissioned datanodes", e.getMessage()); @@ -676,7 +657,7 @@ public int getNumDecomLiveDataNodes() { @Override public int getNumDecomDeadDataNodes() { try { - return getFederationMetrics().getNumDecomDeadNodes(); + return getRBFMetrics().getNumDecomDeadNodes(); } catch (IOException e) { LOG.debug("Failed to get the number of dead decommissioned datanodes", e.getMessage()); @@ -687,7 +668,7 @@ public int getNumDecomDeadDataNodes() { @Override public int getNumDecommissioningDataNodes() { try { - return getFederationMetrics().getNumDecommissioningNodes(); + return getRBFMetrics().getNumDecommissioningNodes(); } catch (IOException e) { LOG.debug("Failed to get number of decommissioning nodes", e.getMessage()); @@ -698,7 +679,7 @@ public int getNumDecommissioningDataNodes() { @Override public int getNumInMaintenanceLiveDataNodes() { try { - return getFederationMetrics().getNumInMaintenanceLiveDataNodes(); + return getRBFMetrics().getNumInMaintenanceLiveDataNodes(); } catch (IOException e) { LOG.debug("Failed to get number of live in maintenance nodes", e.getMessage()); @@ -709,7 +690,7 @@ public int getNumInMaintenanceLiveDataNodes() { @Override public int getNumInMaintenanceDeadDataNodes() { try { - return getFederationMetrics().getNumInMaintenanceDeadDataNodes(); + return getRBFMetrics().getNumInMaintenanceDeadDataNodes(); } catch (IOException e) { LOG.debug("Failed to get number of dead in maintenance nodes", e.getMessage()); @@ -720,7 +701,7 @@ public int getNumInMaintenanceDeadDataNodes() { @Override public int getNumEnteringMaintenanceDataNodes() { try { - return 
getFederationMetrics().getNumEnteringMaintenanceDataNodes(); + return getRBFMetrics().getNumEnteringMaintenanceDataNodes(); } catch (IOException e) { LOG.debug("Failed to get number of entering maintenance nodes", e.getMessage()); @@ -803,6 +784,12 @@ public String getHostAndPort() { @Override public boolean isSecurityEnabled() { + try { + return getRBFMetrics().isSecurityEnabled(); + } catch (IOException e) { + LOG.debug("Failed to get security status.", + e.getMessage()); + } return false; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java similarity index 93% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java rename to hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java index c219a43ab6533..9aa469d831db4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.router.Router; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; +import org.apache.hadoop.hdfs.server.federation.router.RouterServiceState; import org.apache.hadoop.hdfs.server.federation.router.security.RouterSecurityManager; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; import org.apache.hadoop.hdfs.server.federation.store.MountTableStore; @@ -90,10 +91,10 @@ /** * Implementation of the Router metrics collector. */ -public class FederationMetrics implements FederationMBean { +public class RBFMetrics implements RouterMBean, FederationMBean { private static final Logger LOG = - LoggerFactory.getLogger(FederationMetrics.class); + LoggerFactory.getLogger(RBFMetrics.class); /** Format for a date. */ private static final String DATE_FORMAT = "yyyy/MM/dd HH:mm:ss"; @@ -106,7 +107,8 @@ public class FederationMetrics implements FederationMBean { private final Router router; /** FederationState JMX bean. */ - private ObjectName beanName; + private ObjectName routerBeanName; + private ObjectName federationBeanName; /** Resolve the namenode for each namespace. 
*/ private final ActiveNamenodeResolver namenodeResolver; @@ -121,17 +123,26 @@ public class FederationMetrics implements FederationMBean { private RouterStore routerStore; - public FederationMetrics(Router router) throws IOException { + public RBFMetrics(Router router) throws IOException { this.router = router; try { - StandardMBean bean = new StandardMBean(this, FederationMBean.class); - this.beanName = MBeans.register("Router", "FederationState", bean); - LOG.info("Registered Router MBean: {}", this.beanName); + StandardMBean bean = new StandardMBean(this, RouterMBean.class); + this.routerBeanName = MBeans.register("Router", "Router", bean); + LOG.info("Registered Router MBean: {}", this.routerBeanName); } catch (NotCompliantMBeanException e) { throw new RuntimeException("Bad Router MBean setup", e); } + try { + StandardMBean bean = new StandardMBean(this, FederationMBean.class); + this.federationBeanName = MBeans.register("Router", "FederationState", + bean); + LOG.info("Registered FederationState MBean: {}", this.federationBeanName); + } catch (NotCompliantMBeanException e) { + throw new RuntimeException("Bad FederationState MBean setup", e); + } + // Resolve namenode for each nameservice this.namenodeResolver = this.router.getNamenodeResolver(); @@ -159,8 +170,11 @@ public FederationMetrics(Router router) throws IOException { * Unregister the JMX beans. */ public void close() { - if (this.beanName != null) { - MBeans.unregister(beanName); + if (this.routerBeanName != null) { + MBeans.unregister(routerBeanName); + } + if (this.federationBeanName != null) { + MBeans.unregister(federationBeanName); } } @@ -616,10 +630,34 @@ public long getCurrentTokensCount() { return -1; } + @Override public boolean isSecurityEnabled() { return UserGroupInformation.isSecurityEnabled(); } + @Override + public String getSafemode() { + if (this.router.isRouterState(RouterServiceState.SAFEMODE)) { + return "Safe mode is ON. " + this.getSafeModeTip(); + } else { + return ""; + } + } + + private String getSafeModeTip() { + String cmd = "Use \"hdfs dfsrouteradmin -safemode leave\" " + + "to turn safe mode off."; + if (this.router.isRouterState(RouterServiceState.INITIALIZING) + || this.router.isRouterState(RouterServiceState.UNINITIALIZED)) { + return "Router is in" + this.router.getRouterState() + + "mode, the router will immediately return to " + + "normal mode after some time. " + cmd; + } else if (this.router.isRouterState(RouterServiceState.SAFEMODE)) { + return "It was turned on manually. " + cmd; + } + return ""; + } + /** * Build a set of unique values found in all namespaces. * diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RouterMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RouterMBean.java new file mode 100644 index 0000000000000..daec1548b57a9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RouterMBean.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.metrics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * JMX interface for the router specific metrics. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface RouterMBean { + + /** + * When the router started. + * @return Date as a string the router started. + */ + String getRouterStarted(); + + /** + * Get the version of the router. + * @return Version of the router. + */ + String getVersion(); + + /** + * Get the compilation date of the router. + * @return Compilation date of the router. + */ + String getCompiledDate(); + + /** + * Get the compilation info of the router. + * @return Compilation info of the router. + */ + String getCompileInfo(); + + /** + * Get the host and port of the router. + * @return Host and port of the router. + */ + String getHostAndPort(); + + /** + * Get the identifier of the router. + * @return Identifier of the router. + */ + String getRouterId(); + + /** + * Get the current state of the router. + * + * @return String label for the current router state. + */ + String getRouterStatus(); + + /** + * Gets the cluster ids of the namenodes. + * @return the cluster ids of the namenodes. + */ + String getClusterId(); + + /** + * Gets the block pool ids of the namenodes. + * @return the block pool ids of the namenodes. + */ + String getBlockPoolId(); + + /** + * Get the current number of delegation tokens in memory. + * @return number of DTs + */ + long getCurrentTokensCount(); + + /** + * Gets the safemode status. + * + * @return the safemode status. + */ + String getSafemode(); + + /** + * Gets if security is enabled. + * + * @return true, if security is enabled. + */ + boolean isSecurityEnabled(); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 539c6c8d4ad91..b6d188d21c05a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.common.TokenVerifier; -import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics; +import org.apache.hadoop.hdfs.server.federation.metrics.RBFMetrics; import org.apache.hadoop.hdfs.server.federation.metrics.NamenodeBeanMetrics; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; @@ -634,9 +634,9 @@ public RouterMetrics getRouterMetrics() { * * @return Federation metrics. 
*/ - public FederationMetrics getMetrics() { + public RBFMetrics getMetrics() { if (this.metrics != null) { - return this.metrics.getFederationMetrics(); + return this.metrics.getRBFMetrics(); } return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java index 1887ed6bce2f5..1ed15bfd1b3e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.federation.router; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics; +import org.apache.hadoop.hdfs.server.federation.metrics.RBFMetrics; import org.apache.hadoop.hdfs.server.federation.metrics.NamenodeBeanMetrics; import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.service.AbstractService; @@ -34,7 +34,7 @@ public class RouterMetricsService extends AbstractService { /** Router metrics. */ private RouterMetrics routerMetrics; /** Federation metrics. */ - private FederationMetrics federationMetrics; + private RBFMetrics rbfMetrics; /** Namenode mock metrics. */ private NamenodeBeanMetrics nnMetrics; @@ -55,14 +55,14 @@ protected void serviceStart() throws Exception { this.nnMetrics = new NamenodeBeanMetrics(this.router); // Federation MBean JMX interface - this.federationMetrics = new FederationMetrics(this.router); + this.rbfMetrics = new RBFMetrics(this.router); } @Override protected void serviceStop() throws Exception { // Remove JMX interfaces - if (this.federationMetrics != null) { - this.federationMetrics.close(); + if (this.rbfMetrics != null) { + this.rbfMetrics.close(); } // Remove Namenode JMX interfaces @@ -90,8 +90,8 @@ public RouterMetrics getRouterMetrics() { * * @return Federation metrics. 
*/ - public FederationMetrics getFederationMetrics() { - return this.federationMetrics; + public RBFMetrics getRBFMetrics() { + return this.rbfMetrics; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html index 99eb6ecc390fb..105755d439316 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html @@ -75,8 +75,8 @@ + - + diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index ac01ca72a4486..13da176424154 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -391,11 +391,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> src/main/webapps/secondary/robots.txt src/contrib/** src/site/resources/images/* - src/main/webapps/static/bootstrap-3.3.7/** + src/main/webapps/static/bootstrap-3.4.1/** src/main/webapps/static/moment.min.js src/main/webapps/static/dust-full-2.0.0.min.js src/main/webapps/static/dust-helpers-1.1.1.min.js - src/main/webapps/static/jquery-3.3.1.min.js + src/main/webapps/static/jquery-3.4.1.min.js src/main/webapps/static/jquery.dataTables.min.js src/main/webapps/static/json-bignum.js src/main/webapps/static/dataTables.bootstrap.css diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html index ad4d4345f1084..8341b7b84c16f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html @@ -19,7 +19,7 @@ - + DataNode Information @@ -171,8 +171,8 @@ {/dn.VolumeInfo} - - + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html index 769315ec68382..366c80fcc1375 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html @@ -19,7 +19,7 @@ - + Namenode information @@ -475,9 +475,9 @@ - - - + @@ -71,6 +71,6 @@ - + diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot index c750521baef40..edaee5e726758 100644 --- a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot +++ b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot @@ -26,7 +26,7 @@ ${DATANODE_HOST} datanode Check webui static resources Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user - ${result} = Execute curl --negotiate -u : -s -I http://scm:9876/static/bootstrap-3.3.7/js/bootstrap.min.js + ${result} = Execute curl --negotiate -u : -s -I http://scm:9876/static/bootstrap-3.4.1/js/bootstrap.min.js Should contain ${result} 200 Start freon testing diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html index ba54cb2cf43f4..1b5e6936743a6 100644 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html +++ b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html @@ -26,7 +26,7 @@ Ozone Manager - + @@ -57,7 +57,7 @@ - + @@ -65,6 +65,6 @@ - + diff --git a/hadoop-ozone/s3gateway/src/main/resources/browser.html 
b/hadoop-ozone/s3gateway/src/main/resources/browser.html index a1f23380901f8..0405b17e90b53 100644 --- a/hadoop-ozone/s3gateway/src/main/resources/browser.html +++ b/hadoop-ozone/s3gateway/src/main/resources/browser.html @@ -24,7 +24,7 @@ + href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css"> - - + + diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html index c4b791b98bfce..68939ef29eea0 100644 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html +++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html @@ -26,7 +26,7 @@ S3 gateway -- Apache Hadoop Ozone - + @@ -74,6 +74,6 @@
    S3 gateway
    - + From 8fc6567b946f1d536ffed4798b5403a365021464 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Elek?= Date: Mon, 29 Jul 2019 14:44:58 +0200 Subject: [PATCH 0799/1308] HDDS-1871. Remove anti-affinity rules from k8s minkube example Signed-off-by: Anu Engineer --- .../k8s/definitions/ozone/datanode-ss.yaml | 6 ++--- .../getting-started/datanode-statefulset.yaml | 6 ++--- .../getting-started/om-statefulset.yaml | 22 +++++-------------- .../getting-started/s3g-statefulset.yaml | 5 +++++ .../getting-started/scm-statefulset.yaml | 4 ++++ .../src/main/k8s/examples/minikube/Flekszible | 9 ++++++++ .../minikube/datanode-statefulset.yaml | 22 +++++-------------- .../k8s/examples/minikube/om-statefulset.yaml | 6 ++--- .../examples/minikube/s3g-statefulset.yaml | 6 ++--- .../examples/minikube/scm-statefulset.yaml | 12 +++++----- .../ozone-dev/csi/csi-ozone-clusterrole.yaml | 2 +- .../csi/csi-ozone-clusterrolebinding.yaml | 6 ++--- .../csi/csi-ozone-serviceaccount.yaml | 2 +- .../ozone-dev/datanode-statefulset.yaml | 6 ++--- .../ozone-dev/prometheus-clusterrole.yaml | 2 +- ...rometheus-operator-clusterrolebinding.yaml | 6 ++--- .../ozone/csi/csi-ozone-clusterrole.yaml | 2 +- .../csi/csi-ozone-clusterrolebinding.yaml | 6 ++--- .../ozone/csi/csi-ozone-serviceaccount.yaml | 2 +- .../examples/ozone/datanode-statefulset.yaml | 6 ++--- 20 files changed, 68 insertions(+), 70 deletions(-) diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml index 94dc570c45171..88a4308adbf2d 100644 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml @@ -32,9 +32,9 @@ spec: app: ozone component: datanode annotations: - prdatanodeetheus.io/scrape: "true" - prdatanodeetheus.io/port: "9882" - prdatanodeetheus.io/path: "/prom" + prometheus.io/scrape: "true" + prometheus.io/port: "9882" + prometheus.io/path: "/prom" spec: affinity: podAntiAffinity: diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml index 6c8d1bfdb1a01..c393eada79d63 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml @@ -33,9 +33,9 @@ spec: app: ozone component: datanode annotations: - prdatanodeetheus.io/scrape: "true" - prdatanodeetheus.io/port: "9882" - prdatanodeetheus.io/path: /prom + prometheus.io/scrape: "true" + prometheus.io/port: "9882" + prometheus.io/path: /prom spec: affinity: podAntiAffinity: diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml index c8ff81b0b93a0..5de01f5feaee9 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml @@ -39,22 +39,6 @@ spec: spec: securityContext: fsGroup: 1000 - initContainers: - - name: init - image: '@docker.image@' - args: - - ozone - - om - - --init - env: - - name: WAITFOR - value: scm-0.scm:9876 - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data containers: - name: om image: '@docker.image@' @@ -64,6 +48,12 @@ spec: env: - name: WAITFOR value: scm-0.scm:9876 + - name: ENSURE_OM_INITIALIZED + 
value: /data/metadata/om/current/VERSION + livenessProbe: + tcpSocket: + port: 9862 + initialDelaySeconds: 30 envFrom: - configMapRef: name: config diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml index c7e13e55cf57d..240958303f69b 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml @@ -39,6 +39,11 @@ spec: args: - ozone - s3g + livenessProbe: + httpGet: + path: / + port: 9878 + initialDelaySeconds: 30 envFrom: - configMapRef: name: config diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-statefulset.yaml index e1deed8ab84cf..0f8173c48bae8 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-statefulset.yaml @@ -58,6 +58,10 @@ spec: args: - ozone - scm + livenessProbe: + tcpSocket: + port: 9861 + initialDelaySeconds: 30 envFrom: - configMapRef: name: config diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible index e00d9ce8182a0..3390db03d4a14 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible @@ -22,6 +22,15 @@ import: image: "@docker.image@" - type: PublishStatefulSet - type: ozone/emptydir + - type: Remove + trigger: + metadata: + name: datanode + path: + - spec + - template + - spec + - affinity - path: ozone/freon destination: freon transformations: diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml index 452e258e03551..db91864bdaf39 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml @@ -33,20 +33,10 @@ spec: app: ozone component: datanode annotations: - prdatanodeetheus.io/scrape: "true" - prdatanodeetheus.io/port: "9882" - prdatanodeetheus.io/path: /prom + prometheus.io/scrape: "true" + prometheus.io/port: "9882" + prometheus.io/path: /prom spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: component - operator: In - values: - - datanode - topologyKey: kubernetes.io/hostname securityContext: fsGroup: 1000 containers: @@ -55,12 +45,12 @@ spec: args: - ozone - datanode - volumeMounts: - - name: data - mountPath: /data envFrom: - configMapRef: name: config + volumeMounts: + - name: data + mountPath: /data volumes: - name: data emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml index 172df34744f5f..5de01f5feaee9 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml @@ -54,12 +54,12 @@ spec: tcpSocket: port: 9862 initialDelaySeconds: 30 - volumeMounts: - - name: data - mountPath: /data envFrom: - configMapRef: name: config + volumeMounts: + - name: data + mountPath: /data volumes: - name: data emptyDir: {} diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-statefulset.yaml index 1718fed218418..240958303f69b 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-statefulset.yaml @@ -44,12 +44,12 @@ spec: path: / port: 9878 initialDelaySeconds: 30 - volumeMounts: - - name: data - mountPath: /data envFrom: - configMapRef: name: config + volumeMounts: + - name: data + mountPath: /data volumes: - name: data emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-statefulset.yaml index 7f1606d475417..0f8173c48bae8 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-statefulset.yaml @@ -46,12 +46,12 @@ spec: - ozone - scm - --init - volumeMounts: - - name: data - mountPath: /data envFrom: - configMapRef: name: config + volumeMounts: + - name: data + mountPath: /data containers: - name: scm image: '@docker.image@' @@ -62,12 +62,12 @@ spec: tcpSocket: port: 9861 initialDelaySeconds: 30 - volumeMounts: - - name: data - mountPath: /data envFrom: - configMapRef: name: config + volumeMounts: + - name: data + mountPath: /data volumes: - name: data emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrole.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrole.yaml index efcd51f0724b8..927ba6ff7b7fe 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrole.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrole.yaml @@ -17,7 +17,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: csi-ozone + name: csi-ozone-default rules: - apiGroups: - "" diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrolebinding.yaml index bb8bdf8b55bf9..948e759fbe356 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrolebinding.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrolebinding.yaml @@ -17,12 +17,12 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: csi-ozone + name: csi-ozone-default subjects: - kind: ServiceAccount name: csi-ozone - namespace: weekly-test + namespace: default roleRef: kind: ClusterRole - name: csi-ozone + name: csi-ozone-default apiGroup: rbac.authorization.k8s.io diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-serviceaccount.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-serviceaccount.yaml index 41f7747ae9723..628d2a1c5957e 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-serviceaccount.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-serviceaccount.yaml @@ -17,5 +17,5 @@ apiVersion: v1 kind: ServiceAccount metadata: - namespace: weekly-test + namespace: default name: csi-ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml index 843aa948e108f..475ce690b643b 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml +++ 
b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml @@ -33,9 +33,9 @@ spec: app: ozone component: datanode annotations: - prdatanodeetheus.io/scrape: "true" - prdatanodeetheus.io/port: "9882" - prdatanodeetheus.io/path: /prom + prometheus.io/scrape: "true" + prometheus.io/port: "9882" + prometheus.io/path: /prom spec: affinity: podAntiAffinity: diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-clusterrole.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-clusterrole.yaml index d5df196b4a946..bf62be615b7bc 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-clusterrole.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-clusterrole.yaml @@ -17,7 +17,7 @@ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: - name: prometheus + name: prometheus-default rules: - apiGroups: - "" diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-clusterrolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-clusterrolebinding.yaml index 6ee6217cfeaad..13ac066aba9fa 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-clusterrolebinding.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-clusterrolebinding.yaml @@ -17,12 +17,12 @@ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: - name: prometheus-operator + name: prometheus-operator-default roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: prometheus + name: prometheus-default subjects: - kind: ServiceAccount name: prometheus-operator - namespace: weekly-test + namespace: default diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrole.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrole.yaml index efcd51f0724b8..927ba6ff7b7fe 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrole.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrole.yaml @@ -17,7 +17,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: csi-ozone + name: csi-ozone-default rules: - apiGroups: - "" diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrolebinding.yaml index bb8bdf8b55bf9..948e759fbe356 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrolebinding.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrolebinding.yaml @@ -17,12 +17,12 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: csi-ozone + name: csi-ozone-default subjects: - kind: ServiceAccount name: csi-ozone - namespace: weekly-test + namespace: default roleRef: kind: ClusterRole - name: csi-ozone + name: csi-ozone-default apiGroup: rbac.authorization.k8s.io diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-serviceaccount.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-serviceaccount.yaml index 41f7747ae9723..628d2a1c5957e 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-serviceaccount.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-serviceaccount.yaml @@ -17,5 +17,5 @@ apiVersion: v1 kind: ServiceAccount metadata: - namespace: weekly-test + namespace: default name: csi-ozone diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-statefulset.yaml index b74d58e856942..a3aa528dff0b3 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-statefulset.yaml @@ -33,9 +33,9 @@ spec: app: ozone component: datanode annotations: - prdatanodeetheus.io/scrape: "true" - prdatanodeetheus.io/port: "9882" - prdatanodeetheus.io/path: /prom + prometheus.io/scrape: "true" + prometheus.io/port: "9882" + prometheus.io/path: /prom spec: affinity: podAntiAffinity: From 0f598aed13d0fc55908bab3f1653f20084153299 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Thu, 22 Aug 2019 02:25:06 +0530 Subject: [PATCH 0800/1308] HDFS-14276. [SBN read] Reduce tailing overhead. Contributed by Wei-Chiu Chuang. --- .../hadoop/hdfs/server/namenode/ha/EditLogTailer.java | 8 ++++++-- .../hadoop/hdfs/server/namenode/TestNameNodeMXBean.java | 3 +-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index 536986152d957..d27947b995238 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -467,10 +467,12 @@ private void doWork() { try { // There's no point in triggering a log roll if the Standby hasn't // read any more transactions since the last time a roll was - // triggered. + // triggered. + boolean triggeredLogRoll = false; if (tooLongSinceLastLoad() && lastRollTriggerTxId < lastLoadedTxnId) { triggerActiveLogRoll(); + triggeredLogRoll = true; } /** * Check again in case someone calls {@link EditLogTailer#stop} while @@ -496,7 +498,9 @@ private void doWork() { Time.monotonicNow() - startTime); } //Update NameDirSize Metric - namesystem.getFSImage().getStorage().updateNameDirSize(); + if (triggeredLogRoll) { + namesystem.getFSImage().getStorage().updateNameDirSize(); + } } catch (EditLogInputException elie) { LOG.warn("Error while reading edits from disk. 
Will try again.", elie); } catch (InterruptedException ie) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index f6da894dc1968..7157cff0e7a2a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -671,6 +671,7 @@ public void testQueueLength() throws Exception { public void testNNDirectorySize() throws Exception{ Configuration conf = new Configuration(); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 0); MiniDFSCluster cluster = null; for (int i = 0; i < 5; i++) { try{ @@ -700,8 +701,6 @@ public void testNNDirectorySize() throws Exception{ FSNamesystem nn0 = cluster.getNamesystem(0); FSNamesystem nn1 = cluster.getNamesystem(1); - checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize()); - checkNNDirSize(cluster.getNameDirs(1), nn1.getNameDirSize()); cluster.transitionToActive(0); fs = cluster.getFileSystem(0); DFSTestUtil.createFile(fs, new Path("/file"), 0, (short) 1, 0L); From f9029c4070e8eb046b403f5cb6d0a132c5d58448 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Thu, 22 Aug 2019 02:40:39 +0530 Subject: [PATCH 0801/1308] HDFS-14744. RBF: Non secured routers should not log in error mode when UGI is default. Contributed by CR Hota. --- .../server/federation/router/RouterClientProtocol.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index e9cb54741dceb..4d8527f906960 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -1937,7 +1937,12 @@ private HdfsFileStatus getMountPointStatus( owner = ugi.getUserName(); group = ugi.getPrimaryGroupName(); } catch (IOException e) { - LOG.error("Cannot get remote user: {}", e.getMessage()); + String msg = "Cannot get remote user: " + e.getMessage(); + if (UserGroupInformation.isSecurityEnabled()) { + LOG.error(msg); + } else { + LOG.debug(msg); + } } } long inodeId = 0; From d58eba867234eaac0e229feb990e9dab3912e063 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Mon, 19 Aug 2019 15:54:44 -0700 Subject: [PATCH 0802/1308] HDDS-1927. Consolidate add/remove Acl into OzoneAclUtil class. Contributed by Xiaoyu Yao. 
Signed-off-by: Anu Engineer --- .../hadoop/ozone/client/rpc/RpcClient.java | 5 +- .../org/apache/hadoop/ozone/OzoneAcl.java | 5 + .../hadoop/ozone/om/helpers/OmBucketInfo.java | 76 +---- .../hadoop/ozone/om/helpers/OmKeyInfo.java | 147 ++------- .../ozone/om/helpers/OmOzoneAclMap.java | 7 +- .../hadoop/ozone/om/helpers/OmPrefixInfo.java | 30 +- .../hadoop/ozone/om/helpers/OzoneAclUtil.java | 286 ++++++++++++++++++ .../hadoop/ozone/web/utils/OzoneUtils.java | 158 ---------- .../ozone/om/helpers/TestOzoneAclUtil.java | 191 ++++++++++++ .../rpc/TestOzoneRpcClientAbstract.java | 15 +- .../hadoop/ozone/om/TestKeyManagerImpl.java | 8 +- .../hadoop/ozone/om/TestOzoneManager.java | 3 +- .../acl/TestOzoneNativeAuthorizer.java | 4 +- .../storage/DistributedStorageHandler.java | 3 +- .../hadoop/ozone/om/BucketManagerImpl.java | 91 +----- .../hadoop/ozone/om/KeyManagerImpl.java | 204 +++---------- .../hadoop/ozone/om/PrefixManagerImpl.java | 186 +++--------- .../file/OMDirectoryCreateRequest.java | 11 +- .../ozone/om/request/key/OMKeyRequest.java | 13 +- .../request/key/acl/OMKeyAddAclRequest.java | 7 +- .../key/acl/OMKeyRemoveAclRequest.java | 7 +- .../request/key/acl/OMKeySetAclRequest.java | 9 +- .../S3InitiateMultipartUploadRequest.java | 3 +- .../S3MultipartUploadCompleteRequest.java | 4 +- .../OzoneManagerRequestHandler.java | 5 +- 25 files changed, 699 insertions(+), 779 deletions(-) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java create mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index fbb488ecd4602..003bcc43ef5a3 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -64,9 +64,10 @@ import org.apache.hadoop.ozone.om.helpers.OmPartInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.protocolPB .OzoneManagerProtocolClientSideTranslatorPB; @@ -440,7 +441,7 @@ public void createBucket( * @return listOfAcls * */ private List getAclList() { - return OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), + return OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(), userRights, groupRights); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 1730a4fe372f7..6a74342b8d289 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -328,6 +328,11 @@ public boolean equals(Object obj) { otherAcl.getAclScope().equals(this.getAclScope()); } + public OzoneAcl setAclScope(AclScope scope) { + this.aclScope = scope; + return this; + } + /** * Scope of ozone acl. 
* */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index 4d764a5cee270..42075837fa4dd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om.helpers; -import java.util.BitSet; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.LinkedList; @@ -37,8 +36,6 @@ import com.google.common.base.Preconditions; -import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET; - /** * A class that encapsulates Bucket Info. */ @@ -135,36 +132,7 @@ public List getAcls() { * already existing in the acl list. */ public boolean addAcl(OzoneAcl ozoneAcl) { - // Case 1: When we are adding more rights to existing user/group. - boolean addToExistingAcl = false; - for(OzoneAcl existingAcl: getAcls()) { - if(existingAcl.getName().equals(ozoneAcl.getName()) && - existingAcl.getType().equals(ozoneAcl.getType())) { - - BitSet bits = (BitSet) ozoneAcl.getAclBitSet().clone(); - - // We need to do "or" before comparision because think of a case like - // existing acl is 777 and newly added acl is 444, we have already - // that acl set. In this case if we do direct check they will not - // be equal, but if we do or and then check, we shall know it - // has acl's already set or not. - bits.or(existingAcl.getAclBitSet()); - - if (bits.equals(existingAcl.getAclBitSet())) { - return false; - } else { - existingAcl.getAclBitSet().or(ozoneAcl.getAclBitSet()); - addToExistingAcl = true; - break; - } - } - } - - // Case 2: When a completely new acl is added. - if(!addToExistingAcl) { - getAcls().add(ozoneAcl); - } - return true; + return OzoneAclUtil.addAcl(acls, ozoneAcl); } /** @@ -174,36 +142,7 @@ public boolean addAcl(OzoneAcl ozoneAcl) { * to that acl is not in the existing acl list. */ public boolean removeAcl(OzoneAcl ozoneAcl) { - boolean removed = false; - - // When we are removing subset of rights from existing acl. - for(OzoneAcl existingAcl: getAcls()) { - if (existingAcl.getName().equals(ozoneAcl.getName()) && - existingAcl.getType().equals(ozoneAcl.getType())) { - BitSet bits = (BitSet) ozoneAcl.getAclBitSet().clone(); - bits.and(existingAcl.getAclBitSet()); - - // This happens when the acl bitset is not existing for current name - // and type. - // Like a case we have 444 permission, 333 is asked to removed. - if (bits.equals(ZERO_BITSET)) { - return false; - } - - // We have some matching. Remove them. - existingAcl.getAclBitSet().xor(bits); - - // If existing acl has same bitset as passed acl bitset, remove that - // acl from the list - if (existingAcl.getAclBitSet().equals(ZERO_BITSET)) { - getAcls().remove(existingAcl); - } - removed = true; - break; - } - } - - return removed; + return OzoneAclUtil.removeAcl(acls, ozoneAcl); } /** @@ -212,9 +151,7 @@ public boolean removeAcl(OzoneAcl ozoneAcl) { * @return true - if successfully able to reset. 
*/ public boolean setAcls(List ozoneAcls) { - this.acls.clear(); - this.acls = ozoneAcls; - return true; + return OzoneAclUtil.setAcl(acls, ozoneAcls); } /** @@ -307,7 +244,9 @@ public Builder setBucketName(String bucket) { } public Builder setAcls(List listOfAcls) { - this.acls = listOfAcls; + if (listOfAcls != null) { + this.acls.addAll(listOfAcls); + } return this; } @@ -367,8 +306,7 @@ public BucketInfo getProtobuf() { BucketInfo.Builder bib = BucketInfo.newBuilder() .setVolumeName(volumeName) .setBucketName(bucketName) - .addAllAcls(acls.stream().map(OzoneAcl::toProtobuf) - .collect(Collectors.toList())) + .addAllAcls(OzoneAclUtil.toProtobuf(acls)) .setIsVersionEnabled(isVersionEnabled) .setStorageType(storageType.toProto()) .setCreationTime(creationTime) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 17aabd2448368..83adee980a8e3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -19,25 +19,21 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.BitSet; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.stream.Collectors; -import com.google.protobuf.ByteString; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.util.Time; import com.google.common.base.Preconditions; -import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET; - /** * Args for key block. The block instance for the key requested in putKey. * This is returned from OM to client, and client use class to talk to @@ -58,7 +54,7 @@ public final class OmKeyInfo extends WithMetadata { /** * ACL Information. */ - private List acls; + private List acls; @SuppressWarnings("parameternumber") OmKeyInfo(String volumeName, String bucketName, String keyName, @@ -67,7 +63,7 @@ public final class OmKeyInfo extends WithMetadata { HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, Map metadata, - FileEncryptionInfo encInfo, List acls) { + FileEncryptionInfo encInfo, List acls) { this.volumeName = volumeName; this.bucketName = bucketName; this.keyName = keyName; @@ -235,123 +231,22 @@ public FileEncryptionInfo getFileEncryptionInfo() { return encInfo; } - public List getAcls() { + public List getAcls() { return acls; } - /** - * Add an ozoneAcl to list of existing Acl set. - * @param ozoneAcl - * @return true - if successfully added, false if not added or acl is - * already existing in the acl list. - */ - public boolean addAcl(OzoneAclInfo ozoneAcl) { - // Case 1: When we are adding more rights to existing user/group. - boolean addToExistingAcl = false; - for(OzoneAclInfo existingAcl: getAcls()) { - if(existingAcl.getName().equals(ozoneAcl.getName()) && - existingAcl.getType().equals(ozoneAcl.getType())) { - - // We need to do "or" before comparision because think of a case like - // existing acl is 777 and newly added acl is 444, we have already - // that acl set. 
In this case if we do direct check they will not - // be equal, but if we do or and then check, we shall know it - // has acl's already set or not. - BitSet newAclBits = BitSet.valueOf( - existingAcl.getRights().toByteArray()); - - newAclBits.or(BitSet.valueOf(ozoneAcl.getRights().toByteArray())); - - if (newAclBits.equals(BitSet.valueOf( - existingAcl.getRights().toByteArray()))) { - return false; - } else { - OzoneAclInfo newAcl = OzoneAclInfo.newBuilder() - .setType(ozoneAcl.getType()) - .setName(ozoneAcl.getName()) - .setAclScope(ozoneAcl.getAclScope()) - .setRights(ByteString.copyFrom(newAclBits.toByteArray())) - .build(); - getAcls().remove(existingAcl); - getAcls().add(newAcl); - addToExistingAcl = true; - break; - } - } - } - - // Case 2: When a completely new acl is added. - if(!addToExistingAcl) { - getAcls().add(ozoneAcl); - } - return true; + public boolean addAcl(OzoneAcl acl) { + return OzoneAclUtil.addAcl(acls, acl); } - /** - * Remove acl from existing acl list. - * @param ozoneAcl - * @return true - if successfully removed, false if not able to remove due - * to that acl is not in the existing acl list. - */ - public boolean removeAcl(OzoneAclInfo ozoneAcl) { - boolean removed = false; - - // When we are removing subset of rights from existing acl. - for(OzoneAclInfo existingAcl: getAcls()) { - if (existingAcl.getName().equals(ozoneAcl.getName()) && - existingAcl.getType().equals(ozoneAcl.getType())) { - - BitSet bits = BitSet.valueOf(ozoneAcl.getRights().toByteArray()); - BitSet existingAclBits = - BitSet.valueOf(existingAcl.getRights().toByteArray()); - bits.and(existingAclBits); - - // This happens when the acl bitset asked to remove is not set for - // matched name and type. - // Like a case we have 444 permission, 333 is asked to removed. - if (bits.equals(ZERO_BITSET)) { - return false; - } - - // We have some matching. Remove them. - bits.xor(existingAclBits); - - // If existing acl has same bitset as passed acl bitset, remove that - // acl from the list - if (bits.equals(ZERO_BITSET)) { - getAcls().remove(existingAcl); - } else { - // Remove old acl and add new acl. - OzoneAclInfo newAcl = OzoneAclInfo.newBuilder() - .setType(ozoneAcl.getType()) - .setName(ozoneAcl.getName()) - .setAclScope(ozoneAcl.getAclScope()) - .setRights(ByteString.copyFrom(bits.toByteArray())) - .build(); - getAcls().remove(existingAcl); - getAcls().add(newAcl); - } - removed = true; - break; - } - } - - return removed; + public boolean removeAcl(OzoneAcl acl) { + return OzoneAclUtil.removeAcl(acls, acl); } - /** - * Reset the existing acl list. - * @param ozoneAcls - * @return true - if successfully able to reset. - */ - public boolean setAcls(List ozoneAcls) { - this.acls.clear(); - this.acls = ozoneAcls; - return true; + public boolean setAcls(List newAcls) { + return OzoneAclUtil.setAcl(acls, newAcls); } - - /** * Builder of OmKeyInfo. 
*/ @@ -368,11 +263,12 @@ public static class Builder { private HddsProtos.ReplicationFactor factor; private Map metadata; private FileEncryptionInfo encInfo; - private List acls; + private List acls; public Builder() { this.metadata = new HashMap<>(); omKeyLocationInfoGroups = new ArrayList<>(); + acls = new ArrayList<>(); } public Builder setVolumeName(String volume) { @@ -436,9 +332,10 @@ public Builder setFileEncryptionInfo(FileEncryptionInfo feInfo) { return this; } - public Builder setAcls(List listOfAcls) { - this.acls = new ArrayList<>(); - this.acls.addAll(listOfAcls); + public Builder setAcls(List listOfAcls) { + if (listOfAcls != null) { + this.acls.addAll(listOfAcls); + } return this; } @@ -466,13 +363,11 @@ public KeyInfo getProtobuf() { .setLatestVersion(latestVersion) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)); + .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .addAllAcls(OzoneAclUtil.toProtobuf(acls)); if (encInfo != null) { kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo)); } - if(acls != null) { - kb.addAllAcls(acls); - } return kb.build(); } @@ -492,7 +387,8 @@ public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) { .addAllMetadata(KeyValueUtil.getFromProtobuf(keyInfo.getMetadataList())) .setFileEncryptionInfo(keyInfo.hasFileEncryptionInfo() ? OMPBHelper.convert(keyInfo.getFileEncryptionInfo()): null) - .setAcls(keyInfo.getAclsList()).build(); + .setAcls(OzoneAclUtil.fromProtobuf(keyInfo.getAclsList())) + .build(); } @Override @@ -514,7 +410,8 @@ public boolean equals(Object o) { .equals(keyLocationVersions, omKeyInfo.keyLocationVersions) && type == omKeyInfo.type && factor == omKeyInfo.factor && - Objects.equals(metadata, omKeyInfo.metadata); + Objects.equals(metadata, omKeyInfo.metadata) && + Objects.equals(acls, omKeyInfo.acls); } @Override diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java index ac836fe479c47..b4f0d1679f806 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java @@ -28,7 +28,6 @@ .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.security.UserGroupInformation; import java.util.BitSet; @@ -235,12 +234,12 @@ private boolean checkAccessForOzoneAclType(OzoneAclType identityType, switch (identityType) { case USER: - return OzoneUtils.checkIfAclBitIsSet(acl, getAcl(identityType, + return OzoneAclUtil.checkIfAclBitIsSet(acl, getAcl(identityType, ugi.getUserName())); case GROUP: // Check access for user groups. for (String userGroup : ugi.getGroupNames()) { - if (OzoneUtils.checkIfAclBitIsSet(acl, getAcl(identityType, + if (OzoneAclUtil.checkIfAclBitIsSet(acl, getAcl(identityType, userGroup))) { // Return true if any user group has required permission. return true; @@ -249,7 +248,7 @@ private boolean checkAccessForOzoneAclType(OzoneAclType identityType, break; default: // For type WORLD and ANONYMOUS we set acl type as name. 
- if(OzoneUtils.checkIfAclBitIsSet(acl, getAcl(identityType, + if(OzoneAclUtil.checkIfAclBitIsSet(acl, getAcl(identityType, identityType.name()))) { return true; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java index d625a81be5357..26b5b1d7c7d66 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java @@ -27,7 +27,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; /** * Wrapper class for Ozone prefix path info, currently mainly target for ACL but @@ -54,6 +53,18 @@ public List getAcls() { return acls; } + public boolean addAcl(OzoneAcl acl) { + return OzoneAclUtil.addAcl(acls, acl); + } + + public boolean removeAcl(OzoneAcl acl) { + return OzoneAclUtil.removeAcl(acls, acl); + } + + public boolean setAcls(List newAcls) { + return OzoneAclUtil.setAcl(acls, newAcls); + } + /** * Returns the name of the prefix path. * @return name of the prefix path. @@ -86,7 +97,9 @@ public Builder() { } public Builder setAcls(List listOfAcls) { - this.acls = listOfAcls; + if (listOfAcls != null) { + acls.addAll(listOfAcls); + } return this; } @@ -114,7 +127,6 @@ public OmPrefixInfo.Builder addAllMetadata( */ public OmPrefixInfo build() { Preconditions.checkNotNull(name); - Preconditions.checkNotNull(acls); return new OmPrefixInfo(name, acls, metadata); } } @@ -124,9 +136,10 @@ public OmPrefixInfo build() { */ public PrefixInfo getProtobuf() { PrefixInfo.Builder pib = PrefixInfo.newBuilder().setName(name) - .addAllAcls(acls.stream().map(OzoneAcl::toProtobuf) - .collect(Collectors.toList())) .addAllMetadata(KeyValueUtil.toProtobuf(metadata)); + if (acls != null) { + pib.addAllAcls(OzoneAclUtil.toProtobuf(acls)); + } return pib.build(); } @@ -137,13 +150,14 @@ public PrefixInfo getProtobuf() { */ public static OmPrefixInfo getFromProtobuf(PrefixInfo prefixInfo) { OmPrefixInfo.Builder opib = OmPrefixInfo.newBuilder() - .setName(prefixInfo.getName()) - .setAcls(prefixInfo.getAclsList().stream().map( - OzoneAcl::fromProtobuf).collect(Collectors.toList())); + .setName(prefixInfo.getName()); if (prefixInfo.getMetadataList() != null) { opib.addAllMetadata(KeyValueUtil .getFromProtobuf(prefixInfo.getMetadataList())); } + if (prefixInfo.getAclsList() != null) { + opib.setAcls(OzoneAclUtil.fromProtobuf(prefixInfo.getAclsList())); + } return opib.build(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java new file mode 100644 index 0000000000000..fd42fea9ac062 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java @@ -0,0 +1,286 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.helpers; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.RequestContext; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; + +/** + * Helper class for ozone acls operations. + */ +public final class OzoneAclUtil { + + private OzoneAclUtil(){ + } + + /** + * Helper function to get access acl list for current user. + * + * @param userName + * @param userGroups + * @return list of OzoneAcls + * */ + public static List getAclList(String userName, + List userGroups, ACLType userRights, ACLType groupRights) { + + List listOfAcls = new ArrayList<>(); + + // User ACL. + listOfAcls.add(new OzoneAcl(USER, userName, userRights, ACCESS)); + if(userGroups != null) { + // Group ACLs of the User. + userGroups.forEach((group) -> listOfAcls.add( + new OzoneAcl(GROUP, group, groupRights, ACCESS))); + } + return listOfAcls; + } + + /** + * Check if acl right requested for given RequestContext exist + * in provided acl list. + * Acl validation rules: + * 1. If user/group has ALL bit set than all user should have all rights. + * 2. If user/group has NONE bit set than user/group will not have any right. + * 3. For all other individual rights individual bits should be set. + * + * @param acls + * @param context + * @return return true if acl list contains right requsted in context. + * */ + public static boolean checkAclRight(List acls, + RequestContext context) throws OMException { + String[] userGroups = context.getClientUgi().getGroupNames(); + String userName = context.getClientUgi().getUserName(); + ACLType aclToCheck = context.getAclRights(); + for (OzoneAcl a : acls) { + if(checkAccessInAcl(a, userGroups, userName, aclToCheck)) { + return true; + } + } + return false; + } + + private static boolean checkAccessInAcl(OzoneAcl a, String[] groups, + String username, ACLType aclToCheck) { + BitSet rights = a.getAclBitSet(); + switch (a.getType()) { + case USER: + if (a.getName().equals(username)) { + return checkIfAclBitIsSet(aclToCheck, rights); + } + break; + case GROUP: + for (String grp : groups) { + if (a.getName().equals(grp)) { + return checkIfAclBitIsSet(aclToCheck, rights); + } + } + break; + + default: + return checkIfAclBitIsSet(aclToCheck, rights); + } + return false; + } + + /** + * Check if acl right requested for given RequestContext exist + * in provided acl list. 
+ * Acl validation rules: + * 1. If user/group has ALL bit set than all user should have all rights. + * 2. If user/group has NONE bit set than user/group will not have any right. + * 3. For all other individual rights individual bits should be set. + * + * @param acls + * @param context + * @return return true if acl list contains right requsted in context. + * */ + public static boolean checkAclRights(List acls, + RequestContext context) throws OMException { + String[] userGroups = context.getClientUgi().getGroupNames(); + String userName = context.getClientUgi().getUserName(); + ACLType aclToCheck = context.getAclRights(); + for (OzoneAcl acl : acls) { + if (checkAccessInAcl(acl, userGroups, userName, aclToCheck)) { + return true; + } + } + return false; + } + + /** + * Helper function to check if bit for given acl is set. + * @param acl + * @param bitset + * @return True of acl bit is set else false. + * */ + public static boolean checkIfAclBitIsSet(IAccessAuthorizer.ACLType acl, + BitSet bitset) { + if (bitset == null) { + return false; + } + + return ((bitset.get(acl.ordinal()) + || bitset.get(ALL.ordinal())) + && !bitset.get(NONE.ordinal())); + } + + /** + * Helper function to inherit default ACL as access ACL for child object. + * 1. deep copy of OzoneAcl to avoid unexpected parent default ACL change + * 2. merge inherited access ACL with existing access ACL via + * OzoneUtils.addAcl(). + * @param acls + * @param parentAcls + * @return true if acls inherited DEFAULT acls from parentAcls successfully, + * false otherwise. + */ + public static boolean inheritDefaultAcls(List acls, + List parentAcls) { + List inheritedAcls = null; + if (parentAcls != null && !parentAcls.isEmpty()) { + inheritedAcls = parentAcls.stream() + .filter(a -> a.getAclScope() == DEFAULT) + .map(acl -> new OzoneAcl(acl.getType(), acl.getName(), + acl.getAclBitSet(), OzoneAcl.AclScope.ACCESS)) + .collect(Collectors.toList()); + } + if (inheritedAcls != null && !inheritedAcls.isEmpty()) { + inheritedAcls.stream().forEach(acl -> addAcl(acls, acl)); + return true; + } + return false; + } + + /** + * Convert a list of OzoneAclInfo(protoc) to list of OzoneAcl(java). + * @param protoAcls + * @return list of OzoneAcl. + */ + public static List fromProtobuf(List protoAcls) { + return protoAcls.stream().map(acl->OzoneAcl.fromProtobuf(acl)) + .collect(Collectors.toList()); + } + + /** + * Convert a list of OzoneAcl(java) to list of OzoneAclInfo(protoc). + * @param protoAcls + * @return list of OzoneAclInfo. + */ + public static List toProtobuf(List protoAcls) { + return protoAcls.stream().map(acl->OzoneAcl.toProtobuf(acl)) + .collect(Collectors.toList()); + } + + /** + * Add an OzoneAcl to existing list of OzoneAcls. + * @param existingAcls + * @param acl + * @return true if current OzoneAcls are changed, false otherwise. + */ + public static boolean addAcl(List existingAcls, OzoneAcl acl) { + if (existingAcls == null || acl == null) { + return false; + } + + for (OzoneAcl a: existingAcls) { + if (a.getName().equals(acl.getName()) && + a.getType().equals(acl.getType()) && + a.getAclScope().equals(acl.getAclScope())) { + BitSet current = a.getAclBitSet(); + BitSet original = (BitSet) current.clone(); + current.or(acl.getAclBitSet()); + if (current.equals(original)) { + return false; + } + return true; + } + } + + existingAcls.add(acl); + return true; + } + + /** + * remove OzoneAcl from existing list of OzoneAcls. + * @param existingAcls + * @param acl + * @return true if current OzoneAcls are changed, false otherwise. 
+ */ + public static boolean removeAcl(List existingAcls, OzoneAcl acl) { + if (existingAcls == null || existingAcls.isEmpty() || acl == null) { + return false; + } + + for (OzoneAcl a: existingAcls) { + if (a.getName().equals(acl.getName()) && + a.getType().equals(acl.getType()) && + a.getAclScope().equals(acl.getAclScope())) { + BitSet current = a.getAclBitSet(); + BitSet original = (BitSet) current.clone(); + current.andNot(acl.getAclBitSet()); + + if (current.equals(original)) { + return false; + } + + if (current.isEmpty()) { + existingAcls.remove(a); + } + return true; + } + } + return false; + } + + /** + * Set existingAcls to newAcls. + * @param existingAcls + * @param newAcls + * @return true if newAcls are set successfully, false otherwise. + */ + public static boolean setAcl(List existingAcls, + List newAcls) { + if (existingAcls == null) { + return false; + } else { + existingAcls.clear(); + if (newAcls != null) { + existingAcls.addAll(newAcls); + } + } + return true; + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java index 2a0fb9d9b93aa..954cab1e62b93 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java @@ -23,36 +23,19 @@ import java.nio.charset.Charset; import java.text.ParseException; import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.BitSet; -import java.util.Collection; -import java.util.List; import java.util.Locale; import java.util.TimeZone; import java.util.UUID; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.ratis.util.TimeDuration; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; - /** * Set of Utility functions used in ozone. */ @@ -254,145 +237,4 @@ public static long getTimeDurationInMS(Configuration conf, String key, .toLong(TimeUnit.MILLISECONDS); } - /** - * Helper function to get access acl list for current user. - * - * @param userName - * @param userGroups - * @return listOfAcls - * */ - public static List getAclList(String userName, - List userGroups, ACLType userRights, ACLType groupRights) { - - List listOfAcls = new ArrayList<>(); - - // User ACL. - listOfAcls.add(new OzoneAcl(USER, userName, userRights, ACCESS)); - if(userGroups != null) { - // Group ACLs of the User. 
- userGroups.forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, groupRights, ACCESS))); - } - return listOfAcls; - } - - /** - * Check if acl right requested for given RequestContext exist - * in provided acl list. - * Acl validation rules: - * 1. If user/group has ALL bit set than all user should have all rights. - * 2. If user/group has NONE bit set than user/group will not have any right. - * 3. For all other individual rights individual bits should be set. - * - * @param acls - * @param context - * @return return true if acl list contains right requsted in context. - * */ - public static boolean checkAclRight(List acls, - RequestContext context) throws OMException { - String[] userGroups = context.getClientUgi().getGroupNames(); - String userName = context.getClientUgi().getUserName(); - ACLType aclToCheck = context.getAclRights(); - for (OzoneAclInfo a : acls) { - if(checkAccessInAcl(a, userGroups, userName, aclToCheck)) { - return true; - } - } - return false; - } - - private static boolean checkAccessInAcl(OzoneAclInfo a, String[] groups, - String username, ACLType aclToCheck) { - BitSet rights = BitSet.valueOf(a.getRights().toByteArray()); - switch (a.getType()) { - case USER: - if (a.getName().equals(username)) { - return checkIfAclBitIsSet(aclToCheck, rights); - } - break; - case GROUP: - for (String grp : groups) { - // TODO: Convert ozone acls to proto map format for efficient - // acl checks. - if (a.getName().equals(grp)) { - return checkIfAclBitIsSet(aclToCheck, rights); - } - } - break; - - default: - return checkIfAclBitIsSet(aclToCheck, rights); - } - return false; - } - - /** - * Check if acl right requested for given RequestContext exist - * in provided acl list. - * Acl validation rules: - * 1. If user/group has ALL bit set than all user should have all rights. - * 2. If user/group has NONE bit set than user/group will not have any right. - * 3. For all other individual rights individual bits should be set. - * - * @param acls - * @param context - * @return return true if acl list contains right requsted in context. - * */ - public static boolean checkAclRights(List acls, - RequestContext context) throws OMException { - String[] userGroups = context.getClientUgi().getGroupNames(); - String userName = context.getClientUgi().getUserName(); - ACLType aclToCheck = context.getAclRights(); - // TODO: All ozone types should use one data type for acls. i.e Store - // and maintain acls in proto format only. - for (OzoneAcl a : acls) { - if (checkAccessInAcl(OzoneAcl.toProtobuf(a), userGroups, - userName, aclToCheck)) { - return true; - } - } - return false; - } - - /** - * Helper function to check if bit for given acl is set. - * @param acl - * @param bitset - * @return True of acl bit is set else false. - * */ - public static boolean checkIfAclBitIsSet(ACLType acl, BitSet bitset) { - if (bitset == null) { - return false; - } - - return ((bitset.get(acl.ordinal()) - || bitset.get(ALL.ordinal())) - && !bitset.get(NONE.ordinal())); - } - - /** - * Helper function to find and return all DEFAULT acls in input list with - * scope changed to ACCESS. - * @param acls - * - * @return list of default Acls. - * */ - public static Collection getDefaultAclsProto( - List acls) { - return acls.stream().filter(a -> a.getAclScope() == DEFAULT) - .map(OzoneAcl::toProtobufWithAccessType).collect(Collectors.toList()); - } - - /** - * Helper function to find and return all DEFAULT acls in input list with - * scope changed to ACCESS. - * @param acls - * - * @return list of default Acls. 
- * */ - public static Collection getDefaultAcls(List acls) { - return acls.stream().filter(a -> a.getAclScope() == DEFAULT) - .collect(Collectors.toList()); - } - } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java new file mode 100644 index 0000000000000..b1a4e4550c49b --- /dev/null +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java @@ -0,0 +1,191 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.helpers; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.BitSet; +import java.util.List; + +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * Test for OzoneAcls utility class. + */ +public class TestOzoneAclUtil { + + private static final List DEFAULT_ACLS = + getDefaultAcls(new OzoneConfiguration()); + + private static final OzoneAcl USER1 = new OzoneAcl(USER, "user1", + ACLType.READ_ACL, ACCESS); + + private static final OzoneAcl USER2 = new OzoneAcl(USER, "user2", + ACLType.WRITE, ACCESS); + + private static final OzoneAcl GROUP1 = new OzoneAcl(GROUP, "group1", + ACLType.ALL, ACCESS); + + @Test + public void testAddAcl() throws IOException { + List currentAcls = getDefaultAcls(new OzoneConfiguration()); + assertTrue(currentAcls.size() > 0); + + // Add new permission to existing acl entry. + OzoneAcl oldAcl = currentAcls.get(0); + OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), + ACLType.READ_ACL, ACCESS); + + addAndVerifyAcl(currentAcls, newAcl, true, DEFAULT_ACLS.size()); + // Add same permission again and verify result + addAndVerifyAcl(currentAcls, newAcl, false, DEFAULT_ACLS.size()); + + // Add a new user acl entry. + addAndVerifyAcl(currentAcls, USER1, true, DEFAULT_ACLS.size() + 1); + // Add same acl entry again and verify result + addAndVerifyAcl(currentAcls, USER1, false, DEFAULT_ACLS.size() + 1); + + // Add a new group acl entry. + addAndVerifyAcl(currentAcls, GROUP1, true, DEFAULT_ACLS.size() + 2); + // Add same acl entry again and verify result + addAndVerifyAcl(currentAcls, GROUP1, false, DEFAULT_ACLS.size() + 2); + } + + @Test + public void testRemoveAcl() { + List currentAcls = null; + + // add/remove to/from null OzoneAcls + removeAndVerifyAcl(currentAcls, USER1, false, 0); + addAndVerifyAcl(currentAcls, USER1, false, 0); + removeAndVerifyAcl(currentAcls, USER1, false, 0); + + currentAcls = getDefaultAcls(new OzoneConfiguration()); + assertTrue(currentAcls.size() > 0); + + // Add new permission to existing acl entru. + OzoneAcl oldAcl = currentAcls.get(0); + OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), + ACLType.READ_ACL, ACCESS); + + // Remove non existing acl entry + removeAndVerifyAcl(currentAcls, USER1, false, DEFAULT_ACLS.size()); + + // Remove non existing acl permission + removeAndVerifyAcl(currentAcls, newAcl, false, DEFAULT_ACLS.size()); + + // Add new permission to existing acl entry. 
+ addAndVerifyAcl(currentAcls, newAcl, true, DEFAULT_ACLS.size()); + + // Remove the new permission added. + removeAndVerifyAcl(currentAcls, newAcl, true, DEFAULT_ACLS.size()); + + removeAndVerifyAcl(currentAcls, oldAcl, true, DEFAULT_ACLS.size() - 1); + } + + private void addAndVerifyAcl(List<OzoneAcl> currentAcls, OzoneAcl addedAcl, + boolean expectedResult, int expectedSize) { + assertEquals(expectedResult, OzoneAclUtil.addAcl(currentAcls, addedAcl)); + if (currentAcls != null) { + boolean verified = verifyAclAdded(currentAcls, addedAcl); + assertTrue("addedAcl: " + addedAcl + " should exist in the" + + " current acls: " + currentAcls, verified); + assertEquals(expectedSize, currentAcls.size()); + } + } + + private void removeAndVerifyAcl(List<OzoneAcl> currentAcls, + OzoneAcl removedAcl, boolean expectedResult, int expectedSize) { + assertEquals(expectedResult, OzoneAclUtil.removeAcl(currentAcls, + removedAcl)); + if (currentAcls != null) { + boolean verified = verifyAclRemoved(currentAcls, removedAcl); + assertTrue("removedAcl: " + removedAcl + " should not exist in the" + + " current acls: " + currentAcls, verified); + assertEquals(expectedSize, currentAcls.size()); + } + } + + private boolean verifyAclRemoved(List<OzoneAcl> acls, OzoneAcl removedAcl) { + for (OzoneAcl acl : acls) { + if (acl.getName().equals(removedAcl.getName()) && + acl.getType().equals(removedAcl.getType()) && + acl.getAclScope().equals(removedAcl.getAclScope())) { + BitSet temp = (BitSet) acl.getAclBitSet().clone(); + temp.and(removedAcl.getAclBitSet()); + return !temp.equals(removedAcl.getAclBitSet()); + } + } + return true; + } + + private boolean verifyAclAdded(List<OzoneAcl> acls, OzoneAcl newAcl) { + for (OzoneAcl acl : acls) { + if (acl.getName().equals(newAcl.getName()) && + acl.getType().equals(newAcl.getType()) && + acl.getAclScope().equals(newAcl.getAclScope())) { + BitSet temp = (BitSet) acl.getAclBitSet().clone(); + temp.and(newAcl.getAclBitSet()); + return temp.equals(newAcl.getAclBitSet()); + } + } + return false; + } + + /** + * Helper function to get default acl list for current user. + * + * @return list of ozoneAcls. 
+ * @throws IOException + * */ + private static List<OzoneAcl> getDefaultAcls(OzoneConfiguration conf) { + List<OzoneAcl> ozoneAcls = new ArrayList<>(); + //User ACL + UserGroupInformation ugi; + try { + ugi = UserGroupInformation.getCurrentUser(); + } catch (IOException ioe) { + ugi = UserGroupInformation.createRemoteUser("user0"); + } + + OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); + IAccessAuthorizer.ACLType userRights = aclConfig.getUserDefaultRights(); + IAccessAuthorizer.ACLType groupRights = aclConfig.getGroupDefaultRights(); + + OzoneAclUtil.addAcl(ozoneAcls, new OzoneAcl(USER, + ugi.getUserName(), userRights, ACCESS)); + //Group ACLs of the User + List<String> userGroups = Arrays.asList(ugi.getGroupNames()); + userGroups.stream().forEach((group) -> OzoneAclUtil.addAcl(ozoneAcls, + new OzoneAcl(GROUP, group, groupRights, ACCESS))); + return ozoneAcls; + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index ca506c638618b..84d17adc4ab21 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -144,14 +144,15 @@ public abstract class TestOzoneRpcClientAbstract { private static StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; private static String remoteUserName = "remoteUser"; + private static String remoteGroupName = "remoteGroup"; private static OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, READ, DEFAULT); - private static OzoneAcl defaultGroupAcl = new OzoneAcl(GROUP, remoteUserName, + private static OzoneAcl defaultGroupAcl = new OzoneAcl(GROUP, remoteGroupName, READ, DEFAULT); private static OzoneAcl inheritedUserAcl = new OzoneAcl(USER, remoteUserName, READ, ACCESS); private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP, - remoteUserName, READ, ACCESS); + remoteGroupName, READ, ACCESS); private static String scmId = UUID.randomUUID().toString(); @@ -2280,11 +2281,11 @@ private void validateDefaultAcls(OzoneObj parentObj, OzoneObj childObj, } } List<OzoneAcl> acls = store.getAcl(parentObj); - assertTrue("Current acls:" + StringUtils.join(",", acls) + - " inheritedUserAcl:" + inheritedUserAcl, + assertTrue("Current acls: " + StringUtils.join(",", acls) + + " inheritedUserAcl: " + inheritedUserAcl, acls.contains(defaultUserAcl)); - assertTrue("Current acls:" + StringUtils.join(",", acls) + - " inheritedUserAcl:" + inheritedUserAcl, + assertTrue("Current acls: " + StringUtils.join(",", acls) + + " inheritedGroupAcl: " + inheritedGroupAcl, acls.contains(defaultGroupAcl)); acls = store.getAcl(childObj); @@ -2292,7 +2293,7 @@ private void validateDefaultAcls(OzoneObj parentObj, OzoneObj childObj, " inheritedUserAcl:" + inheritedUserAcl, acls.contains(inheritedUserAcl)); assertTrue("Current acls:" + StringUtils.join(",", acls) + - " inheritedUserAcl:" + inheritedUserAcl, + " inheritedGroupAcl:" + inheritedGroupAcl, acls.contains(inheritedGroupAcl)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index a4106f3501c83..7ad1a05c2c568 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdds.scm.server.SCMConfigurator; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneTestUtils; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -67,6 +68,7 @@ import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -75,7 +77,6 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.ozone.security.acl.RequestContext; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; @@ -130,6 +131,7 @@ public static void setUp() throws Exception { conf = new OzoneConfiguration(); dir = GenericTestUtils.getRandomizedTestDir(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); + conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); mockScmBlockLocationProtocol = Mockito.mock(ScmBlockLocationProtocol.class); metadataManager = new OmMetadataManagerImpl(conf); volumeManager = new VolumeManagerImpl(metadataManager, conf); @@ -256,7 +258,7 @@ public void openKeyFailureInSafeMode() throws Exception { OmKeyArgs keyArgs = createBuilder() .setKeyName(KEY_NAME) .setDataSize(1000) - .setAcls(OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), + .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(), ALL, ALL)) .build(); LambdaTestUtils.intercept(OMException.class, @@ -960,7 +962,7 @@ private OmKeyArgs.Builder createBuilder() throws IOException { .setFactor(ReplicationFactor.ONE) .setDataSize(0) .setType(ReplicationType.STAND_ALONE) - .setAcls(OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), + .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(), ALL, ALL)) .setVolumeName(VOLUME_NAME); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java index a1b023e1aecb5..1fb3b001ff25d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java @@ -57,6 +57,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; @@ -1435,7 +1436,7 @@ private OmKeyInfo 
getNewOmKeyInfo() throws IOException { .setFactor(HddsProtos.ReplicationFactor.ONE) .setDataSize(0) .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setAcls(OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), + .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(), ALL, ALL)) .setVolumeName("vol1") .setKeyName(UUID.randomUUID().toString()) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index 6ecf7025058ce..301e6f5bfbe55 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -38,9 +38,9 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.junit.BeforeClass; @@ -178,7 +178,7 @@ private void createKey(String volume, .setFactor(HddsProtos.ReplicationFactor.ONE) .setDataSize(0) .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setAcls(OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), + .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(), ALL, ALL)) .build(); diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java index fbbd3bed85cea..eb694c412f75a 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; @@ -434,7 +435,7 @@ public OutputStream newKeyWriter(KeyArgs args) throws IOException, .setDataSize(args.getSize()) .setType(xceiverClientManager.getType()) .setFactor(xceiverClientManager.getFactor()) - .setAcls(OzoneUtils.getAclList(args.getUserName(), + .setAcls(OzoneAclUtil.getAclList(args.getUserName(), args.getGroups() != null ? 
Arrays.asList(args.getGroups()) : null, ACLType.ALL, ACLType.ALL)) .build(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 8837c2d1e5704..8a32dd63550f3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.BitSet; import java.util.List; import java.util.Objects; @@ -34,9 +33,9 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; @@ -45,12 +44,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope.*; /** * OM bucket manager. @@ -391,6 +388,7 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { } String volume = obj.getVolumeName(); String bucket = obj.getBucketName(); + boolean changed = false; metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); try { String dbBucketKey = metadataManager.getBucketKey(volume, bucket); @@ -402,43 +400,10 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { BUCKET_NOT_FOUND); } - // Case 1: When we are adding more rights to existing user/group. - boolean addToExistingAcl = false; - for(OzoneAcl a: bucketInfo.getAcls()) { - if(a.getName().equals(acl.getName()) && - a.getType().equals(acl.getType())) { - BitSet bits = (BitSet) acl.getAclBitSet().clone(); - bits.or(a.getAclBitSet()); - - if (bits.equals(a.getAclBitSet())) { - return false; - } - a.getAclBitSet().or(acl.getAclBitSet()); - addToExistingAcl = true; - break; - } - } - - // Case 2: When a completely new acl is added. 
- if(!addToExistingAcl) { - List newAcls = bucketInfo.getAcls(); - if(newAcls == null) { - newAcls = new ArrayList<>(); - } - newAcls.add(acl); - bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName(bucketInfo.getVolumeName()) - .setBucketName(bucketInfo.getBucketName()) - .setStorageType(bucketInfo.getStorageType()) - .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) - .setCreationTime(bucketInfo.getCreationTime()) - .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) - .addAllMetadata(bucketInfo.getMetadata()) - .setAcls(newAcls) - .build(); + changed = bucketInfo.addAcl(acl); + if (changed) { + metadataManager.getBucketTable().put(dbBucketKey, bucketInfo); } - - metadataManager.getBucketTable().put(dbBucketKey, bucketInfo); } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Add acl operation failed for bucket:{}/{} acl:{}", @@ -449,7 +414,7 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); } - return true; + return changed; } /** @@ -470,6 +435,7 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { } String volume = obj.getVolumeName(); String bucket = obj.getBucketName(); + boolean removed = false; metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); try { String dbBucketKey = metadataManager.getBucketKey(volume, bucket); @@ -480,33 +446,10 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { throw new OMException("Bucket " + bucket + " is not found", BUCKET_NOT_FOUND); } - - boolean removed = false; - // When we are removing subset of rights from existing acl. - for(OzoneAcl a: bucketInfo.getAcls()) { - if(a.getName().equals(acl.getName()) && - a.getType().equals(acl.getType())) { - BitSet bits = (BitSet) acl.getAclBitSet().clone(); - bits.and(a.getAclBitSet()); - - if (bits.equals(ZERO_BITSET)) { - return false; - } - - a.getAclBitSet().xor(bits); - - if(a.getAclBitSet().equals(ZERO_BITSET)) { - bucketInfo.getAcls().remove(a); - } - removed = true; - break; - } - } - + removed = bucketInfo.removeAcl(acl); if (removed) { metadataManager.getBucketTable().put(dbBucketKey, bucketInfo); } - return removed; } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Remove acl operation failed for bucket:{}/{} acl:{}", @@ -516,6 +459,7 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { } finally { metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); } + return removed; } /** @@ -546,18 +490,8 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { throw new OMException("Bucket " + bucket + " is not found", BUCKET_NOT_FOUND); } - OmBucketInfo updatedBucket = OmBucketInfo.newBuilder() - .setVolumeName(bucketInfo.getVolumeName()) - .setBucketName(bucketInfo.getBucketName()) - .setStorageType(bucketInfo.getStorageType()) - .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) - .setCreationTime(bucketInfo.getCreationTime()) - .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) - .addAllMetadata(bucketInfo.getMetadata()) - .setAcls(acls) - .build(); - - metadataManager.getBucketTable().put(dbBucketKey, updatedBucket); + bucketInfo.setAcls(acls); + metadataManager.getBucketTable().put(dbBucketKey, bucketInfo); } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Set acl operation failed for bucket:{}/{} acl:{}", @@ -567,7 +501,6 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { } finally 
{ metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); } - return true; } @@ -634,7 +567,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) throw new OMException("Bucket " + bucket + " is not found", BUCKET_NOT_FOUND); } - boolean hasAccess = OzoneUtils.checkAclRights(bucketInfo.getAcls(), + boolean hasAccess = OzoneAclUtil.checkAclRights(bucketInfo.getAcls(), context); LOG.debug("user:{} has access rights for bucket:{} :{} ", context.getClientUgi(), ozObject.getBucketName(), hasAccess); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index d3e957c92a04a..b58095f934a58 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -20,7 +20,6 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; -import java.util.BitSet; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; @@ -33,11 +32,9 @@ import java.util.concurrent.TimeUnit; import java.security.GeneralSecurityException; import java.security.PrivilegedExceptionAction; -import java.util.stream.Collectors; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; -import com.google.protobuf.ByteString; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; @@ -58,9 +55,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; @@ -74,14 +68,14 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.helpers.OmPartInfo; +import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.ozone.common.BlockGroup; @@ -542,6 +536,7 @@ private OmKeyInfo prepareMultipartKeyInfo(OmKeyArgs args, long size, * @param type * @param size * @param encInfo + * @param omBucketInfo * @return */ private OmKeyInfo createKeyInfo(OmKeyArgs keyArgs, @@ -562,34 +557,7 @@ private OmKeyInfo createKeyInfo(OmKeyArgs 
keyArgs, .setReplicationType(type) .setReplicationFactor(factor) .setFileEncryptionInfo(encInfo); - List acls = new ArrayList<>(); - if(keyArgs.getAcls() != null) { - acls.addAll(keyArgs.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); - } - - // Inherit DEFAULT acls from prefix. - boolean prefixParentFound = false; - if(prefixManager != null) { - List prefixList = prefixManager.getLongestPrefixPath( - OZONE_URI_DELIMITER + - keyArgs.getVolumeName() + OZONE_URI_DELIMITER + - keyArgs.getBucketName() + OZONE_URI_DELIMITER + - keyArgs.getKeyName()); - - if(prefixList.size() > 0) { - // Add all acls from direct parent to key. - OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); - if(prefixInfo != null) { - acls.addAll(OzoneUtils.getDefaultAclsProto(prefixInfo.getAcls())); - prefixParentFound = true; - } - } - } - if(!prefixParentFound && omBucketInfo != null) { - acls.addAll(OzoneUtils.getDefaultAclsProto(omBucketInfo.getAcls())); - } - builder.setAcls(acls); + builder.setAcls(getAclsForKey(keyArgs, omBucketInfo)); return builder.build(); } @@ -926,7 +894,7 @@ private OmMultipartInfo createMultipartInfo(OmKeyArgs keyArgs, .setReplicationFactor(keyArgs.getFactor()) .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, locations))) - .setAcls(getAclsForKey(keyArgs, null, bucketInfo)) + .setAcls(getAclsForKey(keyArgs, bucketInfo)) .build(); DBStore store = metadataManager.getStore(); try (BatchOperation batch = store.initBatchOperation()) { @@ -951,15 +919,15 @@ private OmMultipartInfo createMultipartInfo(OmKeyArgs keyArgs, } } - private List getAclsForKey(OmKeyArgs keyArgs, - OmVolumeArgs volArgs, OmBucketInfo bucketInfo) { - List acls = new ArrayList<>(keyArgs.getAcls().size()); + private List getAclsForKey(OmKeyArgs keyArgs, + OmBucketInfo bucketInfo) { + List acls = new ArrayList<>(); - keyArgs.getAcls().stream().map(OzoneAcl::toProtobuf). - collect(Collectors.toList()); + if(keyArgs.getAcls() != null) { + acls.addAll(keyArgs.getAcls()); + } // Inherit DEFAULT acls from prefix. - boolean prefixParentFound = false; if(prefixManager != null) { List prefixList = prefixManager.getLongestPrefixPath( OZONE_URI_DELIMITER + @@ -971,21 +939,22 @@ private List getAclsForKey(OmKeyArgs keyArgs, // Add all acls from direct parent to key. OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); if(prefixInfo != null) { - acls.addAll(OzoneUtils.getDefaultAclsProto(prefixInfo.getAcls())); - prefixParentFound = true; + if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls())) { + return acls; + } } } } // Inherit DEFAULT acls from bucket only if DEFAULT acls for // prefix are not set. - if (!prefixParentFound && bucketInfo != null) { - acls.addAll(bucketInfo.getAcls().stream().filter(a -> a.getAclScope() - .equals(OzoneAcl.AclScope.DEFAULT)) - .map(OzoneAcl::toProtobufWithAccessType) - .collect(Collectors.toList())); + if (bucketInfo != null) { + if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) { + return acls; + } } + // TODO: do we need to further fallback to volume default ACL return acls; } @@ -1201,8 +1170,7 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( .setDataSize(size) .setOmKeyLocationInfos( Collections.singletonList(keyLocationInfoGroup)) - .setAcls(omKeyArgs.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())).build(); + .setAcls(omKeyArgs.getAcls()).build(); } else { // Already a version exists, so we should add it as a new version. 
// But now as versioning is not supported, just following the commit @@ -1394,6 +1362,8 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { String volume = obj.getVolumeName(); String bucket = obj.getBucketName(); String keyName = obj.getKeyName(); + boolean changed = false; + metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); try { @@ -1404,43 +1374,13 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND); } - List newAcls = new ArrayList<>(keyInfo.getAcls()); - OzoneAclInfo newAcl = null; - for(OzoneAclInfo a: keyInfo.getAcls()) { - if (a.getName().equals(acl.getName()) && - a.getType().equals(OzoneAclType.valueOf(acl.getType().name()))) { - BitSet currentAcls = BitSet.valueOf(a.getRights().toByteArray()); - currentAcls.or(acl.getAclBitSet()); - - newAcl = OzoneAclInfo.newBuilder() - .setType(a.getType()) - .setName(a.getName()) - .setAclScope(a.getAclScope()) - .setRights(ByteString.copyFrom(currentAcls.toByteArray())) - .build(); - newAcls.remove(a); - newAcls.add(newAcl); - break; - } + if (keyInfo.getAcls() == null) { + keyInfo.setAcls(new ArrayList<>()); } - if(newAcl == null) { - newAcls.add(OzoneAcl.toProtobuf(acl)); + changed = keyInfo.addAcl(acl); + if (changed) { + metadataManager.getKeyTable().put(objectKey, keyInfo); } - - OmKeyInfo newObj = new OmKeyInfo.Builder() - .setBucketName(keyInfo.getBucketName()) - .setKeyName(keyInfo.getKeyName()) - .setReplicationFactor(keyInfo.getFactor()) - .setReplicationType(keyInfo.getType()) - .setVolumeName(keyInfo.getVolumeName()) - .setOmKeyLocationInfos(keyInfo.getKeyLocationVersions()) - .setCreationTime(keyInfo.getCreationTime()) - .setModificationTime(keyInfo.getModificationTime()) - .setAcls(newAcls) - .setDataSize(keyInfo.getDataSize()) - .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo()) - .build(); - metadataManager.getKeyTable().put(objectKey, newObj); } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Add acl operation failed for key:{}/{}/{}", volume, @@ -1450,7 +1390,7 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { } finally { metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); } - return true; + return changed; } /** @@ -1467,6 +1407,7 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { String volume = obj.getVolumeName(); String bucket = obj.getBucketName(); String keyName = obj.getKeyName(); + boolean changed = false; metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); try { @@ -1477,50 +1418,10 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND); } - List newAcls = new ArrayList<>(keyInfo.getAcls()); - OzoneAclInfo newAcl = OzoneAcl.toProtobuf(acl); - - if(newAcls.contains(OzoneAcl.toProtobuf(acl))) { - newAcls.remove(newAcl); - } else { - // Acl to be removed might be a subset of existing acls. 
- for(OzoneAclInfo a: keyInfo.getAcls()) { - if (a.getName().equals(acl.getName()) && - a.getType().equals(OzoneAclType.valueOf(acl.getType().name()))) { - BitSet currentAcls = BitSet.valueOf(a.getRights().toByteArray()); - acl.getAclBitSet().xor(currentAcls); - currentAcls.and(acl.getAclBitSet()); - newAcl = OzoneAclInfo.newBuilder() - .setType(a.getType()) - .setName(a.getName()) - .setAclScope(a.getAclScope()) - .setRights(ByteString.copyFrom(currentAcls.toByteArray())) - .build(); - newAcls.remove(a); - newAcls.add(newAcl); - break; - } - } - if(newAcl == null) { - newAcls.add(OzoneAcl.toProtobuf(acl)); - } + changed = keyInfo.removeAcl(acl); + if (changed) { + metadataManager.getKeyTable().put(objectKey, keyInfo); } - - OmKeyInfo newObj = new OmKeyInfo.Builder() - .setBucketName(keyInfo.getBucketName()) - .setKeyName(keyInfo.getKeyName()) - .setReplicationFactor(keyInfo.getFactor()) - .setReplicationType(keyInfo.getType()) - .setVolumeName(keyInfo.getVolumeName()) - .setOmKeyLocationInfos(keyInfo.getKeyLocationVersions()) - .setCreationTime(keyInfo.getCreationTime()) - .setModificationTime(keyInfo.getModificationTime()) - .setAcls(newAcls) - .setDataSize(keyInfo.getDataSize()) - .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo()) - .build(); - - metadataManager.getKeyTable().put(objectKey, newObj); } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Remove acl operation failed for key:{}/{}/{}", volume, @@ -1530,7 +1431,7 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { } finally { metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); } - return true; + return changed; } /** @@ -1547,6 +1448,7 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { String volume = obj.getVolumeName(); String bucket = obj.getBucketName(); String keyName = obj.getKeyName(); + boolean changed = false; metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); try { @@ -1557,25 +1459,11 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND); } - List newAcls = new ArrayList<>(); - for (OzoneAcl a : acls) { - newAcls.add(OzoneAcl.toProtobuf(a)); - } - OmKeyInfo newObj = new OmKeyInfo.Builder() - .setBucketName(keyInfo.getBucketName()) - .setKeyName(keyInfo.getKeyName()) - .setReplicationFactor(keyInfo.getFactor()) - .setReplicationType(keyInfo.getType()) - .setVolumeName(keyInfo.getVolumeName()) - .setOmKeyLocationInfos(keyInfo.getKeyLocationVersions()) - .setCreationTime(keyInfo.getCreationTime()) - .setModificationTime(keyInfo.getModificationTime()) - .setAcls(newAcls) - .setDataSize(keyInfo.getDataSize()) - .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo()) - .build(); + changed = keyInfo.setAcls(acls); - metadataManager.getKeyTable().put(objectKey, newObj); + if (changed) { + metadataManager.getKeyTable().put(objectKey, keyInfo); + } } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Set acl operation failed for key:{}/{}/{}", volume, @@ -1585,7 +1473,7 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { } finally { metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); } - return true; + return changed; } /** @@ -1610,11 +1498,7 @@ public List getAcl(OzoneObj obj) throws IOException { throw new OMException("Key not found. 
Key:" + objectKey, KEY_NOT_FOUND); } - List acls = new ArrayList<>(); - for (OzoneAclInfo a : keyInfo.getAcls()) { - acls.add(OzoneAcl.fromProtobuf(a)); - } - return acls; + return keyInfo.getAcls(); } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Get acl operation failed for key:{}/{}/{}", volume, @@ -1675,7 +1559,8 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) objectKey, KEY_NOT_FOUND); } - boolean hasAccess = OzoneUtils.checkAclRight(keyInfo.getAcls(), context); + boolean hasAccess = OzoneAclUtil.checkAclRight( + keyInfo.getAcls(), context); LOG.debug("user:{} has access rights for key:{} :{} ", context.getClientUgi(), ozObject.getKeyName(), hasAccess); return hasAccess; @@ -1838,8 +1723,7 @@ private OmKeyInfo createDirectoryKey(String volumeName, String bucketName, .setReplicationType(ReplicationType.RATIS) .setReplicationFactor(ReplicationFactor.ONE) .setFileEncryptionInfo(encInfo) - .setAcls(acls.stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) + .setAcls(acls) .build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java index 2c27f9f56ae5d..ed4c67ae029ef 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -21,24 +21,22 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.ozone.util.RadixNode; import org.apache.hadoop.ozone.util.RadixTree; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.utils.db.*; import org.apache.hadoop.utils.db.Table.KeyValue; +import org.apache.hadoop.utils.db.TableIterator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; -import java.util.BitSet; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; @@ -236,7 +234,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) RadixNode lastNode = prefixTree.getLastNodeInPrefixPath(prefixPath); if (lastNode != null && lastNode.getValue() != null) { - boolean hasAccess = OzoneUtils.checkAclRights(lastNode.getValue(). + boolean hasAccess = OzoneAclUtil.checkAclRights(lastNode.getValue(). 
getAcls(), context); LOG.debug("user:{} has access rights for ozObj:{} ::{} ", context.getClientUgi(), ozObject, hasAccess); @@ -306,110 +304,33 @@ public void validateOzoneObj(OzoneObj obj) throws OMException { public OMPrefixAclOpResult addAcl(OzoneObj ozoneObj, OzoneAcl ozoneAcl, OmPrefixInfo prefixInfo) throws IOException { - List ozoneAclList = null; - if (prefixInfo != null) { - ozoneAclList = prefixInfo.getAcls(); + if (prefixInfo == null) { + prefixInfo = new OmPrefixInfo.Builder().setName(ozoneObj + .getPath()).build(); } + boolean changed = prefixInfo.addAcl(ozoneAcl); + if (changed) { + // update the in-memory prefix tree + prefixTree.insert(ozoneObj.getPath(), prefixInfo); - if (ozoneAclList == null) { - ozoneAclList = new ArrayList<>(); - ozoneAclList.add(ozoneAcl); - } else { - boolean addToExistingAcl = false; - for(OzoneAcl existingAcl: ozoneAclList) { - if(existingAcl.getName().equals(ozoneAcl.getName()) && - existingAcl.getType().equals(ozoneAcl.getType())) { - - BitSet bits = (BitSet) ozoneAcl.getAclBitSet().clone(); - - // We need to do "or" before comparision because think of a case like - // existing acl is 777 and newly added acl is 444, we have already - // that acl set. In this case if we do direct check they will not - // be equal, but if we do or and then check, we shall know it - // has acl's already set or not. - bits.or(existingAcl.getAclBitSet()); - - if (bits.equals(existingAcl.getAclBitSet())) { - return new OMPrefixAclOpResult(null, false); - } else { - existingAcl.getAclBitSet().or(ozoneAcl.getAclBitSet()); - addToExistingAcl = true; - break; - } - } + if (!isRatisEnabled) { + metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); } - if (!addToExistingAcl) { - ozoneAclList.add(ozoneAcl); - } - } - OmPrefixInfo.Builder upiBuilder = OmPrefixInfo.newBuilder(); - upiBuilder.setName(ozoneObj.getPath()).setAcls(ozoneAclList); - if (prefixInfo != null && prefixInfo.getMetadata() != null) { - upiBuilder.addAllMetadata(prefixInfo.getMetadata()); - } - prefixInfo = upiBuilder.build(); - - // update the in-memory prefix tree - prefixTree.insert(ozoneObj.getPath(), prefixInfo); - - if (!isRatisEnabled) { - metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); } - return new OMPrefixAclOpResult(prefixInfo, true); + return new OMPrefixAclOpResult(prefixInfo, changed); } public OMPrefixAclOpResult removeAcl(OzoneObj ozoneObj, OzoneAcl ozoneAcl, OmPrefixInfo prefixInfo) throws IOException { - List list = null; - if (prefixInfo != null) { - list = prefixInfo.getAcls(); - } - - if (list == null) { - return new OMPrefixAclOpResult(null, false); - } - boolean removed = false; - for (OzoneAcl existingAcl: list) { - if (existingAcl.getName().equals(ozoneAcl.getName()) - && existingAcl.getType() == ozoneAcl.getType()) { - BitSet bits = (BitSet) ozoneAcl.getAclBitSet().clone(); - bits.and(existingAcl.getAclBitSet()); - - // This happens when the acl bitset is not existing for current name - // and type. - // Like a case we have 444 permission, 333 is asked to removed. - if (bits.equals(ZERO_BITSET)) { - removed = false; - break; - } - - // We have some matching. Remove them. - existingAcl.getAclBitSet().xor(bits); - - // If existing acl has same bitset as passed acl bitset, remove that - // acl from the list - if (existingAcl.getAclBitSet().equals(ZERO_BITSET)) { - list.remove(existingAcl); - } - removed = true; - break; - } + if (prefixInfo != null) { + removed = prefixInfo.removeAcl(ozoneAcl); } // Nothing is matching to remove. 
- if (!removed) { - return new OMPrefixAclOpResult(null, false); - } else { - OmPrefixInfo.Builder upiBuilder = OmPrefixInfo.newBuilder(); - upiBuilder.setName(ozoneObj.getPath()).setAcls(list); - if (prefixInfo != null && prefixInfo.getMetadata() != null) { - upiBuilder.addAllMetadata(prefixInfo.getMetadata()); - } - prefixInfo = upiBuilder.build(); - + if (removed) { // Update in-memory prefix tree. - if (list.isEmpty()) { + if (prefixInfo.getAcls().isEmpty()) { prefixTree.removePrefixPath(ozoneObj.getPath()); if (!isRatisEnabled) { metadataManager.getPrefixTable().delete(ozoneObj.getPath()); @@ -420,58 +341,51 @@ public OMPrefixAclOpResult removeAcl(OzoneObj ozoneObj, OzoneAcl ozoneAcl, metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); } } - return new OMPrefixAclOpResult(prefixInfo, true); } + return new OMPrefixAclOpResult(prefixInfo, removed); } public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List ozoneAcls, OmPrefixInfo prefixInfo) throws IOException { - OmPrefixInfo.Builder upiBuilder = OmPrefixInfo.newBuilder(); - List aclsToBeSet = new ArrayList<>(ozoneAcls.size()); - aclsToBeSet.addAll(ozoneAcls); - upiBuilder.setName(ozoneObj.getPath()); - if (prefixInfo != null && prefixInfo.getMetadata() != null) { - upiBuilder.addAllMetadata(prefixInfo.getMetadata()); + if (prefixInfo == null) { + prefixInfo = new OmPrefixInfo.Builder().setName(ozoneObj + .getPath()).build(); } - // Inherit DEFAULT acls from prefix. - boolean prefixParentFound = false; - List prefixList = getLongestPrefixPathHelper( - prefixTree.getLongestPrefix(ozoneObj.getPath())); - - if (prefixList.size() > 0) { - // Add all acls from direct parent to key. - OmPrefixInfo parentPrefixInfo = prefixList.get(prefixList.size() - 1); - if (parentPrefixInfo != null) { - aclsToBeSet.addAll(OzoneUtils.getDefaultAcls( - parentPrefixInfo.getAcls())); - prefixParentFound = true; + boolean changed = prefixInfo.setAcls(ozoneAcls); + if (changed) { + List aclsToBeSet = prefixInfo.getAcls(); + // Inherit DEFAULT acls from prefix. + boolean prefixParentFound = false; + List prefixList = getLongestPrefixPathHelper( + prefixTree.getLongestPrefix(ozoneObj.getPath())); + + if (prefixList.size() > 0) { + // Add all acls from direct parent to key. + OmPrefixInfo parentPrefixInfo = prefixList.get(prefixList.size() - 1); + if (parentPrefixInfo != null) { + prefixParentFound = OzoneAclUtil.inheritDefaultAcls(aclsToBeSet, + parentPrefixInfo.getAcls()); + } } - } - // If no parent prefix is found inherit DEFULT acls from bucket. - if (!prefixParentFound) { - String bucketKey = metadataManager.getBucketKey(ozoneObj.getVolumeName(), - ozoneObj.getBucketName()); - OmBucketInfo bucketInfo = metadataManager.getBucketTable(). - get(bucketKey); - if (bucketInfo != null) { - bucketInfo.getAcls().forEach(a -> { - if (a.getAclScope().equals(OzoneAcl.AclScope.DEFAULT)) { - aclsToBeSet.add(new OzoneAcl(a.getType(), a.getName(), - a.getAclBitSet(), OzoneAcl.AclScope.ACCESS)); - } - }); + // If no parent prefix is found inherit DEFAULT acls from bucket. + if (!prefixParentFound) { + String bucketKey = metadataManager.getBucketKey(ozoneObj + .getVolumeName(), ozoneObj.getBucketName()); + OmBucketInfo bucketInfo = metadataManager.getBucketTable(). 
+ get(bucketKey); + if (bucketInfo != null) { + OzoneAclUtil.inheritDefaultAcls(aclsToBeSet, bucketInfo.getAcls()); + } } - } - - prefixInfo = upiBuilder.setAcls(aclsToBeSet).build(); - prefixTree.insert(ozoneObj.getPath(), prefixInfo); - if (!isRatisEnabled) { - metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); + prefixTree.insert(ozoneObj.getPath(), prefixInfo); + if (!isRatisEnabled) { + metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); + } } - return new OMPrefixAclOpResult(prefixInfo, true); + return new OMPrefixAclOpResult(prefixInfo, changed); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index beafeab340cd9..a85ea8b9f038b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -26,6 +26,11 @@ import com.google.common.base.Optional; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,10 +43,6 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponse; @@ -237,7 +238,7 @@ private OmKeyInfo createDirectoryKeyInfo(OzoneManager ozoneManager, .setReplicationType(HddsProtos.ReplicationType.RATIS) .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) .setFileEncryptionInfo(encryptionInfo.orNull()) - .setAcls(keyArgs.getAclsList()) + .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())) .build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 266abf5059b73..a32c0a789ff1b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -32,6 +32,12 @@ import com.google.common.base.Optional; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,11 +57,6 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.ScmClient; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; @@ -352,7 +353,7 @@ protected OmKeyInfo createKeyInfo(@Nonnull KeyArgs keyArgs, .setReplicationFactor(factor) .setFileEncryptionInfo(encInfo); if(keyArgs.getAclsList() != null) { - builder.setAcls(keyArgs.getAclsList()); + builder.setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); } return builder.build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java index a129334ab43d1..8d69a24590540 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java @@ -22,6 +22,7 @@ import java.util.List; import com.google.common.collect.Lists; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -29,7 +30,6 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -43,14 +43,15 @@ public class OMKeyAddAclRequest extends OMKeyAclRequest { LoggerFactory.getLogger(OMKeyAddAclRequest.class); private String path; - private List ozoneAcls; + private List ozoneAcls; public OMKeyAddAclRequest(OMRequest omRequest) { super(omRequest); OzoneManagerProtocolProtos.AddAclRequest addAclRequest = getOmRequest().getAddAclRequest(); path = addAclRequest.getObj().getPath(); - ozoneAcls = Lists.newArrayList(addAclRequest.getAcl()); + ozoneAcls = Lists.newArrayList( + OzoneAcl.fromProtobuf(addAclRequest.getAcl())); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java index 81d59d010a570..0bd81d31a227e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java @@ -22,6 +22,7 @@ import java.util.List; import com.google.common.collect.Lists; +import org.apache.hadoop.ozone.OzoneAcl; import 
org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -29,7 +30,6 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; @@ -43,14 +43,15 @@ public class OMKeyRemoveAclRequest extends OMKeyAclRequest { LoggerFactory.getLogger(OMKeyAddAclRequest.class); private String path; - private List ozoneAcls; + private List ozoneAcls; public OMKeyRemoveAclRequest(OMRequest omRequest) { super(omRequest); OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest = getOmRequest().getRemoveAclRequest(); path = removeAclRequest.getObj().getPath(); - ozoneAcls = Lists.newArrayList(removeAclRequest.getAcl()); + ozoneAcls = Lists.newArrayList( + OzoneAcl.fromProtobuf(removeAclRequest.getAcl())); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java index 977060805d465..24d46f83debef 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java @@ -21,14 +21,16 @@ import java.io.IOException; import java.util.List; +import com.google.common.collect.Lists; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse; @@ -42,14 +44,15 @@ public class OMKeySetAclRequest extends OMKeyAclRequest { LoggerFactory.getLogger(OMKeyAddAclRequest.class); private String path; - private List ozoneAcls; + private List ozoneAcls; public OMKeySetAclRequest(OMRequest omRequest) { super(omRequest); OzoneManagerProtocolProtos.SetAclRequest setAclRequest = getOmRequest().getSetAclRequest(); path = setAclRequest.getObj().getPath(); - ozoneAcls = setAclRequest.getAclList(); + ozoneAcls = Lists.newArrayList( + OzoneAclUtil.fromProtobuf(setAclRequest.getAclList())); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index 94637be429e0f..181d79c104929 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -163,7 +164,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setReplicationFactor(keyArgs.getFactor()) .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setAcls(keyArgs.getAclsList()) + .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())) .build(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index f263c79efd329..cfcedc425fd31 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -23,6 +23,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -231,7 +232,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setDataSize(size) .setOmKeyLocationInfos( Collections.singletonList(keyLocationInfoGroup)) - .setAcls(keyArgs.getAclsList()).build(); + .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())) + .build(); } else { // Already a version exists, so we should add it as a new version. 
// But now as versioning is not supported, just following the commit diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index c2d62278b8828..46250fca54584 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -403,10 +403,11 @@ private DBUpdatesResponse getOMDBUpdates( private GetAclResponse getAcl(GetAclRequest req) throws IOException { List acls = new ArrayList<>(); - List aclList = impl.getAcl(OzoneObjInfo.fromProtobuf(req.getObj())); - aclList.forEach(a -> acls.add(OzoneAcl.toProtobuf(a))); + if (aclList != null) { + aclList.forEach(a -> acls.add(OzoneAcl.toProtobuf(a))); + } return GetAclResponse.newBuilder().addAllAcls(acls).build(); } From 34dd9ee36674be670013d4fc3d9b8f5b36886812 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Wed, 7 Aug 2019 13:29:30 +0900 Subject: [PATCH 0803/1308] HADOOP-16494. Add SHA-512 checksum to release artifact to comply with the release distribution policy. This closes #1243 --- dev-support/bin/create-release | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release index 35447982903e2..d14c0073a5f17 100755 --- a/dev-support/bin/create-release +++ b/dev-support/bin/create-release @@ -641,7 +641,7 @@ function signartifacts for i in ${ARTIFACTS_DIR}/*; do ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}" - ${GPG} --print-mds "${i}" > "${i}.mds" + sha512sum --tag "${i}" > "${i}.sha512" done if [[ "${ASFRELEASE}" = true ]]; then From 52c77bc1607421037f6f84f695f607bb89b97cb6 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Thu, 22 Aug 2019 08:57:22 +0530 Subject: [PATCH 0804/1308] HDFS-14741. RBF: RecoverLease should be return false when the file is open in multiple destination. 
Contributed by xuzq --- .../router/RouterClientProtocol.java | 2 +- .../router/TestRouterRpcMultiDestination.java | 25 +++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 4d8527f906960..95787a1b01802 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -363,7 +363,7 @@ public boolean recoverLease(String src, String clientName) new Class[] {String.class, String.class}, new RemoteParam(), clientName); Object result = rpcClient.invokeSequential( - locations, method, Boolean.class, Boolean.TRUE); + locations, method, Boolean.class, null); return (boolean) result; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java index 306a45550660c..62e90770eb6c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; @@ -38,6 +39,7 @@ import java.util.Set; import java.util.TreeSet; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -55,6 +57,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.test.GenericTestUtils; @@ -230,6 +233,28 @@ public void testProxyRenameFiles() throws IOException, InterruptedException { testRename2(getRouterContext(), filename1, renamedFile, false); } + /** + * Test recoverLease when the result is false. 
+ */ + @Test + public void testRecoverLease() throws Exception { + Path testPath = new Path("/recovery/test_recovery_lease"); + DistributedFileSystem routerFs = + (DistributedFileSystem) getRouterFileSystem(); + FSDataOutputStream fsDataOutputStream = null; + try { + fsDataOutputStream = routerFs.create(testPath); + fsDataOutputStream.write("hello world".getBytes()); + fsDataOutputStream.hflush(); + + boolean result = routerFs.recoverLease(testPath); + assertFalse(result); + } finally { + IOUtils.closeStream(fsDataOutputStream); + routerFs.delete(testPath, true); + } + } + @Test public void testGetContentSummaryEc() throws Exception { DistributedFileSystem routerDFS = From 5e156b9ddec46d6b7d1336bb88136d8826972e7a Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 15 Aug 2019 11:28:50 +0900 Subject: [PATCH 0805/1308] HADOOP-16061. Upgrade Yetus to 0.10.0 This closes #1298 --- dev-support/bin/yetus-wrapper | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper index 692216e7500d8..b0f71f105d85e 100755 --- a/dev-support/bin/yetus-wrapper +++ b/dev-support/bin/yetus-wrapper @@ -77,7 +77,7 @@ WANTED="$1" shift ARGV=("$@") -HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.8.0} +HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.10.0} BIN=$(yetus_abs "${BASH_SOURCE-$0}") BINDIR=$(dirname "${BIN}") From 76790a1e671c3c10c6083d13fb4fb8b1b3326ccf Mon Sep 17 00:00:00 2001 From: Surendra Singh Lilhore Date: Thu, 22 Aug 2019 12:26:30 +0530 Subject: [PATCH 0806/1308] HDFS-14358. Provide LiveNode and DeadNode filter in DataNode UI. Contributed by hemanthboyina. --- .../src/main/webapps/hdfs/dfshealth.html | 10 ++++++++ .../src/main/webapps/hdfs/dfshealth.js | 23 +++++++++++++++++-- .../src/main/webapps/static/hadoop.css | 7 ++++++ 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html index 366c80fcc1375..df6e4e8797f0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html @@ -23,6 +23,11 @@ Namenode information + @@ -315,9 +320,11 @@

    +

    DataNode State

    + @@ -330,6 +337,7 @@ {#LiveNodes} + @@ -350,6 +358,7 @@ {/LiveNodes} {#DeadNodes} + @@ -357,6 +366,7 @@ + {/DeadNodes}
    State Node Http Address Last contact
    {state} {name} ({xferaddr}) {dnWebAddress} {lastContact}s
    {state} {name} ({xferaddr}) {#helper_relative_time value="{lastContact}"/}
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js index 4e8b362d9ae67..d12a9fbb20325 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js @@ -333,7 +333,11 @@ $('#tab-datanode').html(out); $('#table-datanodes').dataTable( { 'lengthMenu': [ [25, 50, 100, -1], [25, 50, 100, "All"] ], + 'columnDefs': [ + { 'targets': [ 0 ], 'visible': false, 'searchable': false } + ], 'columns': [ + { 'orderDataType': 'ng-value', 'searchable': true , "defaultContent": "" }, { 'orderDataType': 'ng-value', 'searchable': true , "defaultContent": "" }, { 'orderDataType': 'ng-value', 'searchable': true , "defaultContent": ""}, { 'orderDataType': 'ng-value', 'type': 'num' , "defaultContent": 0}, @@ -342,7 +346,22 @@ { 'type': 'num' , "defaultContent": 0}, { 'orderDataType': 'ng-value', 'type': 'num' , "defaultContent": 0}, { 'type': 'string' , "defaultContent": ""} - ]}); + ], + initComplete: function () { + var column = this.api().column([0]); + var select = $('') + .appendTo('#datanodefilter') + .on('change', function () { + var val = $.fn.dataTable.util.escapeRegex( + $(this).val()); + column.search(val ? '^' + val + '$' : '', true, false).draw(); + }); + console.log(select); + column.data().unique().sort().each(function (d, j) { + select.append(''); + }); + } + }); renderHistogram(data); $('#ui-tabs a[href="#tab-datanode"]').tab('show'); }); @@ -469,4 +488,4 @@ function open_hostip_list(x0, x1) { function close_hostip_list() { $("#datanode_ips").remove(); $("#close_ips").remove(); -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css index b3e79e2d5076d..e7a264c74537c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css @@ -314,6 +314,13 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand { left: 75px; } +.datanodestatus{ + width:75px; + height:30px; + color: #555; + display: inline-block; +} + .bar rect { fill: #5FA33F; } From ee7c261e1e81f836bb18ca7f92a72abb056faf8a Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Thu, 22 Aug 2019 18:37:16 +0900 Subject: [PATCH 0807/1308] HDFS-14763. Fix package name of audit log class in Dynamometer document (#1335) --- .../hadoop-dynamometer/src/site/markdown/Dynamometer.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-tools/hadoop-dynamometer/src/site/markdown/Dynamometer.md b/hadoop-tools/hadoop-dynamometer/src/site/markdown/Dynamometer.md index 39dd0dbbeef2c..fee569a58d474 100644 --- a/hadoop-tools/hadoop-dynamometer/src/site/markdown/Dynamometer.md +++ b/hadoop-tools/hadoop-dynamometer/src/site/markdown/Dynamometer.md @@ -144,7 +144,7 @@ via the `auditreplay.command-parser.class` configuration. One mapper will automa audit log file within the audit log directory specified at launch time. The default is a direct format, -`com.linkedin.dynamometer.workloadgenerator.audit.AuditLogDirectParser`. This accepts files in the format produced +`org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditLogDirectParser`. This accepts files in the format produced by a standard configuration audit logger, e.g. 
lines like: ``` 1970-01-01 00:00:42,000 INFO FSNamesystem.audit: allowed=true ugi=hdfs ip=/127.0.0.1 cmd=open src=/tmp/foo dst=null perm=null proto=rpc @@ -154,7 +154,7 @@ the Unix epoch) the start time of the audit traces. This is needed for all mappe example, if the above line was the first audit event, you would specify `auditreplay.log-start-time.ms=42000`. Within a file, the audit logs must be in order of ascending timestamp. -The other supported format is `com.linkedin.dynamometer.workloadgenerator.audit.AuditLogHiveTableParser`. This accepts +The other supported format is `org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditLogHiveTableParser`. This accepts files in the format produced by a Hive query with output fields, in order: * `relativeTimestamp`: event time offset, in milliseconds, from the start of the trace From 69ddb36876c0b3819e5409d83b27d18d1da89b22 Mon Sep 17 00:00:00 2001 From: Ewan Higgs Date: Thu, 22 Aug 2019 13:56:47 +0200 Subject: [PATCH 0808/1308] HADOOP-16193. Add extra S3A MPU test to see what happens if a file is created during the MPU. Contributed by Steve Loughran --- .../ITestS3AContractMultipartUploader.java | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java index 059312a8103a3..0ffe85b755192 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java @@ -17,20 +17,31 @@ */ package org.apache.hadoop.fs.contract.s3a; +import java.io.ByteArrayInputStream; +import java.util.HashMap; +import java.util.Map; + +import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.MultipartUploader; +import org.apache.hadoop.fs.PartHandle; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UploadHandle; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.WriteOperationHelper; +import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.fs.s3a.S3ATestConstants.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; import static org.apache.hadoop.fs.s3a.scale.AbstractSTestS3AHugeFiles.DEFAULT_HUGE_PARTITION_SIZE; +import static org.apache.hadoop.test.LambdaTestUtils.eventually; /** * Test MultipartUploader with S3A. @@ -159,4 +170,47 @@ public void testDirectoryInTheWay() throws Exception { public void testMultipartUploadReverseOrder() throws Exception { ContractTestUtils.skip("skipped for speed"); } + + /** + * This creates and then deletes a zero-byte file while an upload + * is in progress, and verifies that the uploaded file is ultimately + * visible. + */ + @Test + public void testMultipartOverlapWithTransientFile() throws Throwable { + // until there's a way to explicitly ask for a multipart uploader from a + // specific FS, explicitly create one bonded to the raw FS. 
+ describe("testMultipartOverlapWithTransientFile"); + S3AFileSystem fs = getFileSystem(); + Path path = path("testMultipartOverlapWithTransientFile"); + fs.delete(path, true); + MultipartUploader mpu = mpu(1); + UploadHandle upload1 = mpu.initialize(path); + byte[] dataset = dataset(1024, '0', 10); + final Map handles = new HashMap<>(); + LOG.info("Uploading multipart entry"); + PartHandle value = mpu.putPart(path, new ByteArrayInputStream(dataset), 1, + upload1, + dataset.length); + // upload 1K + handles.put(1, value); + // confirm the path is absent + ContractTestUtils.assertPathDoesNotExist(fs, + "path being uploaded", path); + // now create an empty file + ContractTestUtils.touch(fs, path); + final FileStatus touchStatus = fs.getFileStatus(path); + LOG.info("0-byte file has been created: {}", touchStatus); + fs.delete(path, false); + // now complete the upload + mpu.complete(path, handles, upload1); + + // wait for the data to arrive + eventually(timeToBecomeConsistentMillis(), 500, () -> { + FileStatus mpuStatus = fs.getFileStatus(path); + assertTrue("File is empty in " + mpuStatus, mpuStatus.getLen() > 0); + return mpuStatus; + }); + + } } From 61b2df23317767833b327e5f69dbe73e8d4f0fc1 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Thu, 22 Aug 2019 17:23:58 +0100 Subject: [PATCH 0809/1308] HADOOP-16470. Make last AWS credential provider in default auth chain EC2ContainerCredentialsProviderWrapper. Contributed by Steve Loughran. Contains HADOOP-16471. Restore (documented) fs.s3a.SharedInstanceProfileCredentialsProvider. Change-Id: I06b99b57459cac80bf743c5c54f04e59bb54c2f8 --- .../src/main/resources/core-default.xml | 4 +- .../s3a/SharedInstanceCredentialProvider.java | 44 +++++++++++++++++++ .../auth/IAMInstanceCredentialsProvider.java | 34 ++++++++------ .../site/markdown/tools/hadoop-aws/index.md | 4 +- 4 files changed, 68 insertions(+), 18 deletions(-) create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceCredentialProvider.java diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 05bba0eb8c135..2b78ede7568b1 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1092,8 +1092,8 @@ configuration of AWS access key ID and secret access key in environment variables named AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY, as documented in the AWS SDK. - * com.amazonaws.auth.InstanceProfileCredentialsProvider: supports use - of instance profile credentials if running in an EC2 VM. + * org.apache.hadoop.fs.s3a.auth.IAMInstanceCredentialsProvider: picks up + IAM credentials of any EC2 VM or AWS container in which the process is running.
    diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceCredentialProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceCredentialProvider.java new file mode 100644 index 0000000000000..5eba675cb82c2 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceCredentialProvider.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.s3a.auth.IAMInstanceCredentialsProvider; +import org.apache.hadoop.fs.s3a.auth.NoAwsCredentialsException; + +/** + * This credential provider has jittered between existing and non-existing, + * but it turns up in documentation enough that it has been restored. + * It extends {@link IAMInstanceCredentialsProvider} to pick up its + * bindings, which are currently to use the + * {@code EC2ContainerCredentialsProviderWrapper} class for IAM and container + * authentication. + *

    + * When it fails to authenticate, it raises a + * {@link NoAwsCredentialsException} which can be recognized by retry handlers + * as a non-recoverable failure. + *

    + * It is implicitly public; marked evolving as we can change its semantics. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class SharedInstanceCredentialProvider extends + IAMInstanceCredentialsProvider { +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/IAMInstanceCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/IAMInstanceCredentialsProvider.java index 7ff451005e2a2..1bb30ed5c0dc9 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/IAMInstanceCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/IAMInstanceCredentialsProvider.java @@ -24,38 +24,44 @@ import com.amazonaws.AmazonClientException; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.InstanceProfileCredentialsProvider; +import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** - * This is going to be an IAM credential provider which performs - * async refresh for lower-latency on IO calls. - * Initially it does not do this, simply shares the single IAM instance - * across all instances. This makes it less expensive to declare. - * + * This is an IAM credential provider which wraps + * an {@code EC2ContainerCredentialsProviderWrapper} + * to provide credentials when the S3A connector is instantiated on AWS EC2 + * or the AWS container services. + *

    + * When it fails to authenticate, it raises a + * {@link NoAwsCredentialsException} which can be recognized by retry handlers + * as a non-recoverable failure. + *

    + * It is implicitly public; marked evolving as we can change its semantics. */ -@InterfaceAudience.Private -@InterfaceStability.Unstable +@InterfaceAudience.Public +@InterfaceStability.Evolving public class IAMInstanceCredentialsProvider implements AWSCredentialsProvider, Closeable { - private static final InstanceProfileCredentialsProvider INSTANCE = - InstanceProfileCredentialsProvider.getInstance(); + private final AWSCredentialsProvider provider = + new EC2ContainerCredentialsProviderWrapper(); public IAMInstanceCredentialsProvider() { } /** * Ask for the credentials. - * as it invariably means "you aren't running on EC2" + * Failure invariably means "you aren't running in an EC2 VM or AWS container". * @return the credentials + * @throws NoAwsCredentialsException on auth failure to indicate non-recoverable. */ @Override public AWSCredentials getCredentials() { try { - return INSTANCE.getCredentials(); + return provider.getCredentials(); } catch (AmazonClientException e) { throw new NoAwsCredentialsException("IAMInstanceCredentialsProvider", e.getMessage(), @@ -65,11 +71,11 @@ public AWSCredentials getCredentials() { @Override public void refresh() { - INSTANCE.refresh(); + provider.refresh(); } @Override public void close() throws IOException { - // until async, no-op. + // no-op. } } diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md index 704e49b138a2a..7b6eb834eb2b1 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md @@ -539,8 +539,8 @@ This means that the default S3A authentication chain can be defined as configuration of AWS access key ID and secret access key in environment variables named AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY, as documented in the AWS SDK. - * com.amazonaws.auth.InstanceProfileCredentialsProvider: supports use - of instance profile credentials if running in an EC2 VM. + * org.apache.hadoop.fs.s3a.auth.IAMInstanceCredentialsProvider: picks up + IAM credentials of any EC2 VM or AWS container in which the process is running. ``` From 63c295e29840587eb6eb4a0fa258c55002e3229a Mon Sep 17 00:00:00 2001 From: Erik Krogen Date: Thu, 22 Aug 2019 09:57:00 -0700 Subject: [PATCH 0810/1308] HDFS-14755. [Dynamometer] Enhance compatibility of Dynamometer with branch-2 builds. Contributed by Takanobu Asanuma. 
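The incompatibility is in how SimulatedDataNodes derives its local build-data path: rather than calling `StorageLocation#getUri()`, the change below parses the location's string form, which is assumed to render as `[<storage type>]<uri>` (for example `[DISK]file:/data/dn`). A rough, self-contained sketch of that parsing, not the patch itself:

```
// Sketch only: mirrors the substring/URI handling in the diff below.
// Assumes the location string is rendered as "[<storage type>]<uri>".
import java.net.URI;
import java.net.URISyntaxException;

public class StorageLocationPathSketch {
  static String toLocalPath(String storageLocation) throws URISyntaxException {
    // Drop the "[DISK]" / "[SSD]" prefix, keeping only the URI part.
    String uri = storageLocation.substring(storageLocation.indexOf(']') + 1);
    return new URI(uri).getPath();
  }

  public static void main(String[] args) throws URISyntaxException {
    System.out.println(toLocalPath("[DISK]file:/data/dn")); // prints /data/dn
  }
}
```

The `main` method is only there to make the sketch runnable; the real change below does the same bracket-stripping and URI conversion inline before setting `MiniDFSCluster.PROP_TEST_BUILD_DATA`.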
--- .../apache/hadoop/tools/dynamometer/SimulatedDataNodes.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/SimulatedDataNodes.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/SimulatedDataNodes.java index 8def3ff06346e..520077e0823cb 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/SimulatedDataNodes.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/SimulatedDataNodes.java @@ -109,8 +109,10 @@ public int run(String[] args) throws Exception { } System.out.println("DataNodes will connect to NameNode at " + nameNodeAdr); - System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, - DataNode.getStorageLocations(getConf()).get(0).getUri().getPath()); + String loc = DataNode.getStorageLocations(getConf()).get(0).toString(); + loc = loc.substring(loc.indexOf("]") + 1); // delete storage type + String path = new URI(loc).getPath(); + System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, path); SimulatedFSDataset.setFactory(getConf()); getConf().setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, STORAGE_CAPACITY); From e04dcfdc57434858884601ac647522f1160830f7 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Thu, 22 Aug 2019 10:22:38 -0700 Subject: [PATCH 0811/1308] HDFS-14583. FileStatus#toString() will throw IllegalArgumentException. Contributed by xuzq. --- .../hdfs/protocol/HdfsLocatedFileStatus.java | 2 +- .../hdfs/protocol/HdfsNamedFileStatus.java | 2 +- .../apache/hadoop/hdfs/web/TestJsonUtil.java | 45 +++++++++++++++++++ 3 files changed, 47 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java index 1490e4e4ef624..bf4e0d2f9f16e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java @@ -108,7 +108,7 @@ public void setGroup(String group) { @Override public boolean isSymlink() { - return uSymlink != null; + return uSymlink != null && uSymlink.length > 0; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsNamedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsNamedFileStatus.java index 311f9d0e51947..9434423d721b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsNamedFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsNamedFileStatus.java @@ -95,7 +95,7 @@ public void setGroup(String group) { @Override public boolean isSymlink() { - return uSymlink != null; + return uSymlink != null && uSymlink.length > 0; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java index 3ffc35fe6ed0e..2a3680cdb1a70 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java @@ -21,6 +21,8 @@ import static org.apache.hadoop.fs.permission.AclEntryType.*; import static org.apache.hadoop.fs.permission.FsAction.*; import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import java.io.IOException; import java.util.EnumSet; @@ -47,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus.Flags; import org.apache.hadoop.io.erasurecode.ECSchema; +import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; @@ -107,6 +110,48 @@ public void testHdfsFileStatusWithEcPolicy() throws IOException { Assert.assertEquals(fstatus, fs2); } + /** + * Verify isSymlink when symlink ie empty. + */ + @Test + public void testHdfsFileStatus() throws Exception { + HdfsFileStatus hdfsFileStatus = new HdfsFileStatus.Builder() + .replication(1) + .blocksize(1024) + .perm(new FsPermission((short) 777)) + .owner("owner") + .group("group") + .symlink(new byte[0]) + .path(new byte[0]) + .fileId(1010) + .isdir(true) + .build(); + + assertFalse(hdfsFileStatus.isSymlink()); + LambdaTestUtils.intercept(IOException.class, + "Path " + hdfsFileStatus.getPath() + " is not a symbolic link", + () -> hdfsFileStatus.getSymlink()); + + String expectString = new StringBuilder() + .append("HdfsLocatedFileStatus") + .append("{") + .append("path=" + null) + .append("; isDirectory=" + true) + .append("; modification_time=" + 0) + .append("; access_time=" + 0) + .append("; owner=" + "owner") + .append("; group=" + "group") + .append("; permission=" + "r----x--t") + .append("; isSymlink=" + false) + .append("; hasAcl=" + false) + .append("; isEncrypted=" + false) + .append("; isErasureCoded=" + false) + .append("}") + .toString(); + + assertEquals(expectString, hdfsFileStatus.toString()); + } + @Test public void testHdfsFileStatusWithoutEcPolicy() throws IOException { final long now = Time.now(); From 28fb4b527afec93926127a93e4b94a157c0f64f1 Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Thu, 22 Aug 2019 10:57:22 -0700 Subject: [PATCH 0812/1308] HDDS-2008 : Wrong package for RatisHelper class in hadoop-hdds/common module. 
(#1333) --- .../java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java | 2 +- .../java/org/apache/{ => hadoop/hdds}/ratis/RatisHelper.java | 3 ++- .../java/org/apache/{ => hadoop/hdds}/ratis/package-info.java | 2 +- .../common/transport/server/ratis/XceiverServerRatis.java | 2 +- .../commandhandler/TestCloseContainerCommandHandler.java | 2 +- .../hadoop/hdds/scm/pipeline/RatisPipelineProvider.java | 2 +- .../apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java | 2 +- .../test/java/org/apache/hadoop/ozone/RatisTestHelper.java | 4 ++-- 8 files changed, 10 insertions(+), 9 deletions(-) rename hadoop-hdds/common/src/main/java/org/apache/{ => hadoop/hdds}/ratis/RatisHelper.java (99%) rename hadoop-hdds/common/src/main/java/org/apache/{ => hadoop/hdds}/ratis/package-info.java (95%) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index 98cba0b10392d..e2f0299ce3c5a 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.util.Time; -import org.apache.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.ratis.client.RaftClient; import org.apache.ratis.grpc.GrpcTlsConfig; import org.apache.ratis.proto.RaftProtos; diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java similarity index 99% rename from hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index 557815b815a41..318a94b63f3ca 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.ratis; +package org.apache.hadoop.hdds.ratis; import java.io.IOException; import java.util.ArrayList; @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.ratis.RaftConfigKeys; import org.apache.ratis.client.RaftClient; import org.apache.ratis.client.RaftClientConfigKeys; import org.apache.ratis.conf.RaftProperties; diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java similarity index 95% rename from hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java index c13c20c60604b..e52dc7ffc70bb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.ratis; +package org.apache.hadoop.hdds.ratis; /** * This package contains classes related to Apache Ratis. 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 4cb4cbbd3796e..b4021cf657b31 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -44,7 +44,7 @@ import io.opentracing.Scope; import org.apache.ratis.RaftConfigKeys; -import org.apache.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.grpc.GrpcConfigKeys; import org.apache.ratis.grpc.GrpcFactory; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index f80247089b27b..219b504a9afac 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.ratis.client.RaftClient; import org.apache.ratis.protocol.RaftGroup; import org.apache.ratis.protocol.RaftGroupId; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index d3b02e6253aaf..14fde0785a4fa 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.io.MultipleIOException; -import org.apache.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.ratis.client.RaftClient; import org.apache.ratis.grpc.GrpcTlsConfig; import org.apache.ratis.protocol.RaftClientReply; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java index 85e4c37185158..d9aec34b8bd24 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import 
org.apache.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.ratis.client.RaftClient; import org.apache.ratis.grpc.GrpcTlsConfig; import org.apache.ratis.protocol.RaftGroup; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java index da06c59d190e9..a1243e87f7bb8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.ratis.client.RaftClient; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.rpc.RpcType; @@ -43,7 +43,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.ratis.RatisHelper.newRaftClient; +import static org.apache.hadoop.hdds.ratis.RatisHelper.newRaftClient; /** * Helpers for Ratis tests. From 4028cac56d469c566f2dbad9e9f11c36c53f5ee9 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Thu, 22 Aug 2019 15:00:17 -0700 Subject: [PATCH 0813/1308] HDDS-1347. In OM HA getS3Secret call Should happen only leader OM. (#670) --- .../org/apache/hadoop/ozone/OzoneConsts.java | 1 + .../apache/hadoop/ozone/audit/OMAction.java | 4 +- .../ozone/om/exceptions/OMException.java | 5 +- .../src/main/proto/OzoneManagerProtocol.proto | 14 ++ .../hadoop/ozone/TestSecureOzoneCluster.java | 12 +- .../apache/hadoop/ozone/om/OzoneManager.java | 8 + .../s3/security/S3GetSecretRequest.java | 193 ++++++++++++++++++ .../om/request/s3/security/package-info.java | 22 ++ .../s3/security/S3GetSecretResponse.java | 56 +++++ .../om/response/s3/security/package-info.java | 22 ++ 10 files changed, 333 insertions(+), 4 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index d9b33d8341b60..80e9260c71573 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -268,6 +268,7 @@ private OzoneConsts() { public static final String PART_NUMBER_MARKER = "partNumberMarker"; public static final String MAX_PARTS = "maxParts"; public static final String S3_BUCKET = "s3Bucket"; + public static final String S3_GETSECRET_USER = "S3GetSecretUser"; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index 
ebcd4390954c0..97d4afc46ed98 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -69,7 +69,9 @@ public enum OMAction implements AuditAction { CREATE_DIRECTORY, CREATE_FILE, LOOKUP_FILE, - LIST_STATUS; + LIST_STATUS, + + GET_S3_SECRET; @Override public String getAction() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index 1e291edfcdc4b..268471a62c3ec 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -207,7 +207,10 @@ public enum ResultCodes { RATIS_ERROR, // Error in Ratis server - INVALID_PATH_IN_ACL_REQUEST // Error code when path name is invalid during + INVALID_PATH_IN_ACL_REQUEST, // Error code when path name is invalid during // acl requests. + + USER_MISMATCH // Error code when requested user name passed is different + // from remote user. } } diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index ded16070bf35e..7d5f0987bf59c 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -156,6 +156,8 @@ message OMRequest { optional GetAclRequest getAclRequest = 78; optional PurgeKeysRequest purgeKeysRequest = 81; + + optional UpdateGetS3SecretRequest updateGetS3SecretRequest = 82; } message OMResponse { @@ -287,6 +289,9 @@ enum Status { RATIS_ERROR = 52; INVALID_PATH_IN_ACL_REQUEST = 53; // Invalid path name in acl request. + + USER_MISMATCH = 54; // Error code when requested user name passed is + // different from remote user. } @@ -1050,6 +1055,15 @@ message GetS3SecretResponse { required S3Secret s3Secret = 2; } +/** + This will be used internally by OM to replicate S3 Secret across quorum of + OM's. +*/ +message UpdateGetS3SecretRequest { + required string kerberosID = 1; + required string awsSecret = 2; +} + /** The OM service that takes care of Ozone namespace. 
*/ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 853b6a2c309cd..709c43f43c09a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -104,6 +104,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import static org.slf4j.event.Level.INFO; /** @@ -689,11 +690,11 @@ public void testGetS3Secret() throws Exception { //Creates a secret since it does not exist S3SecretValue firstAttempt = omClient - .getS3Secret("HADOOP/JOHNDOE"); + .getS3Secret(UserGroupInformation.getCurrentUser().getUserName()); //Fetches the secret from db since it was created in previous step S3SecretValue secondAttempt = omClient - .getS3Secret("HADOOP/JOHNDOE"); + .getS3Secret(UserGroupInformation.getCurrentUser().getUserName()); //secret fetched on both attempts must be same assertTrue(firstAttempt.getAwsSecret() @@ -703,6 +704,13 @@ public void testGetS3Secret() throws Exception { assertTrue(firstAttempt.getAwsAccessKey() .equals(secondAttempt.getAwsAccessKey())); + + try { + omClient.getS3Secret("HADOOP/JOHNDOE"); + fail("testGetS3Secret failed"); + } catch (IOException ex) { + GenericTestUtils.assertExceptionContains("USER_MISMATCH", ex); + } } finally { if(om != null){ om.stop(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index dbd5d39881186..bbbd61cada223 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -2683,6 +2683,14 @@ public void deleteS3Bucket(String s3BucketName) throws IOException { * {@inheritDoc} */ public S3SecretValue getS3Secret(String kerberosID) throws IOException{ + UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser(); + + // Check whether user name passed is matching with the current user or not. + if (!user.getUserName().equals(kerberosID)) { + throw new OMException("User mismatch. Requested user name is " + + "mismatched " + kerberosID +", with current user " + + user.getUserName(), OMException.ResultCodes.USER_MISMATCH); + } return s3SecretManager.getS3Secret(kerberosID); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java new file mode 100644 index 0000000000000..60f808c55aacc --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java @@ -0,0 +1,193 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.security; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import com.google.common.base.Optional; +import org.apache.commons.codec.digest.DigestUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.S3SecretValue; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.security.S3GetSecretResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpdateGetS3SecretRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Secret; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_SECRET_LOCK; + +/** + * Handles GetS3Secret request. + */ +public class S3GetSecretRequest extends OMClientRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3GetSecretRequest.class); + + public S3GetSecretRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + GetS3SecretRequest s3GetSecretRequest = + getOmRequest().getGetS3SecretRequest(); + + // Generate S3 Secret to be used by OM quorum. + String kerberosID = s3GetSecretRequest.getKerberosID(); + + UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser(); + if (!user.getUserName().equals(kerberosID)) { + throw new OMException("User mismatch. Requested user name is " + + "mismatched " + kerberosID +", with current user " + + user.getUserName(), OMException.ResultCodes.USER_MISMATCH); + } + + String s3Secret = DigestUtils.sha256Hex(OmUtils.getSHADigest()); + + UpdateGetS3SecretRequest updateGetS3SecretRequest = + UpdateGetS3SecretRequest.newBuilder() + .setAwsSecret(s3Secret) + .setKerberosID(kerberosID).build(); + + // Client issues GetS3Secret request, when received by OM leader + // it will generate s3Secret. Original GetS3Secret request is + // converted to UpdateGetS3Secret request with the generated token + // information. This updated request will be submitted to Ratis. 
In this + // way S3Secret created by leader, will be replicated across all + // OMs. With this approach, original GetS3Secret request from + // client does not need any proto changes. + OMRequest.Builder omRequest = OMRequest.newBuilder() + .setUserInfo(getUserInfo()) + .setUpdateGetS3SecretRequest(updateGetS3SecretRequest) + .setCmdType(getOmRequest().getCmdType()) + .setClientId(getOmRequest().getClientId()); + + if (getOmRequest().hasTraceID()) { + omRequest.setTraceID(getOmRequest().getTraceID()); + } + + return omRequest.build(); + + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long transactionLogIndex, + OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { + + + OMClientResponse omClientResponse = null; + OMResponse.Builder omResponse = OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.GetS3Secret) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setSuccess(true); + boolean acquiredLock = false; + IOException exception = null; + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + UpdateGetS3SecretRequest updateGetS3SecretRequest = + getOmRequest().getUpdateGetS3SecretRequest(); + String kerberosID = updateGetS3SecretRequest.getKerberosID(); + try { + String awsSecret = updateGetS3SecretRequest.getAwsSecret(); + acquiredLock = + omMetadataManager.getLock().acquireLock(S3_SECRET_LOCK, kerberosID); + + S3SecretValue s3SecretValue = + omMetadataManager.getS3SecretTable().get(kerberosID); + + // If s3Secret for user is not in S3Secret table, add the Secret to cache. + if (s3SecretValue == null) { + omMetadataManager.getS3SecretTable().addCacheEntry( + new CacheKey<>(kerberosID), + new CacheValue<>(Optional.of(new S3SecretValue(kerberosID, + awsSecret)), transactionLogIndex)); + } else { + // If it already exists, use the existing one. + awsSecret = s3SecretValue.getAwsSecret(); + } + + GetS3SecretResponse.Builder getS3SecretResponse = GetS3SecretResponse + .newBuilder().setS3Secret(S3Secret.newBuilder() + .setAwsSecret(awsSecret).setKerberosID(kerberosID)); + + if (s3SecretValue == null) { + omClientResponse = + new S3GetSecretResponse(new S3SecretValue(kerberosID, awsSecret), + omResponse.setGetS3SecretResponse(getS3SecretResponse).build()); + } else { + // As when it already exists, we don't need to add to DB again. So + // set the value to null. 
+ omClientResponse = new S3GetSecretResponse(null, + omResponse.setGetS3SecretResponse(getS3SecretResponse).build()); + } + + } catch (IOException ex) { + exception = ex; + omClientResponse = new S3GetSecretResponse(null, + createErrorOMResponse(omResponse, ex)); + } finally { + if (omClientResponse != null) { + omClientResponse.setFlushFuture(ozoneManagerDoubleBufferHelper.add( + omClientResponse, transactionLogIndex)); + } + if (acquiredLock) { + omMetadataManager.getLock().releaseLock(S3_SECRET_LOCK, kerberosID); + } + } + + + Map auditMap = new HashMap<>(); + auditMap.put(OzoneConsts.S3_GETSECRET_USER, kerberosID); + + // audit log + auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.GET_S3_SECRET, auditMap, + exception, getOmRequest().getUserInfo())); + + if (exception == null) { + LOG.debug("Secret for accessKey:{} is generated Successfully", + kerberosID); + } else { + LOG.error("Secret for accessKey:{} is generation failed", kerberosID, + exception); + } + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/package-info.java new file mode 100644 index 0000000000000..94a6b116869b8 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Package contains classes related to S3 security requests. + */ +package org.apache.hadoop.ozone.om.request.s3.security; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java new file mode 100644 index 0000000000000..61e20160e2390 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.security; + +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.S3SecretValue; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.utils.db.BatchOperation; + +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import java.io.IOException; + +/** + * Response for GetS3Secret request. + */ +public class S3GetSecretResponse extends OMClientResponse { + + + private S3SecretValue s3SecretValue; + + public S3GetSecretResponse(@Nullable S3SecretValue s3SecretValue, + @Nonnull OMResponse omResponse) { + super(omResponse); + this.s3SecretValue = s3SecretValue; + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + + if (s3SecretValue != null && + getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { + omMetadataManager.getS3SecretTable().putWithBatch(batchOperation, + s3SecretValue.getKerberosID(), s3SecretValue); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java new file mode 100644 index 0000000000000..d9024d1c85f27 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Package contains classes related to S3 security responses. + */ +package org.apache.hadoop.ozone.om.request.s3.security; From 93daf69f90df650a6c5fb33f79e51878ad8985c9 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Thu, 22 Aug 2019 15:19:40 -0700 Subject: [PATCH 0814/1308] HDFS-14675. Increase Balancer Defaults Further. Contributed by Stephen O'Donnell. Signed-off-by: Wei-Chiu Chuang Reviewed-by: Gabor Bota --- .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 ++-- .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 15f5a417cb167..9bd124627f818 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -110,11 +110,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY = HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY; public static final long DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = - 10 * 1024*1024; + 100 * 1024*1024; public static final String DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY = "dfs.datanode.balance.max.concurrent.moves"; public static final int - DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT = 50; + DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT = 100; @Deprecated public static final String DFS_DATANODE_READAHEAD_BYTES_KEY = HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index ee24c7a7b44b6..79811aad7bf5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -990,7 +990,7 @@ dfs.datanode.balance.bandwidthPerSec - 10m + 100m Specifies the maximum amount of bandwidth that each datanode can utilize for the balancing purpose in term of @@ -4068,7 +4068,7 @@ dfs.datanode.balance.max.concurrent.moves - 50 + 100 Maximum number of threads for Datanode balancer pending moves. This value is reconfigurable via the "dfsadmin -reconfig" command. From b67812ea2111fa11bdd76096b923c93e1bdf2923 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Fri, 23 Aug 2019 01:09:57 +0100 Subject: [PATCH 0815/1308] HDFS-14617. Improve fsimage load time by writing sub-sections to the fsimage index (#1028). Contributed by Stephen O'Donnell. 
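The feature is governed by the new `dfs.image.parallel.*` keys added to DFSConfigKeys and hdfs-default.xml below. A minimal hdfs-site.xml sketch that just restates the shipped defaults (illustrative, not part of the diff):

```
<!-- Illustrative only: values are the defaults introduced by this patch. -->
<property>
  <name>dfs.image.parallel.load</name>
  <value>true</value>
</property>
<property>
  <name>dfs.image.parallel.target.sections</name>
  <value>12</value>
</property>
<property>
  <name>dfs.image.parallel.inode.threshold</name>
  <value>1000000</value>
</property>
<property>
  <name>dfs.image.parallel.threads</name>
  <value>4</value>
</property>
```

As the subject notes, the sub-sections have to be written into the fsimage index at save time before a later load can be parallelized.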
Reviewed-by: He Xiaoqiao --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 16 ++ .../hadoop/hdfs/server/namenode/FSImage.java | 3 +- .../server/namenode/FSImageFormatPBINode.java | 239 +++++++++++++++-- .../namenode/FSImageFormatProtobuf.java | 252 +++++++++++++++++- .../snapshot/FSImageFormatPBSnapshot.java | 9 +- .../src/main/resources/hdfs-default.xml | 51 ++++ .../hdfs/server/namenode/FSImageTestUtil.java | 23 ++ .../hdfs/server/namenode/TestFSImage.java | 154 ++++++++++- .../namenode/TestFSImageWithSnapshot.java | 3 +- 9 files changed, 719 insertions(+), 31 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 9bd124627f818..95806defc603b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -883,6 +883,22 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_IMAGE_TRANSFER_CHUNKSIZE_KEY = "dfs.image.transfer.chunksize"; public static final int DFS_IMAGE_TRANSFER_CHUNKSIZE_DEFAULT = 64 * 1024; + public static final String DFS_IMAGE_PARALLEL_LOAD_KEY = + "dfs.image.parallel.load"; + public static final boolean DFS_IMAGE_PARALLEL_LOAD_DEFAULT = true; + + public static final String DFS_IMAGE_PARALLEL_TARGET_SECTIONS_KEY = + "dfs.image.parallel.target.sections"; + public static final int DFS_IMAGE_PARALLEL_TARGET_SECTIONS_DEFAULT = 12; + + public static final String DFS_IMAGE_PARALLEL_INODE_THRESHOLD_KEY = + "dfs.image.parallel.inode.threshold"; + public static final int DFS_IMAGE_PARALLEL_INODE_THRESHOLD_DEFAULT = 1000000; + + public static final String DFS_IMAGE_PARALLEL_THREADS_KEY = + "dfs.image.parallel.threads"; + public static final int DFS_IMAGE_PARALLEL_THREADS_DEFAULT = 4; + // Edit Log segment transfer timeout public static final String DFS_EDIT_LOG_TRANSFER_TIMEOUT_KEY = "dfs.edit.log.transfer.timeout"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index cfba091976eb5..cea18b7f00382 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -985,7 +985,8 @@ void saveFSImage(SaveNamespaceContext context, StorageDirectory sd, File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid); File dstFile = NNStorage.getStorageFile(sd, dstType, txid); - FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context); + FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context, + conf); FSImageCompression compression = FSImageCompression.createCompression(conf); long numErrors = saver.save(newFile, compression); if (numErrors > 0) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java index 6825a5c4857db..d84e8c5b1fc00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java @@ -25,6 +25,11 @@ import java.util.Collection; import java.util.Iterator; import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReentrantLock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,6 +95,8 @@ public final class FSImageFormatPBINode { private static final Logger LOG = LoggerFactory.getLogger(FSImageFormatPBINode.class); + private static final int DIRECTORY_ENTRY_BATCH_SIZE = 1000; + // the loader must decode all fields referencing serial number based fields // via to methods with the string table. public final static class Loader { @@ -197,16 +204,66 @@ public static void updateBlocksMap(INodeFile file, BlockManager bm) { private final FSDirectory dir; private final FSNamesystem fsn; private final FSImageFormatProtobuf.Loader parent; + private ReentrantLock cacheNameMapLock; + private ReentrantLock blockMapLock; Loader(FSNamesystem fsn, final FSImageFormatProtobuf.Loader parent) { this.fsn = fsn; this.dir = fsn.dir; this.parent = parent; + cacheNameMapLock = new ReentrantLock(true); + blockMapLock = new ReentrantLock(true); + } + + void loadINodeDirectorySectionInParallel(ExecutorService service, + ArrayList sections, String compressionCodec) + throws IOException { + LOG.info("Loading the INodeDirectory section in parallel with {} sub-" + + "sections", sections.size()); + CountDownLatch latch = new CountDownLatch(sections.size()); + final CopyOnWriteArrayList exceptions = + new CopyOnWriteArrayList<>(); + for (FileSummary.Section s : sections) { + service.submit(() -> { + InputStream ins = null; + try { + ins = parent.getInputStreamForSection(s, + compressionCodec); + loadINodeDirectorySection(ins); + } catch (Exception e) { + LOG.error("An exception occurred loading INodeDirectories in " + + "parallel", e); + exceptions.add(new IOException(e)); + } finally { + latch.countDown(); + try { + if (ins != null) { + ins.close(); + } + } catch (IOException ioe) { + LOG.warn("Failed to close the input stream, ignoring", ioe); + } + } + }); + } + try { + latch.await(); + } catch (InterruptedException e) { + LOG.error("Interrupted waiting for countdown latch", e); + throw new IOException(e); + } + if (exceptions.size() != 0) { + LOG.error("{} exceptions occurred loading INodeDirectories", + exceptions.size()); + throw exceptions.get(0); + } + LOG.info("Completed loading all INodeDirectory sub-sections"); } void loadINodeDirectorySection(InputStream in) throws IOException { final List refList = parent.getLoaderContext() .getRefList(); + ArrayList inodeList = new ArrayList<>(); while (true) { INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry .parseDelimitedFrom(in); @@ -217,33 +274,159 @@ void loadINodeDirectorySection(InputStream in) throws IOException { INodeDirectory p = dir.getInode(e.getParent()).asDirectory(); for (long id : e.getChildrenList()) { INode child = dir.getInode(id); - addToParent(p, child); + if (addToParent(p, child)) { + if (child.isFile()) { + inodeList.add(child); + } + if (inodeList.size() >= DIRECTORY_ENTRY_BATCH_SIZE) { + addToCacheAndBlockMap(inodeList); + inodeList.clear(); + } + } else { + LOG.warn("Failed to add the inode {} to the directory {}", + child.getId(), p.getId()); + } } + for (int refId : e.getRefChildrenList()) { 
INodeReference ref = refList.get(refId); - addToParent(p, ref); + if (addToParent(p, ref)) { + if (ref.isFile()) { + inodeList.add(ref); + } + if (inodeList.size() >= DIRECTORY_ENTRY_BATCH_SIZE) { + addToCacheAndBlockMap(inodeList); + inodeList.clear(); + } + } else { + LOG.warn("Failed to add the inode reference {} to the directory {}", + ref.getId(), p.getId()); + } + } + } + addToCacheAndBlockMap(inodeList); + } + + private void addToCacheAndBlockMap(ArrayList inodeList) { + try { + cacheNameMapLock.lock(); + for (INode i : inodeList) { + dir.cacheName(i); + } + } finally { + cacheNameMapLock.unlock(); + } + + try { + blockMapLock.lock(); + for (INode i : inodeList) { + updateBlocksMap(i.asFile(), fsn.getBlockManager()); } + } finally { + blockMapLock.unlock(); } } void loadINodeSection(InputStream in, StartupProgress prog, Step currentStep) throws IOException { - INodeSection s = INodeSection.parseDelimitedFrom(in); - fsn.dir.resetLastInodeId(s.getLastInodeId()); - long numInodes = s.getNumInodes(); - LOG.info("Loading " + numInodes + " INodes."); - prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes); + loadINodeSectionHeader(in, prog, currentStep); Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep); - for (int i = 0; i < numInodes; ++i) { + int totalLoaded = loadINodesInSection(in, counter); + LOG.info("Successfully loaded {} inodes", totalLoaded); + } + + private int loadINodesInSection(InputStream in, Counter counter) + throws IOException { + // As the input stream is a LimitInputStream, the reading will stop when + // EOF is encountered at the end of the stream. + int cntr = 0; + while (true) { INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in); + if (p == null) { + break; + } if (p.getId() == INodeId.ROOT_INODE_ID) { - loadRootINode(p); + synchronized(this) { + loadRootINode(p); + } } else { INode n = loadINode(p); - dir.addToInodeMap(n); + synchronized(this) { + dir.addToInodeMap(n); + } + } + cntr++; + if (counter != null) { + counter.increment(); + } + } + return cntr; + } + + + private long loadINodeSectionHeader(InputStream in, StartupProgress prog, + Step currentStep) throws IOException { + INodeSection s = INodeSection.parseDelimitedFrom(in); + fsn.dir.resetLastInodeId(s.getLastInodeId()); + long numInodes = s.getNumInodes(); + LOG.info("Loading " + numInodes + " INodes."); + prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes); + return numInodes; + } + + void loadINodeSectionInParallel(ExecutorService service, + ArrayList sections, + String compressionCodec, StartupProgress prog, + Step currentStep) throws IOException { + LOG.info("Loading the INode section in parallel with {} sub-sections", + sections.size()); + long expectedInodes = 0; + CountDownLatch latch = new CountDownLatch(sections.size()); + AtomicInteger totalLoaded = new AtomicInteger(0); + final CopyOnWriteArrayList exceptions = + new CopyOnWriteArrayList<>(); + + for (int i=0; i < sections.size(); i++) { + FileSummary.Section s = sections.get(i); + InputStream ins = parent.getInputStreamForSection(s, compressionCodec); + if (i == 0) { + // The first inode section has a header which must be processed first + expectedInodes = loadINodeSectionHeader(ins, prog, currentStep); } - counter.increment(); + service.submit(() -> { + try { + totalLoaded.addAndGet(loadINodesInSection(ins, null)); + prog.setCount(Phase.LOADING_FSIMAGE, currentStep, + totalLoaded.get()); + } catch (Exception e) { + LOG.error("An exception occurred loading INodes in parallel", e); + 
exceptions.add(new IOException(e)); + } finally { + latch.countDown(); + try { + ins.close(); + } catch (IOException ioe) { + LOG.warn("Failed to close the input stream, ignoring", ioe); + } + } + }); + } + try { + latch.await(); + } catch (InterruptedException e) { + LOG.info("Interrupted waiting for countdown latch"); } + if (exceptions.size() != 0) { + LOG.error("{} exceptions occurred loading INodes", exceptions.size()); + throw exceptions.get(0); + } + if (totalLoaded.get() != expectedInodes) { + throw new IOException("Expected to load "+expectedInodes+" in " + + "parallel, but loaded "+totalLoaded.get()+". The image may " + + "be corrupt."); + } + LOG.info("Completed loading all INode sections. Loaded {} inodes.", + totalLoaded.get()); } /** @@ -261,22 +444,18 @@ void loadFilesUnderConstructionSection(InputStream in) throws IOException { } } - private void addToParent(INodeDirectory parent, INode child) { - if (parent == dir.rootDir && FSDirectory.isReservedName(child)) { + private boolean addToParent(INodeDirectory parentDir, INode child) { + if (parentDir == dir.rootDir && FSDirectory.isReservedName(child)) { throw new HadoopIllegalArgumentException("File name \"" + child.getLocalName() + "\" is reserved. Please " + " change the name of the existing file or directory to another " + "name before upgrading to this release."); } // NOTE: This does not update space counts for parents - if (!parent.addChildAtLoading(child)) { - return; - } - dir.cacheName(child); - - if (child.isFile()) { - updateBlocksMap(child.asFile(), fsn.getBlockManager()); + if (!parentDir.addChildAtLoading(child)) { + return false; } + return true; } private INode loadINode(INodeSection.INode n) { @@ -527,6 +706,7 @@ void serializeINodeDirectorySection(OutputStream out) throws IOException { final ArrayList refList = parent.getSaverContext() .getRefList(); int i = 0; + int outputInodes = 0; while (iter.hasNext()) { INodeWithAdditionalFields n = iter.next(); if (!n.isDirectory()) { @@ -558,6 +738,7 @@ void serializeINodeDirectorySection(OutputStream out) throws IOException { refList.add(inode.asReference()); b.addRefChildren(refList.size() - 1); } + outputInodes++; } INodeDirectorySection.DirEntry e = b.build(); e.writeDelimitedTo(out); @@ -567,9 +748,15 @@ void serializeINodeDirectorySection(OutputStream out) throws IOException { if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) { context.checkCancelled(); } + if (outputInodes >= parent.getInodesPerSubSection()) { + outputInodes = 0; + parent.commitSubSection(summary, + FSImageFormatProtobuf.SectionName.INODE_DIR_SUB); + } } - parent.commitSection(summary, - FSImageFormatProtobuf.SectionName.INODE_DIR); + parent.commitSectionAndSubSection(summary, + FSImageFormatProtobuf.SectionName.INODE_DIR, + FSImageFormatProtobuf.SectionName.INODE_DIR_SUB); } void serializeINodeSection(OutputStream out) throws IOException { @@ -589,8 +776,14 @@ void serializeINodeSection(OutputStream out) throws IOException { if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) { context.checkCancelled(); } + if (i % parent.getInodesPerSubSection() == 0) { + parent.commitSubSection(summary, + FSImageFormatProtobuf.SectionName.INODE_SUB); + } } - parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE); + parent.commitSectionAndSubSection(summary, + FSImageFormatProtobuf.SectionName.INODE, + FSImageFormatProtobuf.SectionName.INODE_SUB); } void serializeFilesUCSection(OutputStream out) throws IOException { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index b887a1438e249..3144d4b17cfa6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -40,7 +40,11 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.Iterator; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.io.compress.CompressionOutputStream; @@ -150,6 +154,8 @@ public static final class Loader implements FSImageFormat.AbstractLoader { */ private final boolean requireSameLayoutVersion; + private File filename; + Loader(Configuration conf, FSNamesystem fsn, boolean requireSameLayoutVersion) { this.conf = conf; @@ -229,6 +235,7 @@ public String toString() { } void load(File file) throws IOException { + filename = file; long start = Time.monotonicNow(); DigestThread dt = new DigestThread(file); dt.start(); @@ -250,6 +257,96 @@ void load(File file) throws IOException { } } + /** + * Given a FSImage FileSummary.section, return a LimitInput stream set to + * the starting position of the section and limited to the section length. + * @param section The FileSummary.Section containing the offset and length + * @param compressionCodec The compression codec in use, if any + * @return An InputStream for the given section + * @throws IOException + */ + public InputStream getInputStreamForSection(FileSummary.Section section, + String compressionCodec) + throws IOException { + FileInputStream fin = new FileInputStream(filename); + FileChannel channel = fin.getChannel(); + channel.position(section.getOffset()); + InputStream in = new BufferedInputStream(new LimitInputStream(fin, + section.getLength())); + + in = FSImageUtil.wrapInputStreamForCompression(conf, + compressionCodec, in); + return in; + } + + /** + * Takes an ArrayList of Section's and removes all Section's whose + * name ends in _SUB, indicating they are sub-sections. The original + * array list is modified and a new list of the removed Section's is + * returned. + * @param sections Array List containing all Sections and Sub Sections + * in the image. + * @return ArrayList of the sections removed, or an empty list if none are + * removed. + */ + private ArrayList getAndRemoveSubSections( + ArrayList sections) { + ArrayList subSections = new ArrayList<>(); + Iterator iter = sections.iterator(); + while (iter.hasNext()) { + FileSummary.Section s = iter.next(); + String name = s.getName(); + if (name.matches(".*_SUB$")) { + subSections.add(s); + iter.remove(); + } + } + return subSections; + } + + /** + * Given an ArrayList of Section's, return all Section's with the given + * name, or an empty list if none are found. 
+ * @param sections ArrayList of the Section's to search though + * @param name The name of the Sections to search for + * @return ArrayList of the sections matching the given name + */ + private ArrayList getSubSectionsOfName( + ArrayList sections, SectionName name) { + ArrayList subSec = new ArrayList<>(); + for (FileSummary.Section s : sections) { + String n = s.getName(); + SectionName sectionName = SectionName.fromString(n); + if (sectionName == name) { + subSec.add(s); + } + } + return subSec; + } + + /** + * Checks the number of threads configured for parallel loading and + * return an ExecutorService with configured number of threads. If the + * thread count is set to less than 1, it will be reset to the default + * value + * @return ExecutorServie with the correct number of threads + */ + private ExecutorService getParallelExecutorService() { + int threads = conf.getInt(DFSConfigKeys.DFS_IMAGE_PARALLEL_THREADS_KEY, + DFSConfigKeys.DFS_IMAGE_PARALLEL_THREADS_DEFAULT); + if (threads < 1) { + LOG.warn("Parallel is enabled and {} is set to {}. Setting to the " + + "default value {}", DFSConfigKeys.DFS_IMAGE_PARALLEL_THREADS_KEY, + threads, DFSConfigKeys.DFS_IMAGE_PARALLEL_THREADS_DEFAULT); + threads = DFSConfigKeys.DFS_IMAGE_PARALLEL_THREADS_DEFAULT; + } + ExecutorService executorService = Executors.newFixedThreadPool( + threads); + LOG.info("The fsimage will be loaded in parallel using {} threads", + threads); + return executorService; + } + private void loadInternal(RandomAccessFile raFile, FileInputStream fin) throws IOException { if (!FSImageUtil.checkFileFormat(raFile)) { @@ -294,6 +391,14 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) { * a particular step to be started for once. */ Step currentStep = null; + boolean loadInParallel = enableParallelSaveAndLoad(conf); + + ExecutorService executorService = null; + ArrayList subSections = + getAndRemoveSubSections(sections); + if (loadInParallel) { + executorService = getParallelExecutorService(); + } for (FileSummary.Section s : sections) { channel.position(s.getOffset()); @@ -308,6 +413,8 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) { if (sectionName == null) { throw new IOException("Unrecognized section " + n); } + + ArrayList stageSubSections; switch (sectionName) { case NS_INFO: loadNameSystemSection(in); @@ -318,14 +425,28 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) { case INODE: { currentStep = new Step(StepType.INODES); prog.beginStep(Phase.LOADING_FSIMAGE, currentStep); - inodeLoader.loadINodeSection(in, prog, currentStep); + stageSubSections = getSubSectionsOfName( + subSections, SectionName.INODE_SUB); + if (loadInParallel && (stageSubSections.size() > 0)) { + inodeLoader.loadINodeSectionInParallel(executorService, + stageSubSections, summary.getCodec(), prog, currentStep); + } else { + inodeLoader.loadINodeSection(in, prog, currentStep); + } } break; case INODE_REFERENCE: snapshotLoader.loadINodeReferenceSection(in); break; case INODE_DIR: - inodeLoader.loadINodeDirectorySection(in); + stageSubSections = getSubSectionsOfName( + subSections, SectionName.INODE_DIR_SUB); + if (loadInParallel && stageSubSections.size() > 0) { + inodeLoader.loadINodeDirectorySectionInParallel(executorService, + stageSubSections, summary.getCodec()); + } else { + inodeLoader.loadINodeDirectorySection(in); + } break; case FILES_UNDERCONSTRUCTION: inodeLoader.loadFilesUnderConstructionSection(in); @@ -362,6 +483,9 @@ public int compare(FileSummary.Section s1, 
FileSummary.Section s2) { break; } } + if (executorService != null) { + executorService.shutdown(); + } } private void loadNameSystemSection(InputStream in) throws IOException { @@ -450,12 +574,34 @@ private void loadErasureCodingSection(InputStream in) } } + private static boolean enableParallelSaveAndLoad(Configuration conf) { + boolean loadInParallel = + conf.getBoolean(DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY, + DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT); + boolean compressionEnabled = conf.getBoolean( + DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, + DFSConfigKeys.DFS_IMAGE_COMPRESS_DEFAULT); + + if (loadInParallel) { + if (compressionEnabled) { + LOG.warn("Parallel Image loading and saving is not supported when {}" + + " is set to true. Parallel will be disabled.", + DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY); + loadInParallel = false; + } + } + return loadInParallel; + } + public static final class Saver { public static final int CHECK_CANCEL_INTERVAL = 4096; + private boolean writeSubSections = false; + private int inodesPerSubSection = Integer.MAX_VALUE; private final SaveNamespaceContext context; private final SaverContext saverContext; private long currentOffset = FSImageUtil.MAGIC_HEADER.length; + private long subSectionOffset = currentOffset; private MD5Hash savedDigest; private FileChannel fileChannel; @@ -463,10 +609,12 @@ public static final class Saver { private OutputStream sectionOutputStream; private CompressionCodec codec; private OutputStream underlyingOutputStream; + private Configuration conf; - Saver(SaveNamespaceContext context) { + Saver(SaveNamespaceContext context, Configuration conf) { this.context = context; this.saverContext = new SaverContext(); + this.conf = conf; } public MD5Hash getSavedDigest() { @@ -481,6 +629,29 @@ public SaverContext getSaverContext() { return saverContext; } + public int getInodesPerSubSection() { + return inodesPerSubSection; + } + + public boolean shouldWriteSubSections() { + return writeSubSections; + } + + /** + * Commit the length and offset of a fsimage section to the summary index, + * including the sub section, which will be committed before the section is + * committed. + * @param summary The image summary object + * @param name The name of the section to commit + * @param subSectionName The name of the sub-section to commit + * @throws IOException + */ + public void commitSectionAndSubSection(FileSummary.Builder summary, + SectionName name, SectionName subSectionName) throws IOException { + commitSubSection(summary, subSectionName); + commitSection(summary, name); + } + public void commitSection(FileSummary.Builder summary, SectionName name) throws IOException { long oldOffset = currentOffset; @@ -495,6 +666,35 @@ public void commitSection(FileSummary.Builder summary, SectionName name) summary.addSections(FileSummary.Section.newBuilder().setName(name.name) .setLength(length).setOffset(currentOffset)); currentOffset += length; + subSectionOffset = currentOffset; + } + + /** + * Commit the length and offset of a fsimage sub-section to the summary + * index. + * @param summary The image summary object + * @param name The name of the sub-section to commit + * @throws IOException + */ + public void commitSubSection(FileSummary.Builder summary, SectionName name) + throws IOException { + if (!writeSubSections) { + return; + } + + LOG.debug("Saving a subsection for {}", name.toString()); + // The output stream must be flushed before the length is obtained + // as the flush can move the length forward. 
+ sectionOutputStream.flush(); + long length = fileChannel.position() - subSectionOffset; + if (length == 0) { + LOG.warn("The requested section for {} is empty. It will not be " + + "output to the image", name.toString()); + return; + } + summary.addSections(FileSummary.Section.newBuilder().setName(name.name) + .setLength(length).setOffset(subSectionOffset)); + subSectionOffset += length; } private void flushSectionOutputStream() throws IOException { @@ -509,6 +709,7 @@ private void flushSectionOutputStream() throws IOException { * @throws IOException on fatal error. */ long save(File file, FSImageCompression compression) throws IOException { + enableSubSectionsIfRequired(); FileOutputStream fout = new FileOutputStream(file); fileChannel = fout.getChannel(); try { @@ -525,6 +726,47 @@ long save(File file, FSImageCompression compression) throws IOException { } } + private void enableSubSectionsIfRequired() { + boolean parallelEnabled = enableParallelSaveAndLoad(conf); + int inodeThreshold = conf.getInt( + DFSConfigKeys.DFS_IMAGE_PARALLEL_INODE_THRESHOLD_KEY, + DFSConfigKeys.DFS_IMAGE_PARALLEL_INODE_THRESHOLD_DEFAULT); + int targetSections = conf.getInt( + DFSConfigKeys.DFS_IMAGE_PARALLEL_TARGET_SECTIONS_KEY, + DFSConfigKeys.DFS_IMAGE_PARALLEL_TARGET_SECTIONS_DEFAULT); + + if (parallelEnabled) { + if (targetSections <= 0) { + LOG.warn("{} is set to {}. It must be greater than zero. Setting to" + + " default of {}", + DFSConfigKeys.DFS_IMAGE_PARALLEL_TARGET_SECTIONS_KEY, + targetSections, + DFSConfigKeys.DFS_IMAGE_PARALLEL_TARGET_SECTIONS_DEFAULT); + targetSections = + DFSConfigKeys.DFS_IMAGE_PARALLEL_TARGET_SECTIONS_DEFAULT; + } + if (inodeThreshold <= 0) { + LOG.warn("{} is set to {}. It must be greater than zero. Setting to" + + " default of {}", + DFSConfigKeys.DFS_IMAGE_PARALLEL_INODE_THRESHOLD_KEY, + inodeThreshold, + DFSConfigKeys.DFS_IMAGE_PARALLEL_INODE_THRESHOLD_DEFAULT); + inodeThreshold = + DFSConfigKeys.DFS_IMAGE_PARALLEL_INODE_THRESHOLD_DEFAULT; + } + int inodeCount = context.getSourceNamesystem().dir.getInodeMapSize(); + // Only enable parallel sections if there are enough inodes + if (inodeCount >= inodeThreshold) { + writeSubSections = true; + // Calculate the inodes per section rounded up to the nearest int + inodesPerSubSection = (inodeCount + targetSections - 1) / + targetSections; + } + } else { + writeSubSections = false; + } + } + private static void saveFileSummary(OutputStream out, FileSummary summary) throws IOException { summary.writeDelimitedTo(out); @@ -737,11 +979,15 @@ public enum SectionName { EXTENDED_ACL("EXTENDED_ACL"), ERASURE_CODING("ERASURE_CODING"), INODE("INODE"), + INODE_SUB("INODE_SUB"), INODE_REFERENCE("INODE_REFERENCE"), + INODE_REFERENCE_SUB("INODE_REFERENCE_SUB"), SNAPSHOT("SNAPSHOT"), INODE_DIR("INODE_DIR"), + INODE_DIR_SUB("INODE_DIR_SUB"), FILES_UNDERCONSTRUCTION("FILES_UNDERCONSTRUCTION"), SNAPSHOT_DIFF("SNAPSHOT_DIFF"), + SNAPSHOT_DIFF_SUB("SNAPSHOT_DIFF_SUB"), SECRET_MANAGER("SECRET_MANAGER"), CACHE_MANAGER("CACHE_MANAGER"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java index 2157554cd62ed..cd5051dd3924c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java @@ -529,9 +529,14 @@ public void serializeSnapshotDiffSection(OutputStream out) if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) { context.checkCancelled(); } + if (i % parent.getInodesPerSubSection() == 0) { + parent.commitSubSection(headers, + FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF_SUB); + } } - parent.commitSection(headers, - FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF); + parent.commitSectionAndSubSection(headers, + FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF, + FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF_SUB); } private void serializeFileDiffList(INodeFile file, OutputStream out) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 79811aad7bf5b..74c4f40938924 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -1385,6 +1385,57 @@ + + dfs.image.parallel.load + true + + If true, write sub-section entries to the fsimage index so it can + be loaded in parallel. Also controls whether parallel loading + will be used for an image previously created with sub-sections. + If the image contains sub-sections and this is set to false, + parallel loading will not be used. + Parallel loading is not compatible with image compression, + so if dfs.image.compress is set to true this setting will be + ignored and no parallel loading will occur. + + + + + dfs.image.parallel.target.sections + 12 + + Controls the number of sub-sections that will be written to + fsimage for each section. This should be larger than + dfs.image.parallel.threads, otherwise all threads will not be + used when loading. Ideally, have at least twice the number + of target sections as threads, so each thread must load more + than one section to avoid one long running section affecting + the load time. + + + + + dfs.image.parallel.inode.threshold + 1000000 + + If the image contains less inodes than this setting, then + do not write sub-sections and hence disable parallel loading. + This is because small images load very quickly in serial and + parallel loading is not needed. + + + + + dfs.image.parallel.threads + 4 + + The number of threads to use when dfs.image.parallel.load is + enabled. This setting should be less than + dfs.image.parallel.target.sections. The optimal number of + threads will depend on the hardware and environment. + + + dfs.edit.log.transfer.timeout 30000 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index 985ab35ba1dcc..c82d317d88e32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -606,4 +606,27 @@ public static long getStorageTxId(NameNode node, URI storageUri) getStorageDirectory(storageUri); return NNStorage.readTransactionIdFile(sDir); } + + /** + * Returns the summary section from the latest fsimage stored on the cluster. + * This is effectively the image index which contains the offset of each + * section and subsection. 
+ * @param cluster The cluster to load the image from + * @return The FileSummary section of the fsimage + * @throws IOException + */ + public static FsImageProto.FileSummary getLatestImageSummary( + MiniDFSCluster cluster) throws IOException { + RandomAccessFile raFile = null; + try { + File image = FSImageTestUtil.findLatestImageFile(FSImageTestUtil + .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0)); + raFile = new RandomAccessFile(image, "r"); + return FSImageUtil.loadSummary(raFile); + } finally { + if (raFile != null) { + raFile.close(); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java index 0beb7582e945f..793a749be21c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java @@ -32,8 +32,10 @@ import java.io.ByteArrayOutputStream; import java.io.ByteArrayInputStream; import java.io.IOException; +import java.util.ArrayList; import java.util.EnumSet; +import com.google.common.collect.Lists; import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; import org.apache.hadoop.hdfs.protocol.Block; @@ -72,6 +74,8 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; +import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section; +import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; @@ -1000,4 +1004,152 @@ private boolean isPolicyEnabledInFsImage(ErasureCodingPolicy testPolicy) { } throw new AssertionError("Policy is not found!"); } -} + + private ArrayList
<Section> getSubSectionsOfName(ArrayList<Section> sections, + FSImageFormatProtobuf.SectionName name) { + ArrayList<Section>
    subSec = new ArrayList<>(); + for (Section s : sections) { + if (s.getName().equals(name.toString())) { + subSec.add(s); + } + } + return subSec; + } + + private MiniDFSCluster createAndLoadParallelFSImage(Configuration conf) + throws IOException { + conf.set(DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY, "true"); + conf.set(DFSConfigKeys.DFS_IMAGE_PARALLEL_INODE_THRESHOLD_KEY, "1"); + conf.set(DFSConfigKeys.DFS_IMAGE_PARALLEL_TARGET_SECTIONS_KEY, "4"); + conf.set(DFSConfigKeys.DFS_IMAGE_PARALLEL_THREADS_KEY, "4"); + + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); + cluster.waitActive(); + DistributedFileSystem fs = cluster.getFileSystem(); + + // Create 10 directories, each containing 5 files + String baseDir = "/abc/def"; + for (int i=0; i<10; i++) { + Path dir = new Path(baseDir+"/"+i); + for (int j=0; j<5; j++) { + Path f = new Path(dir, Integer.toString(j)); + FSDataOutputStream os = fs.create(f); + os.write(1); + os.close(); + } + } + + // checkpoint + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + fs.saveNamespace(); + fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + + cluster.restartNameNode(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + + // Ensure all the files created above exist, proving they were loaded + // correctly + for (int i=0; i<10; i++) { + Path dir = new Path(baseDir+"/"+i); + assertTrue(fs.getFileStatus(dir).isDirectory()); + for (int j=0; j<5; j++) { + Path f = new Path(dir, Integer.toString(j)); + assertTrue(fs.exists(f)); + } + } + return cluster; + } + + @Test + public void testParallelSaveAndLoad() throws IOException { + Configuration conf = new Configuration(); + + MiniDFSCluster cluster = null; + try { + cluster = createAndLoadParallelFSImage(conf); + + // Obtain the image summary section to check the sub-sections + // are being correctly created when the image is saved. + FsImageProto.FileSummary summary = FSImageTestUtil. + getLatestImageSummary(cluster); + ArrayList
<Section> sections = Lists.newArrayList( + summary.getSectionsList()); + + ArrayList<Section> inodeSubSections = + getSubSectionsOfName(sections, SectionName.INODE_SUB); + ArrayList<Section>
    dirSubSections = + getSubSectionsOfName(sections, SectionName.INODE_DIR_SUB); + Section inodeSection = + getSubSectionsOfName(sections, SectionName.INODE).get(0); + Section dirSection = getSubSectionsOfName(sections, + SectionName.INODE_DIR).get(0); + + // Expect 4 sub-sections for inodes and directories as target Sections + // is 4 + assertEquals(4, inodeSubSections.size()); + assertEquals(4, dirSubSections.size()); + + // Expect the sub-section offset and lengths do not overlap and cover a + // continuous range of the file. They should also line up with the parent + ensureSubSectionsAlignWithParent(inodeSubSections, inodeSection); + ensureSubSectionsAlignWithParent(dirSubSections, dirSection); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + + @Test + public void testNoParallelSectionsWithCompressionEnabled() + throws IOException { + Configuration conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true); + conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, + "org.apache.hadoop.io.compress.GzipCodec"); + + MiniDFSCluster cluster = null; + try { + cluster = createAndLoadParallelFSImage(conf); + + // Obtain the image summary section to check the sub-sections + // are being correctly created when the image is saved. + FsImageProto.FileSummary summary = FSImageTestUtil. + getLatestImageSummary(cluster); + ArrayList
<Section> sections = Lists.newArrayList( + summary.getSectionsList()); + + ArrayList<Section> inodeSubSections = + getSubSectionsOfName(sections, SectionName.INODE_SUB); + ArrayList<Section>
    dirSubSections = + getSubSectionsOfName(sections, SectionName.INODE_DIR_SUB); + + // As compression is enabled, there should be no sub-sections in the + // image header + assertEquals(0, inodeSubSections.size()); + assertEquals(0, dirSubSections.size()); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + + private void ensureSubSectionsAlignWithParent(ArrayList
    subSec, + Section parent) { + // For each sub-section, check its offset + length == the next section + // offset + for (int i=0; i Date: Fri, 23 Aug 2019 02:55:42 +0200 Subject: [PATCH 0816/1308] HDDS-1808. TestRatisPipelineCreateAndDestory times out (#1338) --- ...dDestory.java => TestRatisPipelineCreateAndDestroy.java} | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/{TestRatisPipelineCreateAndDestory.java => TestRatisPipelineCreateAndDestroy.java} (97%) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java similarity index 97% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestory.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java index 9fd8aae0f0f13..6ace90cb248ee 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java @@ -39,7 +39,7 @@ /** * Tests for RatisPipelineUtils. */ -public class TestRatisPipelineCreateAndDestory { +public class TestRatisPipelineCreateAndDestroy { private static MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); @@ -63,7 +63,7 @@ public void cleanup() { cluster.shutdown(); } - @Test(timeout = 30000) + @Test(timeout = 180000) public void testAutomaticPipelineCreationOnPipelineDestroy() throws Exception { init(6); @@ -79,7 +79,7 @@ public void testAutomaticPipelineCreationOnPipelineDestroy() waitForPipelines(2); } - @Test(timeout = 30000) + @Test(timeout = 180000) public void testPipelineCreationOnNodeRestart() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 5, TimeUnit.SECONDS); From 84b1982060422760702eca6f1ef515c6ad3e85a5 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 23 Aug 2019 14:38:50 +0900 Subject: [PATCH 0817/1308] YARN-9774. Fix order of arguments for assertEquals in TestSLSUtils. Contributed by Nikhil Navadiya. 
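The swap matters because JUnit's Assert.assertEquals takes the expected value first and the actual value second, so a reversed call prints a misleading "expected ... but was ..." message on failure. A minimal sketch (not taken from the patch; the names are invented) of the corrected convention:

    import org.junit.Assert;

    public class AssertOrderExample {
      public static void main(String[] args) {
        String actualRack = "rack1";              // value produced by the code under test
        Assert.assertEquals("rack1", actualRack); // expected literal first, actual second
      }
    }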
--- .../org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java index 74907202b1014..423d6b2c7fe84 100644 --- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java +++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java @@ -37,13 +37,13 @@ public class TestSLSUtils { public void testGetRackHostname() { String str = "/rack1/node1"; String rackHostname[] = SLSUtils.getRackHostName(str); - Assert.assertEquals(rackHostname[0], "rack1"); - Assert.assertEquals(rackHostname[1], "node1"); + Assert.assertEquals("rack1", rackHostname[0]); + Assert.assertEquals("node1", rackHostname[1]); str = "/rackA/rackB/node1"; rackHostname = SLSUtils.getRackHostName(str); - Assert.assertEquals(rackHostname[0], "rackA/rackB"); - Assert.assertEquals(rackHostname[1], "node1"); + Assert.assertEquals("rackA/rackB", rackHostname[0]); + Assert.assertEquals("node1", rackHostname[1]); } @Test From abc8fde4caea0e197568ee28392c46f1ce0d42e1 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 23 Aug 2019 14:44:34 +0900 Subject: [PATCH 0818/1308] HDFS-13596. NN restart fails after RollingUpgrade from 2.x to 3.x. Contributed by Fei Hui. --- .../qjournal/client/QuorumJournalManager.java | 2 +- .../qjournal/client/QuorumOutputStream.java | 5 +- .../server/namenode/BackupJournalManager.java | 2 +- .../namenode/EditLogBackupOutputStream.java | 8 +- .../namenode/EditLogFileOutputStream.java | 3 +- .../server/namenode/EditLogOutputStream.java | 16 ++++ .../server/namenode/EditsDoubleBuffer.java | 10 +-- .../hdfs/server/namenode/FSEditLog.java | 4 +- .../hdfs/server/namenode/FSEditLogOp.java | 22 +++++- .../hdfs/server/namenode/FSNamesystem.java | 24 ++++++ .../hadoop/hdfs/qjournal/QJMTestUtil.java | 4 +- .../hdfs/server/namenode/TestEditLog.java | 76 +++++++++++++++++++ .../namenode/TestEditsDoubleBuffer.java | 9 ++- 13 files changed, 163 insertions(+), 22 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java index 703443e6730c0..ce12b4f09137d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java @@ -433,7 +433,7 @@ public EditLogOutputStream startLogSegment(long txId, int layoutVersion) loggers.waitForWriteQuorum(q, startSegmentTimeoutMs, "startLogSegment(" + txId + ")"); return new QuorumOutputStream(loggers, txId, outputBufferCapacity, - writeTxnsTimeoutMs); + writeTxnsTimeoutMs, layoutVersion); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java index e094b21a6329c..26f978a284e8a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java @@ -36,17 +36,18 @@ class 
QuorumOutputStream extends EditLogOutputStream { public QuorumOutputStream(AsyncLoggerSet loggers, long txId, int outputBufferCapacity, - int writeTimeoutMs) throws IOException { + int writeTimeoutMs, int logVersion) throws IOException { super(); this.buf = new EditsDoubleBuffer(outputBufferCapacity); this.loggers = loggers; this.segmentTxId = txId; this.writeTimeoutMs = writeTimeoutMs; + setCurrentLogVersion(logVersion); } @Override public void write(FSEditLogOp op) throws IOException { - buf.writeOp(op); + buf.writeOp(op, getCurrentLogVersion()); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java index eac91bf483255..c937987f1caa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java @@ -59,7 +59,7 @@ public boolean hasSomeData() { public EditLogOutputStream startLogSegment(long txId, int layoutVersion) throws IOException { EditLogBackupOutputStream stm = new EditLogBackupOutputStream(bnReg, - journalInfo); + journalInfo, layoutVersion); stm.startLogSegment(txId); return stm; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java index 43fc949dc8ec6..532574ee35de7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java @@ -54,8 +54,8 @@ class EditLogBackupOutputStream extends EditLogOutputStream { private EditsDoubleBuffer doubleBuf; EditLogBackupOutputStream(NamenodeRegistration bnReg, // backup node - JournalInfo journalInfo) // active name-node - throws IOException { + JournalInfo journalInfo, int logVersion) // active name-node + throws IOException { super(); this.bnRegistration = bnReg; this.journalInfo = journalInfo; @@ -71,11 +71,12 @@ class EditLogBackupOutputStream extends EditLogOutputStream { } this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE); this.out = new DataOutputBuffer(DEFAULT_BUFFER_SIZE); + setCurrentLogVersion(logVersion); } @Override // EditLogOutputStream public void write(FSEditLogOp op) throws IOException { - doubleBuf.writeOp(op); + doubleBuf.writeOp(op, getCurrentLogVersion()); } @Override @@ -90,6 +91,7 @@ public void writeRaw(byte[] bytes, int offset, int length) throws IOException { public void create(int layoutVersion) throws IOException { assert doubleBuf.isFlushed() : "previous data is not flushed yet"; this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE); + setCurrentLogVersion(layoutVersion); } @Override // EditLogOutputStream diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java index 9f06ce9d5fcf7..4dbe2720ddaa0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java @@ -95,7 +95,7 @@ public EditLogFileOutputStream(Configuration conf, File name, int size) @Override public void write(FSEditLogOp op) throws IOException { - doubleBuf.writeOp(op); + doubleBuf.writeOp(op, getCurrentLogVersion()); } /** @@ -121,6 +121,7 @@ public void create(int layoutVersion) throws IOException { writeHeader(layoutVersion, doubleBuf.getCurrentBuf()); setReadyToFlush(); flush(); + setCurrentLogVersion(layoutVersion); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java index b4ca2d6c0df4e..27733cf404162 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java @@ -35,6 +35,8 @@ public abstract class EditLogOutputStream implements Closeable { // these are statistics counters private long numSync; // number of sync(s) to disk private long totalTimeSync; // total time to sync + // The version of the current edit log + private int currentLogVersion; public EditLogOutputStream() throws IOException { numSync = totalTimeSync = 0; @@ -147,4 +149,18 @@ protected long getNumSync() { public String generateReport() { return toString(); } + + /** + * @return The version of the current edit log + */ + public int getCurrentLogVersion() { + return currentLogVersion; + } + + /** + * @param logVersion The version of the current edit log + */ + public void setCurrentLogVersion(int logVersion) { + this.currentLogVersion = logVersion; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java index 93f35f76e3cd3..be68f6d609008 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java @@ -56,9 +56,9 @@ public EditsDoubleBuffer(int defaultBufferSize) { bufReady = new TxnBuffer(initBufferSize); } - - public void writeOp(FSEditLogOp op) throws IOException { - bufCurrent.writeOp(op); + + public void writeOp(FSEditLogOp op, int logVersion) throws IOException { + bufCurrent.writeOp(op, logVersion); } public void writeRaw(byte[] bytes, int offset, int length) throws IOException { @@ -149,13 +149,13 @@ public TxnBuffer(int initBufferSize) { reset(); } - public void writeOp(FSEditLogOp op) throws IOException { + public void writeOp(FSEditLogOp op, int logVersion) throws IOException { if (firstTxId == HdfsServerConstants.INVALID_TXID) { firstTxId = op.txid; } else { assert op.txid > firstTxId; } - writer.writeOp(op); + writer.writeOp(op, logVersion); numTxns++; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 5b348e592115d..71b59fd28e3c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -155,7 +155,9 @@ private enum State { //initialize private JournalSet journalSet = null; - private EditLogOutputStream editLogStream = null; + + @VisibleForTesting + EditLogOutputStream editLogStream = null; // a monotonically increasing counter that represents transactionIds. // All of the threads which update/increment txid are synchronized, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index b93dcb7d5adbc..1482f2cd9114a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -282,6 +282,11 @@ abstract void readFields(DataInputStream in, int logVersion) public abstract void writeFields(DataOutputStream out) throws IOException; + public void writeFields(DataOutputStream out, int logVersion) + throws IOException { + writeFields(out); + } + static interface BlockListUpdatingOp { Block[] getBlocks(); String getPath(); @@ -546,6 +551,12 @@ T setErasureCodingPolicyId(byte ecPolicyId) { @Override public void writeFields(DataOutputStream out) throws IOException { + throw new IOException("Unsupported without logversion"); + } + + @Override + public void writeFields(DataOutputStream out, int logVersion) + throws IOException { FSImageSerialization.writeLong(inodeId, out); FSImageSerialization.writeString(path, out); FSImageSerialization.writeShort(replication, out); @@ -564,7 +575,10 @@ public void writeFields(DataOutputStream out) throws IOException { FSImageSerialization.writeString(clientMachine,out); FSImageSerialization.writeBoolean(overwrite, out); FSImageSerialization.writeByte(storagePolicyId, out); - FSImageSerialization.writeByte(erasureCodingPolicyId, out); + if (NameNodeLayoutVersion.supports( + NameNodeLayoutVersion.Feature.ERASURE_CODING, logVersion)) { + FSImageSerialization.writeByte(erasureCodingPolicyId, out); + } // write clientId and callId writeRpcIds(rpcClientId, rpcCallId, out); } @@ -4854,16 +4868,18 @@ public Writer(DataOutputBuffer out) { * Write an operation to the output stream * * @param op The operation to write + * @param logVersion The version of edit log * @throws IOException if an error occurs during writing. 
*/ - public void writeOp(FSEditLogOp op) throws IOException { + public void writeOp(FSEditLogOp op, int logVersion) + throws IOException { int start = buf.getLength(); // write the op code first to make padding and terminator verification // work buf.writeByte(op.opCode.getOpCode()); buf.writeInt(0); // write 0 for the length first buf.writeLong(op.txid); - op.writeFields(buf); + op.writeFields(buf, logVersion); int end = buf.getLength(); // write the length back: content of the op + 4 bytes checksum - op_code diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 58fb93446b42e..c8af720d1d048 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2527,6 +2527,7 @@ private HdfsFileStatus startFileInt(String src, final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp .getErasureCodingPolicy(this, ecPolicyName, iip); if (ecPolicy != null && (!ecPolicy.isReplicationPolicy())) { + checkErasureCodingSupported("createWithEC"); if (blockSize < ecPolicy.getCellSize()) { throw new IOException("Specified block size (" + blockSize + ") is less than the cell size (" + ecPolicy.getCellSize() @@ -7597,6 +7598,7 @@ void setErasureCodingPolicy(final String srcArg, final String ecPolicyName, UnresolvedLinkException, SafeModeException, AccessControlException { final String operationName = "setErasureCodingPolicy"; checkOperation(OperationCategory.WRITE); + checkErasureCodingSupported(operationName); FileStatus resultingStat = null; final FSPermissionChecker pc = getPermissionChecker(); boolean success = false; @@ -7629,6 +7631,7 @@ AddErasureCodingPolicyResponse[] addErasureCodingPolicies( final String operationName = "addErasureCodingPolicies"; List addECPolicyNames = new ArrayList<>(policies.length); checkOperation(OperationCategory.WRITE); + checkErasureCodingSupported(operationName); List responses = new ArrayList<>(policies.length); boolean success = false; @@ -7670,6 +7673,7 @@ void removeErasureCodingPolicy(String ecPolicyName, final boolean logRetryCache) throws IOException { final String operationName = "removeErasureCodingPolicy"; checkOperation(OperationCategory.WRITE); + checkErasureCodingSupported(operationName); boolean success = false; writeLock(); try { @@ -7700,6 +7704,7 @@ boolean enableErasureCodingPolicy(String ecPolicyName, final boolean logRetryCache) throws IOException { final String operationName = "enableErasureCodingPolicy"; checkOperation(OperationCategory.WRITE); + checkErasureCodingSupported(operationName); boolean success = false; writeLock(); try { @@ -7731,6 +7736,7 @@ boolean disableErasureCodingPolicy(String ecPolicyName, final boolean logRetryCache) throws IOException { final String operationName = "disableErasureCodingPolicy"; checkOperation(OperationCategory.WRITE); + checkErasureCodingSupported(operationName); boolean success = false; LOG.info("Disable the erasure coding policy " + ecPolicyName); writeLock(); @@ -7764,6 +7770,7 @@ void unsetErasureCodingPolicy(final String srcArg, UnresolvedLinkException, SafeModeException, AccessControlException { final String operationName = "unsetErasureCodingPolicy"; checkOperation(OperationCategory.WRITE); + checkErasureCodingSupported(operationName); FileStatus resultingStat = null; final 
FSPermissionChecker pc = getPermissionChecker(); boolean success = false; @@ -7791,6 +7798,7 @@ ErasureCodingPolicy getErasureCodingPolicy(String src) final String operationName = "getErasureCodingPolicy"; boolean success = false; checkOperation(OperationCategory.READ); + checkErasureCodingSupported(operationName); final FSPermissionChecker pc = getPermissionChecker(); readLock(); try { @@ -7812,6 +7820,7 @@ ErasureCodingPolicyInfo[] getErasureCodingPolicies() throws IOException { final String operationName = "getErasureCodingPolicies"; boolean success = false; checkOperation(OperationCategory.READ); + checkErasureCodingSupported(operationName); readLock(); try { checkOperation(OperationCategory.READ); @@ -7832,6 +7841,7 @@ Map getErasureCodingCodecs() throws IOException { final String operationName = "getErasureCodingCodecs"; boolean success = false; checkOperation(OperationCategory.READ); + checkErasureCodingSupported(operationName); readLock(); try { checkOperation(OperationCategory.READ); @@ -8224,5 +8234,19 @@ String getFailedStorageCommand(String mode) { return "disableRestoreFailedStorage"; } } + + /** + * Check whether operation is supported. + * @param operationName the name of operation. + * @throws UnsupportedActionException throws UAE if not supported. + */ + public void checkErasureCodingSupported(String operationName) + throws UnsupportedActionException { + if (!NameNodeLayoutVersion.supports( + NameNodeLayoutVersion.Feature.ERASURE_CODING, + getEffectiveLayoutVersion())) { + throw new UnsupportedActionException(operationName + " not supported."); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java index 5d939c83bd0d7..9d95bfd551e10 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java @@ -56,7 +56,7 @@ public static byte[] createTxnData(int startTxn, int numTxns) throws Exception { for (long txid = startTxn; txid < startTxn + numTxns; txid++) { FSEditLogOp op = NameNodeAdapter.createMkdirOp("tx " + txid); op.setTransactionId(txid); - writer.writeOp(op); + writer.writeOp(op, FAKE_NSINFO.getLayoutVersion()); } return Arrays.copyOf(buf.getData(), buf.getLength()); @@ -73,7 +73,7 @@ public static byte[] createGabageTxns(long startTxId, int numTxns) for (long txid = startTxId; txid < startTxId + numTxns; txid++) { FSEditLogOp op = new TestEditLog.GarbageMkdirOp(); op.setTransactionId(txid); - writer.writeOp(op); + writer.writeOp(op, FAKE_NSINFO.getLayoutVersion()); } return Arrays.copyOf(buf.getData(), buf.getLength()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 8eac14343a1d8..bf67ddd6bd7be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -64,13 +64,16 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; import 
org.apache.hadoop.hdfs.DFSInotifyEventInputStream; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; @@ -1710,4 +1713,77 @@ public void testReadActivelyUpdatedLog() throws Exception { LogManager.getRootLogger().removeAppender(appender); } } + + /** + * Test edits can be writen and read without ErasureCoding supported. + */ + @Test + public void testEditLogWithoutErasureCodingSupported() + throws IOException { + Configuration conf = getConf(); + MiniDFSCluster cluster = null; + + // ERASURECODING not supported + int logVersion = -61; + assertFalse(NameNodeLayoutVersion.supports( + NameNodeLayoutVersion.Feature.ERASURE_CODING, logVersion)); + + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + + final FSNamesystem namesystem = cluster.getNamesystem(); + FSImage fsimage = namesystem.getFSImage(); + FileSystem fileSys = cluster.getFileSystem(); + final FSEditLog editLog = fsimage.getEditLog(); + editLog.editLogStream.setCurrentLogVersion(logVersion); + // Write new version edit log + long txid = editLog.rollEditLog(logVersion); + + String testDir = "/test"; + String testFile = "testfile_001"; + String testFilePath = testDir + "/" + testFile; + + fileSys.mkdirs(new Path(testDir), new FsPermission("755")); + + // Create a file + Path p = new Path(testFilePath); + DFSTestUtil.createFile(fileSys, p, 0, (short) 1, 1); + + long blkId = 1; + long blkNumBytes = 1024; + long timestamp = 1426222918; + // Add a block to the file + BlockInfoContiguous blockInfo = + new BlockInfoContiguous( + new Block(blkId, blkNumBytes, timestamp), + (short)1); + INodeFile file + = (INodeFile)namesystem.getFSDirectory().getINode(testFilePath); + file.addBlock(blockInfo); + file.toUnderConstruction("testClient", "testMachine"); + + // Write edit log + editLog.logAddBlock(testFilePath, file); + editLog.rollEditLog(logVersion); + + // Read edit log + Collection editStreams + = editLog.selectInputStreams(txid, txid + 1); + EditLogInputStream inputStream = null; + for (EditLogInputStream s : editStreams) { + if (s.getFirstTxId() == txid) { + inputStream = s; + break; + } + } + assertNotNull(inputStream); + int readLogVersion = inputStream.getVersion(false); + assertEquals(logVersion, readLogVersion); + FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0); + long records = loader.loadFSEdits(inputStream, txid); + assertTrue(records > 0); + + editLog.close(); + cluster.shutdown(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java index b75309e64cfc8..3b15c2db7a9f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java @@ -87,6 +87,9 @@ public void 
shouldFailToCloseWhenUnflushed() throws IOException { @Test public void testDumpEdits() throws IOException { final int defaultBufferSize = 256; + final int fakeLogVersion = + NameNodeLayoutVersion.Feature.ROLLING_UPGRADE + .getInfo().getLayoutVersion(); EditsDoubleBuffer buffer = new EditsDoubleBuffer(defaultBufferSize); FSEditLogOp.OpInstanceCache cache = new FSEditLogOp.OpInstanceCache(); @@ -98,7 +101,7 @@ public void testDumpEdits() throws IOException { .setPath(src) .setReplication(replication); op.setTransactionId(1); - buffer.writeOp(op); + buffer.writeOp(op, fakeLogVersion); src = "/testdumpedits2"; @@ -107,13 +110,13 @@ public void testDumpEdits() throws IOException { .setPath(src) .setTimestamp(0); op2.setTransactionId(2); - buffer.writeOp(op2); + buffer.writeOp(op2, fakeLogVersion); FSEditLogOp.AllocateBlockIdOp op3 = FSEditLogOp.AllocateBlockIdOp.getInstance(cache.get()) .setBlockId(0); op3.setTransactionId(3); - buffer.writeOp(op3); + buffer.writeOp(op3, fakeLogVersion); GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(EditsDoubleBuffer.LOG); From edd708527d34d0bf3b09dc35a7f645f49e7becb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Fri, 23 Aug 2019 08:15:43 +0200 Subject: [PATCH 0819/1308] HDDS-1948. S3 MPU can't be created with octet-stream content-type (#1266) --- .../hadoop/ozone/s3/HeaderPreprocessor.java | 31 ++++++++-- .../hadoop/ozone/s3/S3GatewayHttpServer.java | 5 ++ .../ozone/s3/VirtualHostStyleFilter.java | 4 ++ .../ozone/s3/endpoint/ObjectEndpoint.java | 56 +++++++------------ .../s3/endpoint/TestAbortMultipartUpload.java | 2 +- .../endpoint/TestInitiateMultipartUpload.java | 4 +- .../ozone/s3/endpoint/TestListParts.java | 2 +- .../endpoint/TestMultipartUploadComplete.java | 4 +- .../ozone/s3/endpoint/TestPartUpload.java | 4 +- 9 files changed, 63 insertions(+), 49 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java index 0a1480adff02f..db94bbbb75058 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java @@ -17,39 +17,60 @@ */ package org.apache.hadoop.ozone.s3; +import javax.annotation.Priority; import javax.ws.rs.container.ContainerRequestContext; import javax.ws.rs.container.ContainerRequestFilter; import javax.ws.rs.container.PreMatching; import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.ext.Provider; import java.io.IOException; /** * Filter to adjust request headers for compatible reasons. + * + * It should be executed AFTER signature check (VirtualHostStyleFilter) as the + * original Content-Type could be part of the base of the signature. 
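For context on why that ordering matters: with AWS Signature Version 4 the client can list Content-Type among its signed headers, so the signature is computed over the exact header value the client sent. A rough, self-contained sketch of the canonical request a verifier rebuilds (the path, host and digest below are illustrative placeholders, not values from the gateway):

import java.util.StringJoiner;

public class SigV4CanonicalRequestSketch {
  public static void main(String[] args) {
    // Simplified SigV4 canonical request: when the client lists content-type
    // among its signed headers, the header value it sent is baked into the
    // signature, so it must not be rewritten before verification.
    String payloadHash = "UNSIGNED-PAYLOAD";              // placeholder digest
    StringJoiner canonical = new StringJoiner("\n");
    canonical.add("POST")                                 // HTTP method
        .add("/bucket/key")                               // canonical URI (illustrative)
        .add("uploads=")                                  // canonical query string for ?uploads
        .add("content-type:application/octet-stream")     // canonical header, exactly as sent
        .add("host:s3g.example.com")                      // illustrative host header
        .add("")                                          // blank line closing the headers
        .add("content-type;host")                         // signed header names
        .add(payloadHash);
    System.out.println(canonical);
  }
}

Rewriting Content-Type before this string is reconstructed and checked would make the two signatures disagree, hence the preprocessor is ordered after the signature filter.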
*/ - @Provider @PreMatching +@Priority(VirtualHostStyleFilter.PRIORITY + + S3GatewayHttpServer.FILTER_PRIORITY_DO_AFTER) public class HeaderPreprocessor implements ContainerRequestFilter { + public static final String MULTIPART_UPLOAD_MARKER = "ozone/mpu"; + @Override public void filter(ContainerRequestContext requestContext) throws IOException { - if (requestContext.getUriInfo().getQueryParameters() - .containsKey("delete")) { + MultivaluedMap queryParameters = + requestContext.getUriInfo().getQueryParameters(); + + if (queryParameters.containsKey("delete")) { //aws cli doesn't send proper Content-Type and by default POST requests //processed as form-url-encoded. Here we can fix this. requestContext.getHeaders() .putSingle("Content-Type", MediaType.APPLICATION_XML); } - if (requestContext.getUriInfo().getQueryParameters() - .containsKey("uploadId")) { + if (queryParameters.containsKey("uploadId")) { //aws cli doesn't send proper Content-Type and by default POST requests //processed as form-url-encoded. Here we can fix this. requestContext.getHeaders() .putSingle("Content-Type", MediaType.APPLICATION_XML); + } else if (queryParameters.containsKey("uploads")) { + // uploads defined but uploadId is not --> this is the creation of the + // multi-part-upload requests. + // + //In AWS SDK for go uses application/octet-stream which also + //should be fixed to route the request to the right jaxrs method. + // + //Should be empty instead of XML as the body is empty which can not be + //serialized as as CompleteMultipartUploadRequest + requestContext.getHeaders() + .putSingle("Content-Type", MULTIPART_UPLOAD_MARKER); } + } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java index f20b928b1f952..f3d83412ae647 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java @@ -27,6 +27,11 @@ */ public class S3GatewayHttpServer extends BaseHttpServer { + /** + * Default offset between two filters. 
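JAX-RS runs request filters in ascending @Priority order, so a filter with a smaller value sees the request first. A minimal sketch of that ordering, reusing the 100 and 100 + 50 values that the constants in this patch resolve to (the filter class names are invented for the example):

import javax.annotation.Priority;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.container.PreMatching;
import javax.ws.rs.ext.Provider;

@Provider
@PreMatching
@Priority(100)        // smaller value: this request filter runs first
class EarlyFilterSketch implements ContainerRequestFilter {
  @Override
  public void filter(ContainerRequestContext ctx) {
    // work that must see the request exactly as the client sent it,
    // e.g. a signature check
  }
}

@Provider
@PreMatching
@Priority(100 + 50)   // larger value: runs after the filter above
class LateFilterSketch implements ContainerRequestFilter {
  @Override
  public void filter(ContainerRequestContext ctx) {
    // header rewriting is safe here, after the earlier filter has run
  }
}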
+ */ + public static final int FILTER_PRIORITY_DO_AFTER = 50; + public S3GatewayHttpServer(Configuration conf, String name) throws IOException { super(conf, name); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java index 50014fea926f2..9ce98e11ee140 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.s3; +import javax.annotation.Priority; import javax.inject.Inject; import javax.ws.rs.container.ContainerRequestContext; import javax.ws.rs.container.ContainerRequestFilter; @@ -46,8 +47,11 @@ @Provider @PreMatching +@Priority(VirtualHostStyleFilter.PRIORITY) public class VirtualHostStyleFilter implements ContainerRequestFilter { + public static final int PRIORITY = 100; + private static final Logger LOG = LoggerFactory.getLogger( VirtualHostStyleFilter.class); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index b520e7be0bf3a..70bfb7f8e063f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.s3.endpoint; +import javax.ws.rs.Consumes; import javax.ws.rs.DELETE; import javax.ws.rs.DefaultValue; import javax.ws.rs.GET; @@ -58,6 +59,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; +import org.apache.hadoop.ozone.s3.HeaderPreprocessor; import org.apache.hadoop.ozone.s3.SignedChunksInputStream; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; @@ -417,33 +419,19 @@ public Response delete( } + /** + * Initialize MultiPartUpload request. + *
    + * Note: the specific content type is set by the HeaderPreprocessor. + */ @POST @Produces(MediaType.APPLICATION_XML) - public Response multipartUpload( + @Consumes(HeaderPreprocessor.MULTIPART_UPLOAD_MARKER) + public Response initializeMultipartUpload( @PathParam("bucket") String bucket, - @PathParam("path") String key, - @QueryParam("uploads") String uploads, - @QueryParam("uploadId") @DefaultValue("") String uploadID, - CompleteMultipartUploadRequest request) throws IOException, OS3Exception { - if (!uploadID.equals("")) { - //Complete Multipart upload request. - return completeMultipartUpload(bucket, key, uploadID, request); - } else { - // Initiate Multipart upload request. - return initiateMultipartUpload(bucket, key); - } - } - - /** - * Initiate Multipart upload request. - * @param bucket - * @param key - * @return Response - * @throws IOException - * @throws OS3Exception - */ - private Response initiateMultipartUpload(String bucket, String key) throws - IOException, OS3Exception { + @PathParam("path") String key + ) + throws IOException, OS3Exception { try { OzoneBucket ozoneBucket = getBucket(bucket); String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER); @@ -473,7 +461,6 @@ private Response initiateMultipartUpload(String bucket, String key) throws multipartUploadInitiateResponse.setKey(key); multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID()); - return Response.status(Status.OK).entity( multipartUploadInitiateResponse).build(); } catch (IOException ex) { @@ -484,18 +471,15 @@ private Response initiateMultipartUpload(String bucket, String key) throws } /** - * Complete Multipart upload request. - * @param bucket - * @param key - * @param uploadID - * @param multipartUploadRequest - * @return Response - * @throws IOException - * @throws OS3Exception + * Complete a multipart upload. 
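Creation and completion of a multipart upload are now two separate @POST methods on the same resource, and JAX-RS picks between them by the request's already rewritten Content-Type: the ozone/mpu marker set by HeaderPreprocessor routes to the initiate method, while the XML content type of the complete call selects the method that reads the CompleteMultipartUploadRequest body. A minimal sketch of that consumes-based dispatch (class and method names are illustrative, not the gateway's actual signatures):

import javax.ws.rs.Consumes;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

@Path("/{bucket}/{path:.+}")
public class MpuDispatchSketch {

  @POST
  @Consumes("ozone/mpu")                 // selected when the preprocessor set the marker type
  public Response initiate() {
    return Response.ok("initiate").build();
  }

  @POST
  @Consumes(MediaType.APPLICATION_XML)   // selected for the complete call, which posts an XML body
  public Response complete(String xmlBody) {
    return Response.ok("complete").build();
  }
}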
*/ - private Response completeMultipartUpload(String bucket, String key, String - uploadID, CompleteMultipartUploadRequest multipartUploadRequest) throws - IOException, OS3Exception { + @POST + @Produces(MediaType.APPLICATION_XML) + public Response completeMultipartUpload(@PathParam("bucket") String bucket, + @PathParam("path") String key, + @QueryParam("uploadId") @DefaultValue("") String uploadID, + CompleteMultipartUploadRequest multipartUploadRequest) + throws IOException, OS3Exception { OzoneBucket ozoneBucket = getBucket(bucket); Map partsMap = new TreeMap<>(); List partList = diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java index 76d4a12b7bbd1..912a769cd3f82 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java @@ -56,7 +56,7 @@ public void testAbortMultipartUpload() throws Exception { rest.setHeaders(headers); rest.setClient(client); - Response response = rest.multipartUpload(bucket, key, "", "", null); + Response response = rest.initializeMultipartUpload(bucket, key); assertEquals(response.getStatus(), 200); MultipartUploadInitiateResponse multipartUploadInitiateResponse = diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java index 6f48ecb509a8b..212721af00f91 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java @@ -60,7 +60,7 @@ public void testInitiateMultipartUpload() throws Exception { rest.setHeaders(headers); rest.setClient(client); - Response response = rest.multipartUpload(bucket, key, "", "", null); + Response response = rest.initializeMultipartUpload(bucket, key); assertEquals(response.getStatus(), 200); MultipartUploadInitiateResponse multipartUploadInitiateResponse = @@ -69,7 +69,7 @@ public void testInitiateMultipartUpload() throws Exception { String uploadID = multipartUploadInitiateResponse.getUploadID(); // Calling again should return different uploadID. 
- response = rest.multipartUpload(bucket, key, "", "", null); + response = rest.initializeMultipartUpload(bucket, key); assertEquals(response.getStatus(), 200); multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index ac6aa72e4f188..21545ec9b0704 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -61,7 +61,7 @@ public static void setUp() throws Exception { REST.setHeaders(headers); REST.setClient(client); - Response response = REST.multipartUpload(BUCKET, KEY, "", "", null); + Response response = REST.initializeMultipartUpload(BUCKET, KEY); MultipartUploadInitiateResponse multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity(); assertNotNull(multipartUploadInitiateResponse.getUploadID()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index 8be61310f99b2..b9e3885ac6f3c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -70,7 +70,7 @@ public static void setUp() throws Exception { private String initiateMultipartUpload(String key) throws IOException, OS3Exception { - Response response = REST.multipartUpload(BUCKET, key, "", "", null); + Response response = REST.initializeMultipartUpload(BUCKET, key); MultipartUploadInitiateResponse multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity(); assertNotNull(multipartUploadInitiateResponse.getUploadID()); @@ -99,7 +99,7 @@ private Part uploadPart(String key, String uploadID, int partNumber, String private void completeMultipartUpload(String key, CompleteMultipartUploadRequest completeMultipartUploadRequest, String uploadID) throws IOException, OS3Exception { - Response response = REST.multipartUpload(BUCKET, key, "", uploadID, + Response response = REST.completeMultipartUpload(BUCKET, key, uploadID, completeMultipartUploadRequest); assertEquals(response.getStatus(), 200); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index 120fbb2f2ee56..3e91a77ffd4ae 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -67,7 +67,7 @@ public static void setUp() throws Exception { @Test public void testPartUpload() throws Exception { - Response response = REST.multipartUpload(BUCKET, KEY, "", "", null); + Response response = REST.initializeMultipartUpload(BUCKET, KEY); MultipartUploadInitiateResponse multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity(); assertNotNull(multipartUploadInitiateResponse.getUploadID()); @@ -86,7 +86,7 @@ public void testPartUpload() throws Exception { @Test public void 
testPartUploadWithOverride() throws Exception { - Response response = REST.multipartUpload(BUCKET, KEY, "", "", null); + Response response = REST.initializeMultipartUpload(BUCKET, KEY); MultipartUploadInitiateResponse multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity(); assertNotNull(multipartUploadInitiateResponse.getUploadID()); From bd7baea5a5d4ff351645e34c0ef09b7ba82f4285 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 23 Aug 2019 15:27:32 +0900 Subject: [PATCH 0820/1308] HDFS-14396. Failed to load image from FSImageFile when downgrade from 3.x to 2.x. Contributed by Fei Hui. --- .../server/namenode/FSImageFormatProtobuf.java | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index 3144d4b17cfa6..51379b8657392 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -813,6 +813,8 @@ private long saveInternal(FileOutputStream fout, FSImageCompression compression, String filePath) throws IOException { StartupProgress prog = NameNode.getStartupProgress(); MessageDigest digester = MD5Hash.getDigester(); + int layoutVersion = + context.getSourceNamesystem().getEffectiveLayoutVersion(); underlyingOutputStream = new DigestOutputStream(new BufferedOutputStream( fout), digester); @@ -839,11 +841,16 @@ private long saveInternal(FileOutputStream fout, // depends on this behavior. context.checkCancelled(); + Step step; + // Erasure coding policies should be saved before inodes - Step step = new Step(StepType.ERASURE_CODING_POLICIES, filePath); - prog.beginStep(Phase.SAVING_CHECKPOINT, step); - saveErasureCodingSection(b); - prog.endStep(Phase.SAVING_CHECKPOINT, step); + if (NameNodeLayoutVersion.supports( + NameNodeLayoutVersion.Feature.ERASURE_CODING, layoutVersion)) { + step = new Step(StepType.ERASURE_CODING_POLICIES, filePath); + prog.beginStep(Phase.SAVING_CHECKPOINT, step); + saveErasureCodingSection(b); + prog.endStep(Phase.SAVING_CHECKPOINT, step); + } step = new Step(StepType.INODES, filePath); prog.beginStep(Phase.SAVING_CHECKPOINT, step); From b4a95a2b00f2fb560de9c462fba25b9dad37aca4 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Fri, 23 Aug 2019 10:56:00 +0200 Subject: [PATCH 0821/1308] HDDS-2000. 
Don't depend on bootstrap/jquery versions from hadoop-trunk snapshot Closes #1339 --- .../ozonedoc/layouts/partials/footer.html | 2 +- .../ozonedoc/layouts/partials/navbar.html | 7 +- .../static/css/bootstrap-theme.min.css | 6 +- .../static/css/bootstrap-theme.min.css.map | 2 +- .../ozonedoc/static/css/bootstrap.min.css | 6 +- .../ozonedoc/static/css/bootstrap.min.css.map | 2 +- .../ozonedoc/static/js/bootstrap.min.js | 7 +- .../ozonedoc/static/js/jquery-3.4.1.min.js | 2 + .../themes/ozonedoc/static/js/jquery.min.js | 5 - hadoop-hdds/framework/pom.xml | 27 - .../css/bootstrap-editable.css | 655 ++ .../bootstrap-3.4.1/css/bootstrap-theme.css | 587 ++ .../css/bootstrap-theme.css.map | 1 + .../css/bootstrap-theme.min.css | 6 + .../css/bootstrap-theme.min.css.map | 1 + .../static/bootstrap-3.4.1/css/bootstrap.css | 6834 +++++++++++++++++ .../bootstrap-3.4.1/css/bootstrap.css.map | 1 + .../bootstrap-3.4.1/css/bootstrap.min.css | 6 + .../bootstrap-3.4.1/css/bootstrap.min.css.map | 1 + .../fonts/glyphicons-halflings-regular.eot | Bin 0 -> 20127 bytes .../fonts/glyphicons-halflings-regular.svg | 288 + .../fonts/glyphicons-halflings-regular.ttf | Bin 0 -> 45404 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 0 -> 23424 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes .../js/bootstrap-editable.min.js | 7 + .../static/bootstrap-3.4.1/js/bootstrap.js | 2580 +++++++ .../bootstrap-3.4.1/js/bootstrap.min.js | 6 + .../main/resources/webapps/static/hadoop.css | 331 + .../webapps/static/jquery-3.4.1.min.js | 2 + hadoop-hdds/pom.xml | 2 + hadoop-ozone/pom.xml | 4 +- 31 files changed, 11330 insertions(+), 48 deletions(-) create mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/js/jquery.min.js create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-editable.css create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css.map create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css.map create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css.map create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css.map create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.eot create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.svg create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.ttf create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.woff create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.woff2 create mode 100644 
hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap-editable.min.js create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap.js create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap.min.js create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/hadoop.css create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.4.1.min.js diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html index 5aaeed9e1ed4a..0e5ca0fec2b1d 100644 --- a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html +++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html @@ -17,6 +17,6 @@ - + diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html index 316f2cc11cc7c..598ede6cddbb2 100644 --- a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html +++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html @@ -23,7 +23,12 @@ - + + + + Hadoop Ozone