From a4835db95a2cca255bf0e40fa5d016526ef03857 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 5 Jun 2020 10:11:40 +0200 Subject: [PATCH 001/131] YARN-10296. Make ContainerPBImpl#getId/setId synchronized. Contributed by Benjamin Teke --- .../hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java index b3dad0a0165a1..53d52d3c94a23 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java @@ -68,7 +68,7 @@ public ContainerPBImpl(ContainerProto proto) { viaProto = true; } - public ContainerProto getProto() { + synchronized public ContainerProto getProto() { mergeLocalToProto(); proto = viaProto ? proto : builder.build(); @@ -142,7 +142,7 @@ private void maybeInitBuilder() { } @Override - public ContainerId getId() { + synchronized public ContainerId getId() { ContainerProtoOrBuilder p = viaProto ? proto : builder; if (this.containerId != null) { return this.containerId; @@ -176,7 +176,7 @@ public NodeId getNodeId() { } @Override - public void setId(ContainerId id) { + synchronized public void setId(ContainerId id) { maybeInitBuilder(); if (id == null) builder.clearId(); From 8b146c17b35532af7b9685aa1896d2b4f4fadc05 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 5 Jun 2020 11:38:32 +0200 Subject: [PATCH 002/131] YARN-10274. Merge QueueMapping and QueueMappingEntity. 
Contributed by Gergely Pollak --- .../AppNameMappingPlacementRule.java | 18 ++-- .../placement/QueueMapping.java | 15 ++- .../placement/QueueMappingEntity.java | 98 ------------------- .../placement/QueuePlacementRuleUtils.java | 23 +++-- .../CapacitySchedulerConfiguration.java | 22 ++--- .../TestAppNameMappingPlacementRule.java | 22 +++-- .../placement/TestPlacementManager.java | 8 +- ...tCapacitySchedulerQueueMappingFactory.java | 14 +-- 8 files changed, 74 insertions(+), 146 deletions(-) delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMappingEntity.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java index c8a29b46e7d95..cf725b628625f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java @@ -48,7 +48,7 @@ public class AppNameMappingPlacementRule extends PlacementRule { private static final String QUEUE_MAPPING_NAME = "app-name"; private boolean overrideWithQueueMappings = false; - private List mappings = null; + private List mappings = null; protected CapacitySchedulerQueueManager queueManager; public AppNameMappingPlacementRule() { @@ -56,7 +56,7 @@ public AppNameMappingPlacementRule() { } public AppNameMappingPlacementRule(boolean overrideWithQueueMappings, - List newMappings) { + List newMappings) { this.overrideWithQueueMappings = overrideWithQueueMappings; this.mappings = newMappings; } @@ -76,16 +76,16 @@ public boolean initialize(ResourceScheduler scheduler) LOG.info( "Initialized App Name queue mappings, override: " + overrideWithQueueMappings); - List queueMappings = + List queueMappings = conf.getQueueMappingEntity(QUEUE_MAPPING_NAME); // Get new user mappings - List newMappings = new ArrayList<>(); + List newMappings = new ArrayList<>(); queueManager = schedulerContext.getCapacitySchedulerQueueManager(); // check if mappings refer to valid queues - for (QueueMappingEntity mapping : queueMappings) { + for (QueueMapping mapping : queueMappings) { QueuePath queuePath = mapping.getQueuePath(); if (isStaticQueueMapping(mapping)) { @@ -109,7 +109,7 @@ public boolean initialize(ResourceScheduler scheduler) //validate if parent queue is specified, // then it should exist and // be an instance of AutoCreateEnabledParentQueue - QueueMappingEntity newMapping = + QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping(queueManager, mapping, queuePath); if (newMapping == null) { @@ -123,7 +123,7 @@ public boolean initialize(ResourceScheduler scheduler) // if its an instance of leaf queue // if its an instance of auto created leaf queue, // then extract parent queue name and update queue mapping - QueueMappingEntity newMapping = validateAndGetQueueMapping( + QueueMapping newMapping = validateAndGetQueueMapping( queueManager, queue, mapping, queuePath); 
newMappings.add(newMapping); } @@ -134,7 +134,7 @@ public boolean initialize(ResourceScheduler scheduler) // if parent queue is specified, then // parent queue exists and an instance of AutoCreateEnabledParentQueue // - QueueMappingEntity newMapping = validateAndGetAutoCreatedQueueMapping( + QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( queueManager, mapping, queuePath); if (newMapping != null) { newMappings.add(newMapping); @@ -160,7 +160,7 @@ private static boolean ifQueueDoesNotExist(CSQueue queue) { private ApplicationPlacementContext getAppPlacementContext(String user, String applicationName) throws IOException { - for (QueueMappingEntity mapping : mappings) { + for (QueueMapping mapping : mappings) { if (mapping.getSource().equals(CURRENT_APP_MAPPING)) { if (mapping.getQueue().equals(CURRENT_APP_MAPPING)) { return getPlacementContext(mapping, applicationName, queueManager); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java index c3b3cc62a487a..3fcb5fe6b8368 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java @@ -82,6 +82,7 @@ private QueueMapping(QueueMappingBuilder builder) { this.source = builder.source; this.queue = builder.queue; this.parentQueue = builder.parentQueue; + this.fullPath = (parentQueue != null) ? (parentQueue + DOT + queue) : queue; } /** @@ -89,8 +90,9 @@ private QueueMapping(QueueMappingBuilder builder) { * */ public enum MappingType { - - USER("u"), GROUP("g"); + USER("u"), + GROUP("g"), + APPLICATION("a"); private final String type; @@ -108,6 +110,7 @@ public String toString() { private String source; private String queue; private String parentQueue; + private String fullPath; private final static String DELIMITER = ":"; @@ -132,7 +135,7 @@ public String getSource() { } public String getFullPath() { - return (parentQueue != null ? parentQueue + DOT + queue : queue); + return fullPath; } public QueuePath getQueuePath() { @@ -197,4 +200,10 @@ public String toString() { return type.toString() + DELIMITER + source + DELIMITER + (parentQueue != null ? parentQueue + "." + queue : queue); } + + public String toTypelessString() { + return source + DELIMITER + + (parentQueue != null ? parentQueue + "." 
+ queue : queue); + } + } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMappingEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMappingEntity.java deleted file mode 100644 index ccb611a6c1ae7..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMappingEntity.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.yarn.server.resourcemanager.placement; - -import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT; - -public class QueueMappingEntity { - private String source; - private String queue; - private String parentQueue; - private String fullPath; - - public final static String DELIMITER = ":"; - - public QueueMappingEntity(String source, String queue) { - this.source = source; - this.queue = queue; - this.parentQueue = null; - this.fullPath = queue; - } - public QueueMappingEntity(String source, String queue, String parentQueue) { - this.source = source; - this.queue = queue; - this.parentQueue = parentQueue; - this.fullPath = parentQueue + DOT + queue; - } - - public QueueMappingEntity(String source, QueuePath path) { - this.source = source; - this.queue = path.getLeafQueue(); - this.parentQueue = path.getParentQueue(); - this.fullPath = parentQueue + DOT + queue; - } - - public String getQueue() { - return queue; - } - - public String getParentQueue() { - return parentQueue; - } - - public String getFullPath() { - return fullPath; - } - - public String getSource() { - return source; - } - - public boolean hasParentQueue() { - return parentQueue != null; - } - - public QueuePath getQueuePath() { - //This is to make sure the parsing is the same everywhere, but the - //whole parsing part should be moved to QueuePathConstructor - return QueuePlacementRuleUtils.extractQueuePath(getFullPath()); - } - - @Override - public int hashCode() { - return super.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof QueueMappingEntity) { - QueueMappingEntity other = (QueueMappingEntity) obj; - return (other.source.equals(source) && - other.queue.equals(queue)); - } else { - return false; - } - } - - public String toString() { - return source + DELIMITER + (parentQueue != null ? 
- parentQueue + DOT + queue : - queue); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java index b9b35c1007d2d..350f2b93d8ce5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java @@ -65,8 +65,8 @@ public static void validateQueueMappingUnderParentQueue( } } - public static QueueMappingEntity validateAndGetAutoCreatedQueueMapping( - CapacitySchedulerQueueManager queueManager, QueueMappingEntity mapping, + public static QueueMapping validateAndGetAutoCreatedQueueMapping( + CapacitySchedulerQueueManager queueManager, QueueMapping mapping, QueuePath queuePath) throws IOException { if (queuePath.hasParentQueue()) { //if parent queue is specified, @@ -74,16 +74,19 @@ public static QueueMappingEntity validateAndGetAutoCreatedQueueMapping( validateQueueMappingUnderParentQueue(queueManager.getQueue( queuePath.getParentQueue()), queuePath.getParentQueue(), queuePath.getFullPath()); - return new QueueMappingEntity(mapping.getSource(), - queuePath.getFullPath(), queuePath.getParentQueue()); + return QueueMapping.QueueMappingBuilder.create() + .type(mapping.getType()) + .source(mapping.getSource()) + .queuePath(queuePath) + .build(); } return null; } - public static QueueMappingEntity validateAndGetQueueMapping( + public static QueueMapping validateAndGetQueueMapping( CapacitySchedulerQueueManager queueManager, CSQueue queue, - QueueMappingEntity mapping, QueuePath queuePath) throws IOException { + QueueMapping mapping, QueuePath queuePath) throws IOException { if (!(queue instanceof LeafQueue)) { throw new IOException( "mapping contains invalid or non-leaf queue : " + @@ -93,7 +96,7 @@ public static QueueMappingEntity validateAndGetQueueMapping( if (queue instanceof AutoCreatedLeafQueue && queue .getParent() instanceof ManagedParentQueue) { - QueueMappingEntity newMapping = validateAndGetAutoCreatedQueueMapping( + QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( queueManager, mapping, queuePath); if (newMapping == null) { throw new IOException( @@ -105,7 +108,7 @@ public static QueueMappingEntity validateAndGetQueueMapping( return mapping; } - public static boolean isStaticQueueMapping(QueueMappingEntity mapping) { + public static boolean isStaticQueueMapping(QueueMapping mapping) { return !mapping.getQueue().contains(CURRENT_USER_MAPPING) && !mapping .getQueue().contains(PRIMARY_GROUP_MAPPING) && !mapping.getQueue().contains(SECONDARY_GROUP_MAPPING); @@ -126,13 +129,13 @@ public static QueuePath extractQueuePath(String queuePath) { } public static ApplicationPlacementContext getPlacementContext( - QueueMappingEntity mapping, CapacitySchedulerQueueManager queueManager) + QueueMapping mapping, CapacitySchedulerQueueManager queueManager) throws IOException { return getPlacementContext(mapping, mapping.getQueue(), queueManager); } public static ApplicationPlacementContext getPlacementContext( - QueueMappingEntity mapping, 
String leafQueueName, + QueueMapping mapping, String leafQueueName, CapacitySchedulerQueueManager queueManager) throws IOException { //leafQueue name no longer identifies a queue uniquely checking ambiguity diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index f060b56e939cf..7f4150fab1656 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -42,7 +42,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.QueueMappingBuilder; -import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMappingEntity; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AppPriorityACLConfigurationParser.AppPriorityACLKeyType; @@ -1039,12 +1038,12 @@ public void setOverrideWithQueueMappings(boolean overrideWithQueueMappings) { setBoolean(ENABLE_QUEUE_MAPPING_OVERRIDE, overrideWithQueueMappings); } - public List getQueueMappingEntity( + public List getQueueMappingEntity( String queueMappingSuffix) { String queueMappingName = buildQueueMappingRuleProperty(queueMappingSuffix); - List mappings = - new ArrayList(); + List mappings = + new ArrayList(); Collection mappingsString = getTrimmedStringCollection(queueMappingName); for (String mappingValue : mappingsString) { @@ -1058,10 +1057,11 @@ public List getQueueMappingEntity( //Mappings should be consistent, and have the parent path parsed // from the beginning - QueueMappingEntity m = new QueueMappingEntity( - mapping[0], - QueuePlacementRuleUtils.extractQueuePath(mapping[1])); - + QueueMapping m = QueueMapping.QueueMappingBuilder.create() + .type(QueueMapping.MappingType.APPLICATION) + .source(mapping[0]) + .queuePath(QueuePlacementRuleUtils.extractQueuePath(mapping[1])) + .build(); mappings.add(m); } @@ -1076,15 +1076,15 @@ private String buildQueueMappingRuleProperty (String queueMappingSuffix) { } @VisibleForTesting - public void setQueueMappingEntities(List queueMappings, + public void setQueueMappingEntities(List queueMappings, String queueMappingSuffix) { if (queueMappings == null) { return; } List queueMappingStrs = new ArrayList<>(); - for (QueueMappingEntity mapping : queueMappings) { - queueMappingStrs.add(mapping.toString()); + for (QueueMapping mapping : queueMappings) { + queueMappingStrs.add(mapping.toTypelessString()); } String mappingRuleProp = buildQueueMappingRuleProperty(queueMappingSuffix); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java index f7c7b501b2b2f..1204213cf9162 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java @@ -47,13 +47,13 @@ public void setup() { SimpleGroupsMapping.class, GroupMappingServiceProvider.class); } - private void verifyQueueMapping(QueueMappingEntity queueMapping, + private void verifyQueueMapping(QueueMapping queueMapping, String user, String expectedQueue) throws YarnException { verifyQueueMapping(queueMapping, user, queueMapping.getQueue(), expectedQueue, false); } - private void verifyQueueMapping(QueueMappingEntity queueMapping, + private void verifyQueueMapping(QueueMapping queueMapping, String user, String inputQueue, String expectedQueue, boolean overwrite) throws YarnException { AppNameMappingPlacementRule rule = new AppNameMappingPlacementRule( @@ -81,23 +81,31 @@ private void verifyQueueMapping(QueueMappingEntity queueMapping, ctx != null ? ctx.getQueue() : inputQueue); } + public QueueMapping queueMappingBuilder(String source, String queue) { + return QueueMapping.QueueMappingBuilder.create() + .type(QueueMapping.MappingType.APPLICATION) + .source(source) + .queue(queue) + .build(); + } + @Test public void testMapping() throws YarnException { // simple base case for mapping user to queue - verifyQueueMapping(new QueueMappingEntity(APP_NAME, + verifyQueueMapping(queueMappingBuilder(APP_NAME, "q1"), "user_1", "q1"); - verifyQueueMapping(new QueueMappingEntity("%application", "q2"), "user_1", + verifyQueueMapping(queueMappingBuilder("%application", "q2"), "user_1", "q2"); - verifyQueueMapping(new QueueMappingEntity("%application", "%application"), + verifyQueueMapping(queueMappingBuilder("%application", "%application"), "user_1", APP_NAME); // specify overwritten, and see if user specified a queue, and it will be // overridden - verifyQueueMapping(new QueueMappingEntity(APP_NAME, + verifyQueueMapping(queueMappingBuilder(APP_NAME, "q1"), "1", "q2", "q1", true); // if overwritten not specified, it should be which user specified - verifyQueueMapping(new QueueMappingEntity(APP_NAME, + verifyQueueMapping(queueMappingBuilder(APP_NAME, "q1"), "1", "q2", "q2", false); } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java index fa95caec10029..22a9125576f1b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java @@ -95,8 +95,12 @@ public void testPlaceApplicationWithPlacementRuleChain() throws Exception { Assert.assertNull("Placement should be null", pm.placeApplication(asc, USER2)); - QueueMappingEntity queueMappingEntity = new QueueMappingEntity(APP_NAME, - USER1, PARENT_QUEUE); + QueueMapping queueMappingEntity = QueueMapping.QueueMappingBuilder.create() + .type(MappingType.APPLICATION) + .source(APP_NAME) + .queue(USER1) + .parentQueue(PARENT_QUEUE) + .build(); AppNameMappingPlacementRule anRule = new AppNameMappingPlacementRule(false, Arrays.asList(queueMappingEntity)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java index eedc6aecde8f1..5beda2522561a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java @@ -29,7 +29,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.MappingType; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.QueueMappingBuilder; -import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMappingEntity; import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SimpleGroupsMapping; @@ -84,17 +83,20 @@ public static CapacitySchedulerConfiguration setupQueueMappingsForRules( existingMappingsForUG.addAll(queueMappingsForUG); conf.setQueueMappings(existingMappingsForUG); - List existingMappingsForAN = + List existingMappingsForAN = conf.getQueueMappingEntity(QUEUE_MAPPING_NAME); //set queue mapping - List queueMappingsForAN = + List queueMappingsForAN = new ArrayList<>(); for (int i = 0; i < sourceIds.length; i++) { //Set C as parent queue name for auto queue creation - QueueMappingEntity queueMapping = - new QueueMappingEntity(USER + sourceIds[i], - getQueueMapping(parentQueue, USER + sourceIds[i])); + QueueMapping queueMapping = QueueMapping.QueueMappingBuilder.create() + .type(MappingType.APPLICATION) + .source(USER + sourceIds[i]) + .queue(getQueueMapping(parentQueue, USER + sourceIds[i])) + .build(); + queueMappingsForAN.add(queueMapping); } From 545a0a147c5256c44911ba57b4898e01d786d836 Mon Sep 17 00:00:00 2001 From: Toshihiro Suzuki Date: Fri, 5 Jun 2020 19:11:49 +0900 Subject: [PATCH 003/131] HDFS-15386 ReplicaNotFoundException keeps happening in DN after removing multiple DN's data directories (#2052) Contributed by Toshihiro Suzuki. 
--- .../fsdataset/impl/FsDatasetImpl.java | 5 +- .../fsdataset/impl/TestFsDatasetImpl.java | 101 ++++++++++++++++-- 2 files changed, 95 insertions(+), 11 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 18a747635f22b..a083012a2cf7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -578,7 +578,8 @@ public void removeVolumes( // Unlike updating the volumeMap in addVolume(), this operation does // not scan disks. for (String bpid : volumeMap.getBlockPoolList()) { - List blocks = new ArrayList<>(); + List blocks = blkToInvalidate + .computeIfAbsent(bpid, (k) -> new ArrayList<>()); for (Iterator it = volumeMap.replicas(bpid).iterator(); it.hasNext();) { ReplicaInfo block = it.next(); @@ -591,9 +592,7 @@ public void removeVolumes( it.remove(); } } - blkToInvalidate.put(bpid, blocks); } - storageToRemove.add(sd.getStorageUuid()); storageLocationsToRemove.remove(sdLocation); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index 29e533c32cea1..273feb0491112 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -20,7 +20,6 @@ import com.google.common.base.Supplier; import com.google.common.collect.Lists; -import java.io.FileInputStream; import java.io.OutputStream; import java.nio.file.Files; import java.nio.file.Paths; @@ -106,6 +105,8 @@ import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import org.slf4j.Logger; @@ -268,16 +269,24 @@ public void testAddVolumeWithSameStorageUuid() throws IOException { } @Test(timeout = 30000) - public void testRemoveVolumes() throws IOException { + public void testRemoveOneVolume() throws IOException { // Feed FsDataset with block metadata. 
- final int NUM_BLOCKS = 100; - for (int i = 0; i < NUM_BLOCKS; i++) { - String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length]; + final int numBlocks = 100; + for (int i = 0; i < numBlocks; i++) { + String bpid = BLOCK_POOL_IDS[numBlocks % BLOCK_POOL_IDS.length]; ExtendedBlock eb = new ExtendedBlock(bpid, i); - try (ReplicaHandler replica = - dataset.createRbw(StorageType.DEFAULT, null, eb, false)) { + ReplicaHandler replica = null; + try { + replica = dataset.createRbw(StorageType.DEFAULT, null, eb, + false); + } finally { + if (replica != null) { + replica.close(); + } } } + + // Remove one volume final String[] dataDirs = conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(","); final String volumePathToRemove = dataDirs[0]; @@ -300,6 +309,11 @@ public void testRemoveVolumes() throws IOException { assertEquals("The volume has been removed from the storageMap.", expectedNumVolumes, dataset.storageMap.size()); + // DataNode.notifyNamenodeDeletedBlock() should be called 50 times + // as we deleted one volume that has 50 blocks + verify(datanode, times(50)) + .notifyNamenodeDeletedBlock(any(), any()); + try { dataset.asyncDiskService.execute(volumeToRemove, new Runnable() { @@ -317,10 +331,81 @@ public void run() {} totalNumReplicas += dataset.volumeMap.size(bpid); } assertEquals("The replica infos on this volume has been removed from the " - + "volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES, + + "volumeMap.", numBlocks / NUM_INIT_VOLUMES, totalNumReplicas); } + @Test(timeout = 30000) + public void testRemoveTwoVolumes() throws IOException { + // Feed FsDataset with block metadata. + final int numBlocks = 100; + for (int i = 0; i < numBlocks; i++) { + String bpid = BLOCK_POOL_IDS[numBlocks % BLOCK_POOL_IDS.length]; + ExtendedBlock eb = new ExtendedBlock(bpid, i); + ReplicaHandler replica = null; + try { + replica = dataset.createRbw(StorageType.DEFAULT, null, eb, + false); + } finally { + if (replica != null) { + replica.close(); + } + } + } + + // Remove two volumes + final String[] dataDirs = + conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(","); + Set volumesToRemove = new HashSet<>(); + volumesToRemove.add(StorageLocation.parse(dataDirs[0])); + volumesToRemove.add(StorageLocation.parse(dataDirs[1])); + + FsVolumeReferences volReferences = dataset.getFsVolumeReferences(); + Set volumes = new HashSet<>(); + for (FsVolumeSpi vol: volReferences) { + for (StorageLocation volume : volumesToRemove) { + if (vol.getStorageLocation().equals(volume)) { + volumes.add((FsVolumeImpl) vol); + } + } + } + assertEquals(2, volumes.size()); + volReferences.close(); + + dataset.removeVolumes(volumesToRemove, true); + int expectedNumVolumes = dataDirs.length - 2; + assertEquals("The volume has been removed from the volumeList.", + expectedNumVolumes, getNumVolumes()); + assertEquals("The volume has been removed from the storageMap.", + expectedNumVolumes, dataset.storageMap.size()); + + // DataNode.notifyNamenodeDeletedBlock() should be called 100 times + // as we deleted 2 volumes that have 100 blocks totally + verify(datanode, times(100)) + .notifyNamenodeDeletedBlock(any(), any()); + + for (FsVolumeImpl volume : volumes) { + try { + dataset.asyncDiskService.execute(volume, + new Runnable() { + @Override + public void run() {} + }); + fail("Expect RuntimeException: the volume has been removed from the " + + "AsyncDiskService."); + } catch (RuntimeException e) { + GenericTestUtils.assertExceptionContains("Cannot find volume", e); + } + } + + int totalNumReplicas = 0; + for (String bpid 
: dataset.volumeMap.getBlockPoolList()) { + totalNumReplicas += dataset.volumeMap.size(bpid); + } + assertEquals("The replica infos on this volume has been removed from the " + + "volumeMap.", 0, totalNumReplicas); + } + @Test(timeout = 5000) public void testRemoveNewlyAddedVolume() throws IOException { final int numExistingVolumes = getNumVolumes(); From 8abff5151a7c157355930475dbea2dc535d51c19 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 5 Jun 2020 15:00:39 +0200 Subject: [PATCH 004/131] YARN-10292. FS-CS converter: add an option to enable asynchronous scheduling in CapacityScheduler. Contributed by Benjamin Teke --- .../fair/converter/ConversionOptions.java | 9 ++++ .../FSConfigToCSConfigArgumentHandler.java | 5 +++ .../FSConfigToCSConfigConverter.java | 3 +- .../fair/converter/FSYarnSiteConverter.java | 6 ++- ...TestFSConfigToCSConfigArgumentHandler.java | 31 ++++++++++++++ .../TestFSConfigToCSConfigConverter.java | 35 ++++++++++++++++ .../converter/TestFSYarnSiteConverter.java | 42 +++++++++++++++---- 7 files changed, 122 insertions(+), 9 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/ConversionOptions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/ConversionOptions.java index 7fec0a80ad864..aae1d5547a8f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/ConversionOptions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/ConversionOptions.java @@ -22,6 +22,7 @@ public class ConversionOptions { private DryRunResultHolder dryRunResultHolder; private boolean dryRun; private boolean noTerminalRuleCheck; + private boolean enableAsyncScheduler; public ConversionOptions(DryRunResultHolder dryRunResultHolder, boolean dryRun) { @@ -41,6 +42,14 @@ public boolean isNoRuleTerminalCheck() { return noTerminalRuleCheck; } + public void setEnableAsyncScheduler(boolean enableAsyncScheduler) { + this.enableAsyncScheduler = enableAsyncScheduler; + } + + public boolean isEnableAsyncScheduler() { + return enableAsyncScheduler; + } + public void handleWarning(String msg, Logger log) { if (dryRun) { dryRunResultHolder.addDryRunWarning(msg); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java index 5bd3b1a52ba81..c2554a440b9be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java @@ -109,6 +109,9 @@ public 
enum CliOption { SKIP_VERIFICATION("skip verification", "s", "skip-verification", "Skips the verification of the converted configuration", false), + ENABLE_ASYNC_SCHEDULER("enable asynchronous scheduler", "a", "enable-async-scheduler", + "Enables the Asynchronous scheduler which decouples the CapacityScheduler" + + " scheduling from Node Heartbeats.", false), HELP("help", "h", "help", "Displays the list of options", false); private final String name; @@ -220,6 +223,8 @@ private FSConfigToCSConfigConverter prepareAndGetConverter( conversionOptions.setDryRun(dryRun); conversionOptions.setNoTerminalRuleCheck( cliParser.hasOption(CliOption.NO_TERMINAL_RULE_CHECK.shortSwitch)); + conversionOptions.setEnableAsyncScheduler( + cliParser.hasOption(CliOption.ENABLE_ASYNC_SCHEDULER.shortSwitch)); checkOptionPresent(cliParser, CliOption.YARN_SITE); checkOutputDefined(cliParser, dryRun); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java index 368bc1f493700..5acf356725211 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java @@ -270,7 +270,8 @@ private void convertYarnSiteXml(Configuration inputYarnSiteConfig, FSYarnSiteConverter siteConverter = new FSYarnSiteConverter(); siteConverter.convertSiteProperties(inputYarnSiteConfig, - convertedYarnSiteConfig, drfUsed); + convertedYarnSiteConfig, drfUsed, + conversionOptions.isEnableAsyncScheduler()); // See docs: "allow-undeclared-pools" and "user-as-default-queue" are // ignored if we have placement rules diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java index 8e5f92aa6d4c7..86e4cd3dfef58 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java @@ -38,7 +38,7 @@ public class FSYarnSiteConverter { @SuppressWarnings({"deprecation", "checkstyle:linelength"}) public void convertSiteProperties(Configuration conf, - Configuration yarnSiteConfig, boolean drfUsed) { + Configuration yarnSiteConfig, boolean drfUsed, boolean enableAsyncScheduler) { yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName()); @@ -131,6 +131,10 @@ public void convertSiteProperties(Configuration conf, 
CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, DominantResourceCalculator.class.getCanonicalName()); } + + if (enableAsyncScheduler) { + yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true); + } } public boolean isPreemptionEnabled() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java index 9f41d3147949d..c3f380e2ea8b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java @@ -651,4 +651,35 @@ public void testValidationSkippedWhenOutputIsConsole() throws Exception { verifyZeroInteractions(mockValidator); } + + @Test + public void testEnabledAsyncScheduling() throws Exception { + setupFSConfigConversionFiles(true); + + FSConfigToCSConfigArgumentHandler argumentHandler = + new FSConfigToCSConfigArgumentHandler(conversionOptions, mockValidator); + + String[] args = getArgumentsAsArrayWithDefaults("-f", + FSConfigConverterTestCommons.FS_ALLOC_FILE, "-p", + "-a"); + argumentHandler.parseAndConvert(args); + + assertTrue("-a switch had no effect", + conversionOptions.isEnableAsyncScheduler()); + } + + @Test + public void testDisabledAsyncScheduling() throws Exception { + setupFSConfigConversionFiles(true); + + FSConfigToCSConfigArgumentHandler argumentHandler = + new FSConfigToCSConfigArgumentHandler(conversionOptions, mockValidator); + + String[] args = getArgumentsAsArrayWithDefaults("-f", + FSConfigConverterTestCommons.FS_ALLOC_FILE, "-p"); + argumentHandler.parseAndConvert(args); + + assertFalse("-a switch wasn't provided but async scheduling option is true", + conversionOptions.isEnableAsyncScheduler()); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java index dd3e6d4588854..46e1fb39ad622 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java @@ -717,6 +717,41 @@ public void testPlacementRulesConversionEnabled() throws Exception { any(Boolean.class)); } + @Test + public void testConversionWhenAsyncSchedulingIsEnabled() + throws Exception { + boolean schedulingEnabledValue = 
testConversionWithAsyncSchedulingOption(true); + assertTrue("Asynchronous scheduling should be true", schedulingEnabledValue); + } + + @Test + public void testConversionWhenAsyncSchedulingIsDisabled() throws Exception { + boolean schedulingEnabledValue = testConversionWithAsyncSchedulingOption(false); + assertEquals("Asynchronous scheduling should be the default value", + CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE, + schedulingEnabledValue); + } + + private boolean testConversionWithAsyncSchedulingOption(boolean enabled) throws Exception { + FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder() + .withClusterResource(CLUSTER_RESOURCE_STRING) + .withFairSchedulerXmlConfig(FAIR_SCHEDULER_XML) + .build(); + + ConversionOptions conversionOptions = createDefaultConversionOptions(); + conversionOptions.setEnableAsyncScheduler(enabled); + + converter = new FSConfigToCSConfigConverter(ruleHandler, + conversionOptions); + + converter.convert(params); + + Configuration convertedConfig = converter.getYarnSiteConfig(); + + return convertedConfig.getBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, + CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE); + } + private Configuration getConvertedCSConfig(String dir) throws IOException { File capacityFile = new File(dir, "capacity-scheduler.xml"); ByteArrayInputStream input = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java index 1597d5054a967..9cebf16f8be16 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java @@ -26,6 +26,7 @@ import org.junit.Test; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; /** @@ -52,7 +53,8 @@ public void testSiteContinuousSchedulingConversion() { yarnConfig.setInt( FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS, 666); - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); assertTrue("Cont. 
scheduling", yarnConvertedConfig.getBoolean( CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false)); @@ -70,7 +72,8 @@ public void testSitePreemptionConversion() { FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS, 321); - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); assertTrue("Preemption enabled", yarnConvertedConfig.getBoolean( @@ -90,7 +93,8 @@ public void testSitePreemptionConversion() { public void testSiteAssignMultipleConversion() { yarnConfig.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true); - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); assertTrue("Assign multiple", yarnConvertedConfig.getBoolean( @@ -102,7 +106,8 @@ public void testSiteAssignMultipleConversion() { public void testSiteMaxAssignConversion() { yarnConfig.setInt(FairSchedulerConfiguration.MAX_ASSIGN, 111); - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); assertEquals("Max assign", 111, yarnConvertedConfig.getInt( @@ -116,7 +121,8 @@ public void testSiteLocalityThresholdConversion() { yarnConfig.set(FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK, "321.321"); - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); assertEquals("Locality threshold node", "123.123", yarnConvertedConfig.get( @@ -128,7 +134,8 @@ public void testSiteLocalityThresholdConversion() { @Test public void testSiteDrfEnabledConversion() { - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, true); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, true, + false); assertEquals("Resource calculator type", DominantResourceCalculator.class, yarnConvertedConfig.getClass( @@ -137,11 +144,32 @@ public void testSiteDrfEnabledConversion() { @Test public void testSiteDrfDisabledConversion() { - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); assertEquals("Resource calculator type", DefaultResourceCalculator.class, yarnConvertedConfig.getClass( CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, CapacitySchedulerConfiguration.DEFAULT_RESOURCE_CALCULATOR_CLASS)); } + + @Test + public void testAsyncSchedulingEnabledConversion() { + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, true, + true); + + assertTrue("Asynchronous scheduling", yarnConvertedConfig.getBoolean( + CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, + CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE)); + } + + @Test + public void testAsyncSchedulingDisabledConversion() { + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); + + assertFalse("Asynchronous scheduling", yarnConvertedConfig.getBoolean( + CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, + CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE)); + } } From 2326123705445dee534ac2c298038831b5d04a0a Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Fri, 5 Jun 2020 19:15:05 +0530 Subject: [PATCH 005/131] HDFS-15359. EC: Allow closing a file with committed blocks. Contributed by Ayush Saxena. 
--- .../server/blockmanagement/BlockManager.java | 15 +++++-- .../hdfs/server/namenode/INodeFile.java | 22 ++++++++--- .../src/main/resources/hdfs-default.xml | 4 +- .../hdfs/TestDistributedFileSystem.java | 39 +++++++++++++++++++ 4 files changed, 69 insertions(+), 11 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index dfb635dcefd4f..0b1da8bccc029 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -1158,12 +1158,19 @@ public boolean commitOrCompleteLastBlock(BlockCollection bc, /** * If IBR is not sent from expected locations yet, add the datanodes to * pendingReconstruction in order to keep RedundancyMonitor from scheduling - * the block. + * the block. In case of erasure coding blocks, adds only in case there + * isn't any missing node. */ public void addExpectedReplicasToPending(BlockInfo blk) { - if (!blk.isStriped()) { - DatanodeStorageInfo[] expectedStorages = - blk.getUnderConstructionFeature().getExpectedStorageLocations(); + boolean addForStriped = false; + DatanodeStorageInfo[] expectedStorages = + blk.getUnderConstructionFeature().getExpectedStorageLocations(); + if (blk.isStriped()) { + BlockInfoStriped blkStriped = (BlockInfoStriped) blk; + addForStriped = + blkStriped.getRealTotalBlockNum() == expectedStorages.length; + } + if (!blk.isStriped() || addForStriped) { if (expectedStorages.length - blk.numNodes() > 0) { ArrayList pendingNodes = new ArrayList<>(); for (DatanodeStorageInfo storage : expectedStorages) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 67c86b375780d..3f0208c4dfb0c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -374,17 +374,27 @@ static String checkBlockComplete(BlockInfo[] blocks, int i, if (state == BlockUCState.COMPLETE) { return null; } - if (b.isStriped() || i < blocks.length - numCommittedAllowed) { + if (i < blocks.length - numCommittedAllowed) { return b + " is " + state + " but not COMPLETE"; } if (state != BlockUCState.COMMITTED) { return b + " is " + state + " but neither COMPLETE nor COMMITTED"; } - final int numExpectedLocations - = b.getUnderConstructionFeature().getNumExpectedLocations(); - if (numExpectedLocations <= minReplication) { - return b + " is " + state + " but numExpectedLocations = " - + numExpectedLocations + " <= minReplication = " + minReplication; + + if (b.isStriped()) { + BlockInfoStriped blkStriped = (BlockInfoStriped) b; + if (b.getUnderConstructionFeature().getNumExpectedLocations() + != blkStriped.getRealTotalBlockNum()) { + return b + " is a striped block in " + state + " with less then " + + "required number of blocks."; + } + } else { + final int numExpectedLocations = + b.getUnderConstructionFeature().getNumExpectedLocations(); + if (numExpectedLocations <= minReplication) { + return b + " is " + state + " but numExpectedLocations = " + + numExpectedLocations + " <= minReplication = " + 
minReplication; + } } return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 38e8edde5a6bb..1e8490addbe0c 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4903,7 +4903,9 @@ Normally a file can only be closed with all its blocks are committed. When this value is set to a positive integer N, a file can be closed - when N blocks are committed and the rest complete. + when N blocks are committed and the rest complete. In case of Erasure Coded + blocks, the committed block shall be allowed only when the block group is + complete. i.e no missing/lost block in the blockgroup. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index 6353e194eda4d..29e49ea61519f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs; import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -100,6 +101,7 @@ import org.apache.hadoop.hdfs.protocol.OpenFileEntry; import org.apache.hadoop.hdfs.protocol.OpenFilesIterator; import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; @@ -2105,4 +2107,41 @@ public void testGetECTopologyResultForPolicies() throws Exception { assertFalse(result.isSupported()); } } + + @Test + public void testECCloseCommittedBlock() throws Exception { + HdfsConfiguration conf = new HdfsConfiguration(); + conf.setInt(DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_KEY, 1); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(3).build()) { + cluster.waitActive(); + final DistributedFileSystem dfs = cluster.getFileSystem(); + Path dir = new Path("/dir"); + dfs.mkdirs(dir); + dfs.enableErasureCodingPolicy("XOR-2-1-1024k"); + dfs.setErasureCodingPolicy(dir, "XOR-2-1-1024k"); + + try (FSDataOutputStream str = dfs.create(new Path("/dir/file"));) { + for (int i = 0; i < 1024 * 1024 * 4; i++) { + str.write(i); + } + DataNodeTestUtils.pauseIBR(cluster.getDataNodes().get(0)); + DataNodeTestUtils.pauseIBR(cluster.getDataNodes().get(1)); + } + DataNodeTestUtils.resumeIBR(cluster.getDataNodes().get(0)); + DataNodeTestUtils.resumeIBR(cluster.getDataNodes().get(1)); + + // Check if the blockgroup isn't complete then file close shouldn't be + // success with block in committed state. 
+ cluster.getDataNodes().get(0).shutdown(); + FSDataOutputStream str = dfs.create(new Path("/dir/file1")); + + for (int i = 0; i < 1024 * 1024 * 4; i++) { + str.write(i); + } + DataNodeTestUtils.pauseIBR(cluster.getDataNodes().get(1)); + DataNodeTestUtils.pauseIBR(cluster.getDataNodes().get(2)); + LambdaTestUtils.intercept(IOException.class, "", () -> str.close()); + } + } } From 76fa0222f0d2e2d92b4a1eedba8b3e38002e8c23 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Fri, 5 Jun 2020 10:58:21 -0700 Subject: [PATCH 006/131] HDFS-15330. Document the ViewFSOverloadScheme details in ViewFS guide. Contributed by Uma Maheswara Rao G. --- .../src/site/markdown/HDFSCommands.md | 40 ++++- .../hadoop-hdfs/src/site/markdown/ViewFs.md | 6 + .../src/site/markdown/ViewFsOverloadScheme.md | 163 ++++++++++++++++++ .../resources/images/ViewFSOverloadScheme.png | Bin 0 -> 190004 bytes hadoop-project/src/site/site.xml | 1 + 5 files changed, 209 insertions(+), 1 deletion(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/ViewFSOverloadScheme.png diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md index bc5ac30769463..d199c06afb740 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md @@ -693,4 +693,42 @@ Usage: `hdfs debug recoverLease -path [-retries ]` | [`-path` *path*] | HDFS path for which to recover the lease. | | [`-retries` *num-retries*] | Number of times the client will retry calling recoverLease. The default number of retries is 1. | -Recover the lease on the specified path. The path must reside on an HDFS filesystem. The default number of retries is 1. +Recover the lease on the specified path. The path must reside on an HDFS file system. The default number of retries is 1. + +dfsadmin with ViewFsOverloadScheme +---------------------------------- + +Usage: `hdfs dfsadmin -fs ` + +| COMMAND\_OPTION | Description | +|:---- |:---- | +| `-fs` *child fs mount link URI* | Its a logical mount link path to child file system in ViewFS world. This uri typically formed as src mount link prefixed with fs.defaultFS. Please note, this is not an actual child file system uri, instead its a logical mount link uri pointing to actual child file system| + +Example command usage: + `hdfs dfsadmin -fs hdfs://nn1 -safemode enter` + +In ViewFsOverloadScheme, we may have multiple child file systems as mount point mappings as shown in [ViewFsOverloadScheme Guide](./ViewFsOverloadScheme.html). Here -fs option is an optional generic parameter supported by dfsadmin. When users want to execute commands on one of the child file system, they need to pass that file system mount mapping link uri to -fs option. Let's take an example mount link configuration and dfsadmin command below. 
+ +Mount link: + +```xml + +<property> +  <name>fs.defaultFS</name> +  <value>hdfs://MyCluster1</value> +</property> + +<property> +  <name>fs.viewfs.mounttable.MyCluster1./user</name> +  <value>hdfs://MyCluster2/user</value> +  <!-- mount link path: /user + mount link uri: hdfs://MyCluster1/user + mount target uri for /user: hdfs://MyCluster2/user --> +</property> + +``` + +If a user wants to talk to `hdfs://MyCluster2/`, then they can pass the -fs option (`-fs hdfs://MyCluster1/user`). +Since /user was mapped to the cluster `hdfs://MyCluster2/user`, dfsadmin resolves the passed (`-fs hdfs://MyCluster1/user`) to the target fs (`hdfs://MyCluster2/user`). +This way users can get access to all hdfs child file systems in ViewFsOverloadScheme. +If no `-fs` option is provided, then it will try to connect to the configured fs.defaultFS cluster, if a cluster is running with the fs.defaultFS URI.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md index f851ef6a656a0..52ad49c57024f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md @@ -361,6 +361,12 @@ resume its work, it's a good idea to provision some sort of cron job to purge su Delegation tokens for the cluster to which you are submitting the job (including all mounted volumes for that cluster’s mount table), and for input and output paths to your map-reduce job (including all volumes mounted via mount tables for the specified input and output paths) are all handled automatically. In addition, there is a way to add additional delegation tokens to the base cluster configuration for special circumstances. +Don't want to change the scheme, or find it difficult to copy mount-table configurations to all clients? +------------------------------------------------------------------------------------------- + +Please refer to the [View File System Overload Scheme Guide](./ViewFsOverloadScheme.html). + + Appendix: A Mount Table Configuration Example ---------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md new file mode 100644 index 0000000000000..5fd863325cd98 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md @@ -0,0 +1,163 @@ + +View File System Overload Scheme Guide +====================================== + + + +Introduction +------------ + +The View File System Overload Scheme was introduced to solve two key challenges with the View File System (ViewFS). The first problem is that, to use ViewFS, users need to update fs.defaultFS with the viewfs scheme (`viewfs://`). The second problem is that users need to copy the mount-table configurations to all the client nodes. +The ViewFileSystemOverloadScheme addresses these challenges. + +View File System Overload Scheme +-------------------------------- + +### Details + +The View File System Overload Scheme is an extension to the View File System. It allows users to continue to use their existing fs.defaultFS configured scheme, or any new scheme name, instead of the `viewfs` scheme. The mount link configuration key and value formats are the same as in the [ViewFS Guide](./ViewFs.html). If a user wants to continue using the same fs.defaultFS and wants to have more mount points, then the mount link configurations should have the current fs.defaultFS authority name as the mount table name.
For example, if fs.defaultFS is `hdfs://mycluster`, then the mount link configuration key names should be in the following format: `fs.viewfs.mounttable.*mycluster*.`. We will discuss more example configurations in the following sections. + +Another important improvement with the ViewFileSystemOverloadScheme is that administrators need not copy the `mount-table.xml` configuration file to thousands of client nodes. Instead, they can keep the mount-table configuration file in a Hadoop compatible file system. Keeping the configuration file in a central place makes administrators' lives easier, as they can update the mount table in a single place. + +### Enabling View File System Overload Scheme + +To use this class, the following configuration needs to be added to the core-site.xml file. + +```xml + +<property> +  <name>fs.<scheme>.impl</name> +  <value>org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme</value> +</property> + +``` +Here `<scheme>` should be the same as the uri-scheme configured in fs.defaultFS. For example, if fs.defaultFS was configured with `hdfs://mycluster`, then the above configuration would be like below: + +```xml + +<property> +  <name>fs.hdfs.impl</name> +  <value>org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme</value> +</property> + +``` + +### Example Configurations + +**Example 1:** + +If users want to mount some of their existing cluster (`hdfs://mycluster`) data with hdfs (`hdfs://mycluster`) and other object store clusters (`o3fs://bucket1.volume1.omhost/`, `s3a://bucket1/`), the following example configurations show how to add the mount links. + + +```xml + +<property> +  <name>fs.viewfs.mounttable.Cluster./user</name> +  <value>hdfs://mycluster/user</value> +</property> + +<property> +  <name>fs.viewfs.mounttable.Cluster./data</name> +  <value>o3fs://bucket1.volume1/data</value> +</property> + +<property> +  <name>fs.viewfs.mounttable.Cluster./backup</name> +  <value>s3a://bucket1/backup/</value> +</property> + +``` + +Let's consider the following operations to understand where these operations will be delegated based on the mount links. + + *Op1:* Create a file with the path `hdfs://mycluster/user/fileA`; then physically this file will be created at `hdfs://mycluster/user/fileA`. This delegation happens based on the first configuration parameter in the above configurations. Here `/user` is mapped to `hdfs://mycluster/user/`. + + *Op2:* Create a file with the path `hdfs://mycluster/data/datafile`; then this file will be created at `o3fs://bucket1.volume1.omhost/data/datafile`. This delegation happens based on the second configuration parameter in the above configurations. Here `/data` is mapped to `o3fs://bucket1.volume1.omhost/data/`. + + *Op3:* Create a file with the path `hdfs://Cluster/backup/data.zip`; then physically this file will be created at `s3a://bucket1/backup/data.zip`. This delegation happens based on the third configuration parameter in the above configurations. Here `/backup` is mapped to `s3a://bucket1/backup/`. + + +**Example 2:** + +If users want to mount some of their existing cluster (`s3a://bucketA/`) data with another hdfs cluster (`hdfs://Cluster`) and object store clusters (`o3fs://bucket1.volume1.omhost/`, `s3a://bucketA/`), the following example configurations show how to add the mount links. + + +```xml + +<property> +  <name>fs.viewfs.mounttable.bucketA./user</name> +  <value>hdfs://Cluster/user</value> +</property> + +<property> +  <name>fs.viewfs.mounttable.bucketA./data</name> +  <value>o3fs://bucket1.volume1.omhost/data</value> +</property> + +<property> +  <name>fs.viewfs.mounttable.bucketA./salesDB</name> +  <value>s3a://bucketA/salesDB/</value> +</property> + +``` +Let's consider the following operations to understand where these operations will be delegated based on the mount links. + + *Op1:* Create a file with the path `s3a://bucketA/user/fileA`; then this file will be created physically at `hdfs://Cluster/user/fileA`.
This delegation happens based on the first configuration parameter in the above configurations. Here `/user` is mapped to `hdfs://Cluster/user`. + + *Op2:* Create a file with the path `s3a://bucketA/data/datafile`; then this file will be created at `o3fs://bucket1.volume1.omhost/data/datafile`. This delegation happens based on the second configuration parameter in the above configurations. Here `/data` is mapped to `o3fs://bucket1.volume1.omhost/data/`. + + *Op3:* Create a file with the path `s3a://bucketA/salesDB/dbfile`; then physically this file will be created at `s3a://bucketA/salesDB/dbfile`. This delegation happens based on the third configuration parameter in the above configurations. Here `/salesDB` is mapped to `s3a://bucketA/salesDB/`. + +Note: In the above examples we used the create operation only, but the same mechanism applies to any other file system API. + +The following picture shows how the different schemes can be used in ViewFileSystemOverloadScheme compared to the ViewFileSystem. + +<img src="./images/ViewFSOverloadScheme.png" /> + +### Central Mount Table Configurations + +To enable central mount table configuration, we need to configure `fs.viewfs.mounttable.path` in `core-site.xml` with the value being the Hadoop compatible file system directory/file path where the `mount-table-<versionNumber>.xml` file is copied. Here versionNumber is an integer; to update the mount table, increase the version number and upload the new file in the same directory. + +The ViewFileSystemOverloadScheme always loads the highest version number `mount-table-<versionNumber>.xml`. Please don't replace the file with the same name; always increment the version number so that the new file is picked up by newly initializing clients. The reason we don't recommend replacing the files is that some clients might have already opened connections to the old mount-table files and be in the middle of loading the configuration files, and replacing the files can make them fail. + +```xml + +<property> +  <name>fs.viewfs.mounttable.path</name> +  <value>hdfs://Cluster/config/mount-table-dir</value> +</property> + +``` + If you are sure you will never update the mount-table file, you can also configure the file path directly, like below. If you configure a file path, it will not check for the highest version number; whatever file is configured will be loaded. However, the file name format should be the same. + +```xml + +<property> +  <name>fs.viewfs.mounttable.path</name> +  <value>hdfs://Cluster/config/mount-table-dir/mount-table-<versionNumber>.xml</value> +</property> + +``` +Note: we recommend not configuring mount links in `core-site.xml` if you configure the above path. Otherwise both sets of mount links will be mixed and can lead to confusing behavior. + +If you copy the `mount-table-<versionNumber>.xml`, you may consider having a big replication factor depending on your cluster size, so that the file will be available locally to the majority of clients, as applications (MR/YARN/HBase, etc.) use locality on HDFS when reading `mount-table-<versionNumber>.xml`. + +DFSAdmin commands with View File System Overload Scheme +------------------------------------------------------- + +Please refer to the [HDFSCommands Guide](./HDFSCommands.html#dfsadmin_with_ViewFsOverloadScheme). + +Appendix: A Mount Table Configuration with XInclude +--------------------------------------------------- + +If users have an HTTP server in a trusted network and don't need an authentication mechanism for it, you can also place your mount-table.xml file on that server and configure an XInclude xml tag with the `mount-table.xml` file. + +```xml + +<configuration xmlns:xi="http://www.w3.org/2001/XInclude"> +  <xi:include href="http://<http-server>/mount-table.xml" /> +</configuration> + +``` + +The Apache Hadoop configuration has the capability to read http URLs from XInclude and load them into the configuration.
If you choose this option, please don't configure mount-table configuration items in `core-site.xml` or at `fs.viewfs.mounttable.path`. Please note, Hadoop configuration XInclude does not use SPNego authentication when opening url. So, this will not work if http server where you placed `mount-table.xml` needs authentication. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/ViewFSOverloadScheme.png b/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/ViewFSOverloadScheme.png new file mode 100644 index 0000000000000000000000000000000000000000..b5029c1e0a09828439f072fe49d9fe524171f6a2 GIT binary patch literal 190004 zcmZ^L1z1%}_ctIQDJ38v9V$pjN`puXBHbx1-3=xG zbx^cYG*U!HYijmPVj#xc=ry7pp1UJVOIm`aJKq1}HpIZ_D7$s*hWjizA1qGk&b0Z%4$6|kmgMaZ;nb_M~^Dr|zIXN*o zu`yZM8Z$rO=H_N*VP$4zWdv_9+PPTT>p3%8+EM?WWYXXaBzjIOSV21*ysGKtpwAkYl-qxgn;gK{YjrfI~+>l8Za z)>UrY?Xq}KIT}CNYrMZ$vs1&i^+d7Y8#V!#`g6*GHdKl-(>Q?o7(RQxMDOetJu1W*;$d{c75Oje$pD_l9d8Ll#O$ zyIhu{hZJE-HBcmcQ;voi@qgaFBrOySS|q_!N4%O=9+71z-5T2m?*AMj)Q-J8$;zKh zjY;|Q{A4SR-1Cq!(|pV{gQNcmn+)z&Tc?Iqb;R?v{*2HVn`WZUgJ~}-fx{oK=3QXt zXEL>(XCIsyCfrZATNc;yYd0NL(&H?7`pc}QQ$I4-9M-#^A1z6`|ETjZD*8?*Ju3)1 zEhgeHQ^X}_(e;ke54iWincAvEWIe_>;X${+Ne&U4Q4mg;BwbA76T!1xBR?V|mfEuo ztIn#OuJKm7XM|t}KIt^xgUv!HVl0EzWR|JhwA&W{&RnQq1#6mdSWPbJ z%Nl>Hnj^ZEtQu6o14%NY2??&FR_f5`VQsh0m9W(7ajvVG@l2Agd1+DFW6NX96k6%_ zv1omRlE&gdDxd>Vd|Zdtj)Hah3*RJ$}$6{pb3Q6!B0_WoM4-WX?$%&or3E zt>b-G^6d*H;baLNUe=Tmp z=e~l@$%hYD_k7qrrTH}fsF}tRXGd_(@y%xCR*hxLIufC#F8z@ux$81z@zG*jW&9T` z!Bgv?(%ji&Ka_aX<;nsv3p*gyPM$H_^Kb7YuxMD8r>QxP8V1`x3%)VN@#ODD4J^`9 zC6-Wf3Y&?$(j?x@0o6cD!JlrgZZBU-h?BBXA<3A1tRCo%XkPhfV8RP_rsVYG<=(G$ zW~&4sXjwb^&ymY{!+0}*gx1`N6Q0EO7V1KEHptZqj@n7+albGNt$5%dU5IlR%^7xT z7fHgzN+dQ-hBfVE?D{0{IArn=eVwoWBL7M=ik?zo6Xm1JV$5dcazL*VI=r&Bi=lzj z!eZVu`>hMBHs%j!JoKeobvQMH=jrZ2Ev)+JWEz$b?zG)R7&m>k=_Faz7gxDi!BARZ zJ5r!$xs?2?#rSjcw$xgyLdB%h^yK}oKVB+vsTo)LQ5ta^&4>BtyRA=ElyQz*)q0q9 z4BpDMJ#8RA*IkS@xZ$K-*flUXs>)n@TExBGh{sFhIIM2YxYNbE6>YSl!kwg!({9#g z8}5t+j}lG|6#h2lln7*eh4@ieVA|k zE32E*7S|!5M~@mOPP!=dYpN}Um~Aq@Qoohz_F+ba*B*64Mt9)zGclNPvoP5>=jd}h zww(^PIGwXqmCbbxB0C3xU(duz4d*jEug6<3Oos6x;{LI=of2>c7HlC>VP%qjqdlr_ zTh#}#2UBj{sN#d{XCI`QBx$FCX9)W`5&PQG_LqLNG*6E&2@z%KaY?0Kl$PUz&24|-u)?%Rb$a*bsn|tJ0~Lv-!`kDmN?-UJXe3jS=F9(m}TcKYEC=0b-emQX$=g1+A)3n`=Hb9 zMI<-Qq8MNZCqhHi#tlOGJGhrVhRvUKdz~9-yDZ?dZB^~ib$|WiPuS7nCn&huCFVbq z^6)Ay@xM+Ui!cS2>+Sn@H<3{n^xnPih3#&3R=j*Clj&CYYa+HuE_?rc8g{0WH3~ay z5#%86mXnzc;;*wuIpYxSzL1}wqG?{CX+e1Kl_4r;968HiZW z`vfn;#8%=7G+q=AWM}79(Bt1h`0Xf72oP?W#~ISt4ux0~=EQpTiV`Xfz8aBZP@z$W zR|%vL-3MZ9q3yatn__2!T1uP5rkd!sSw3JkEM@k_X)y2WY(T_wJJa*s48gQMGjYol zOoC~TgLs#{L6vy7MX{$hYmXL5THHILAKbe!`S!D5e(iY$!vP?Y-Bv;C{+rTO&;3#3 zPC%;$t3@HJ^9^_u`%v#`wef2CR?OcNE)xS)*P0tbE02a%PgslSX)k%TkUD9uhDEi{ zDadj+xpYfa-^GdXIzQ@+P|=Q&qC9KBSJ8a$I`mnBe^;8^{ijbqL#v^E?#nLyL3b?@ zYBT)#m+FQ!KkLzDv3f{JJDv)jJ)d%(!_3Ga;j+|Jl2NQaMs|O`%(|ME0rZd{Y#d-JCcv66beR+ z;rSJJ3-D$tEM?zBw$kX_zR=iw|6+ra|*>o zYL?{B=1398-g{F3Kwg7!M~v2bd8sTuLT8-ch3hMojIstJluEocjsr+6zkaicw(pbd{PFG8q)QXM z3J+$mhRPx*tE171uDjr@`PXMj6PB6_N^xRAxAg20h4QtlgQBI#Q}(mT`(CXCE!c{6 z6p*rZY*wu0<-znBCfh*pG_%WU`0(YpH^~E2&}h=lf>8iHZ$xARE=n27e;{2F`~Gr$Fq!X;4_A z$PMn)PqsPFxW1>HDobpHcc1q@O)JcPz?-&>wFwmGCH`0Wl?F%OgCTF_0$vmkWvA-% z<23~rtcO3m2ToXi{dk3y-k0@)p~p{B{=l)tn*Iz>kyuq07CKM~6oS!ZI6e+f_93NG zUOY`|KrsW8!qx`;5g4hja)Tfem?p*JcgD_ElO*`-Xg^roRxg6a2(}j^1urL)W2F@6 zKNaQ9JYvRqN?Xx&%0ICpNRp}9aK`0Y3n_laNp^aJ z(|~3P?>AyEg$2`dQ>WAls*RVs1H6^dm7lWKT`Dr&_aElV9#v%P{6abh0m_DARH8_W 
zO67K)A^OsE--Daz)Z!sIs!t8H;GFuD8W+@=)qP2wCLRnjYe7byv%a)gV6H_4@|2E)5Z+=XYiGEJoW~P3-Zs;f!%PF;Wok)Xc0mM&z)C8i(gAt+UC7bE5um~V(VHC z&yWW(-d=>+D?$n>Ev{+z9Yqh;5Z{&Jpca7!9P!)lrLx@}MdQ8VD*{AK9!Cm4!05Eh zDK&m+ziQrkZ%sRlhUmEH)(xAgBhxC`pzck%8 zN+m0t_3_Ji8S)Ch@VbVv7S%*N)OnFp%H)Ucc-F=Q!=cug-m)C(`r`o(Z@1X z&FS>!qLGuI<*Q{3lnrL({&0w^LUo%VPR;6CsF1G)(RBhv-D!S<#P`$uSFaWkJ7CeU zBUoU`XO@q5j8bI#s-~&Y9o^ypd-R@$nw#x9>n@Rn`}tmH<{6yo zp;$;3pFcmwiW%)$PPTtFSk&W`+H_5CIg3n;Z@ufR;PZe7V{P|{D#Y zBQZLRsYZQxIaA>Ed-eT0_oLjPxY~uzyO6UxWf;*S6e%W0$xx4t7Nz--H$RN>$?PV> zgCV2bl7TkHHP;cM(2|$Zl~Nln8S#jyYXP)MOJnBw@ltHAyUPP^K0Ep~qaqtm> zGLkYtFwTaw=!F+l8jHmOTRXIPad4z#f->l|4f1CDliEz@J2@DR0*&Zj4#uqnVe~s@ zTG$zjg>LJG;`CaKd(!k@!wZ#noM2v=n;s~bAJurx_kxeyJ(=>d?eY3jT3hdtzH(sx z$ohmKc$d2`uJKz1yR6MKb+?=H!U?UZwP6Xut|;4GeK>|h%Ek7`#N=z6<+@rMJX)DSDK9Au&ba!;P?$S{TE~+zQ%Igk!IbJekXcocf+P z(9Z6f`(A_5Ey+V?Z`<#H$W4eMA7Czv4)d|8lZAy{3E$+34EHg@2FNXmGKwD2Hjk;o zeJCK^e1Xv((X)tzq(!Ug+gxtf&Cj)`&15}?U5Rz0y&YE|1jMq6F}T|G*d*q(hh`jR zX@a)zKj53qARk$IooHlju*wsTKRD@N>$YdDlm6U}W}A+Z$GJFqQv+|C}9joCZ18+7O8- zP(|G|P0`hDN;V^Pd+SYG_pv2AQNSH-DTXpp+iN%dosm*U@PL*dgZL6c<2r%R|5`!cxRCeqr&r|EVD3Q2fBo{W-lt zmncq_Ww)KZ72?hHkXqX6sJy%8KGn^F_r(v?V2RC!jt3_l`k95nTsKiZVC>nD;CGMZ zGN!n8(48*w@2VfPM785XndcP9y*f`&_N`B7b9}u@_FDMaUp99JBF%%ug-dpJb|I*= zP%b0lg&N*s!B3#oyn{pivb>Bgxe+h*1dAq?n~lf)Ru)`oZ_I2y*_aIyd7+8Xo;@$1 zUA@%4l~<^|wA#F9jH*&2pk-CH-KNJy~#`L5QU*F?fY=%#&(5v zFH2oH?>{|g3TmRKenL`t$TVm<1u!%%YHQ%kxeYczs=1YiFT`|#D|icG0^MF?8DfPQ zoVS3p^t$^ilAsIAA<-~8_FYO5FVT3t?VQ%eHg`5p+dYOx^dbW-soXtjGm*$b<-lrb zU-@SB?I6swF-A@LM56OBaCYF8QSJ++iyo#us&Z2|BxD=P zx=qAY%s1t05wNu&`804&+V}8QwmylpJJHx}>><2_WRf+#l9Vsu35q0$^+k_DEwb{- zrj2tBTv9XVnO88E_(_dia|*Nj@_tSu|z zK|y~UfE+Utn%;w8YuJR(qd?5rYkY-gqokj>lC^t;w7hxq7rSG9?>(8*I$73^Ezbj9 zgW#9VvUg{PnJkfX4OHalw$_;}4KTYUC#F?JQnII8atk>)_IEnD61^7khCE6O{f-BU zS$=`Ynq*CRR8@tunK-;b1%F!O+LveK9tC0I3Y&3653Q35^E}-NQl_i9rkttY8c&YA zeHQ~F>tbyp8t@yrqk6#ead+eXJ;y`NzGZ zL4x7kcp3@?Zm2N55z<^ds@e$|zU{{N1f`Rtr-=fY_lNF&46p(TQRY{zf#`sfH@m!g z1-$kRpEFB6?N;suaMyM-pS2L1=gg$U42u$|HI4+MWIFXV9I^n6 zvAjmd`WsSnf{57--gOpO)-uPy$=zps`$H7*ZEkh&*HdS0XomWmw&o52Mr@wiUhF4o zfcb5Et>X}aR~p+biCOxjjeO1Xt2y^ZDiCCw=30?SUi{cq=<_FY77ri*7Sbyf|h zR0csVO%P;z;>r&uJ&jb<)7w})Sx58+yO1@&1D$RsuGP|BmMywfqiq%tC2D!`pdr>Zh5v^+3>jvGb9EHM~TlLsX z6T|hV$s+!K`Mi_nbC0?Cs+31aphH>k1pc2|=QUn)33cD~HR7h}3iGdF%E%qo zpD2XsCh&7224_<1D)uDW`d)wf=$+LVt6FuunvD;8?LZST_@lXkPc}lod+2Vv(!?C9 z93Q+4KJjDMgG~zp9e11JY0r}w-p#KKsBO5exg^9XgB~{TepHN^WyjpVHlwUz5g|q9 zW#>w?M>uSdFN;o^J z&j?7y=CTZ4y*L!mKGKvrRIbqnO3w;q31;GT4BuwITW**o(fsi+hKSKH2WMsd^C(Vl zfaR_qh-a!>oJVnV=j^)O6=ht!Nm5#(%R5C&vL*=vH^WEQKJgcQ$GF8pp=1H@tW2~Q z&l6l0-;yig4y<2fblf+esqDT_8p!9bMp$c^k!vsf>kG$sny`nHK?E%t zPW1vV8Ea8Go-wZJS_KiWH(xyNnLUEw;}x^@@S>URz8zZhH5bfQn%n>hS< ziOyuAuhKfUm#N#`#W54o)yX-Y)6V-Q44M6>81kI@YXDefOE=cB}+^qCtfrG=W~^`18?L!Ew@zW2Nhk~>y*Kd69&kz(Xe z>L}5)mIjnqpL&u5uGz20Im19RF|?(5Y^331-}jBY5U*GOoK0>jk_xA#G^Y;tecF=} zd9j;Q60A0-ZW?dH<02K7ummD@JDymEysw|adX=XozG72YI+gpb*~Bk^;43Qa7uV`Z z&b)I`LA=)9hmp_cuPqkHpujBw1q5#WCLA^zb6Q2)Ozl5&)E%g`&s1fG>+w4V=i8-7 zDhS_G@$v0Esx$-@q_rYn%V0y3NL}2@>yd7nzCZ74HW{|IE4)D(%=^OsoS}@BwZJSQ zUDMB(H?}>3xod3UHnCpB9Rc*FDC%owxAH1hvuKEk&GzJsBX6Sfz(m7D;zvm;H3D`Z zu*6axk4fT9e|SI(UcS7kbTxWRlvLCnlXi7Xgr6~>wHxPWv%05>nFnuKUX})I67>eBh$(=~?PR7F3ac_3>SA<{4IHH= zi*vX%{1QgaJp4H9x)S9ce?C@brQI;-=>vW(hp;GNx|qRo0V1p8p`Di zIR%e0e0TLKMreyL>>?0}RHczt7E1Kywp+-Z0IKb`^_(W6eeFH9cx-fHNXsuu%Q8n? 
zE@_j21EQ_XToo9}V4`bjqAgVA9*7e%okrZti~ptJ=dD>FA+%qKW4P>oFzFQk<8-H6 zFb-6Dm{+nfbOsY7-#`Dtg*#7Ua`Zi^Z7u8leMi!iV#BB03*o6e<<8GE77~X20h~QL zdA!3vpn{av%S~Q3=@=KdMqbLs*UsV6+sI|xjh8S?86_MGr$qATT81X3e`T)2l$0~m zi}E7chi$eX$X-5Bv+6R* zvFWlsOF^AsZNEP^p{G*Gz@c64GGWtFZf85UA0D_CM+1C*u4Em9I?K?P#lt=Z$eOuO z*MWFQ=d_a*7WnpNXc;&hCcDx%qu1lDvV6O>I1oVfRV$t^tI1DUCo1InuZp|;{Zt!T zPLqSJjFu+s=b$duv+^*}bv4TU#LOm`|79={Tl#u=Ru&gum-IG~@yBO`gsBQ)vNW>P zT5m(n)j?G^{=^`DntsccxE*AUWuMO(PX&eWUyJR85EsKR`KOxhu^&JVd3C#N+S5Ee zT0dZ*SRQ9%V(`iCKST{0F$Z$pw`R_)2*}7Ri*Vc!;RQQ!F{`Gis%* z^R5QDT{S&w)ye43N?h@vVwjYCxlBWHLEL;kyHzUid6^a zceyO9^^bO4Hd`Fq&Wka|6Xop5&%DMh%5qifhFAMRHM8GIlsAQ6?Um7(_#)DGkCROwer8}L7Q$OTY()A~>09q!+v+CVMd_}D&n%tqp3ZYMOz${y!{y;9b@UPh?JwPa89<5BQ2DtVqwbS4c z;Up%|D61@#a*=z4G>^$#my;I(p2H3$An|(Kj!vuqy^gguUSuqr&<9ZY`%0;$bk1zR@m4`(jpt54DGBMCxVN*$BvNJ2Nv9%q+joAmgEJ1I6{qLP^VQ z#xG)Zo2z1$$mR`1BGPY2!mpXxG^u9beAb_*ztPP9!~0s$M5SS=LRcfH?er|4^c>to zmbsmUxqZap2`^M`+c^K!>o}}eRx`dyA<)jJZSXrl-IDxK>_^PpKHFyb{%)qQr@M?9 zL7Z?poirI*=(c-yFg+blGIrz@+V^xmhH(NXdtiICL=Sb$Fmtk0pcQZEI=Q=U{oGdq zE&FF>3IiFa`FnlLz7F;W0z*V52Ew$IvV@zf`{FJ~w$D2zEXpRIJP^t4Yigd9)1hiu z1;Nf@pj{kUTf7UyQ}?kS&*(9TSZVfnCMe#!KBK~2!w`uYGb^jXB2UFT2Kkmp$+A+5 z*#^iwx=0d_NsI2rW}-9y`stzEpR2s~l*8i=Bb0&`A?Cmb!uiDFj2I_+pzWtfd#(qR zywpmIY}_bM!5=gXmXjolF!pU$tO=9lxwkh}VSMitOrFVZ(@4B7?=p~=!{@kXNFUG9 zLuXUT!={w;%l|BRYeS7ab62ks%`vlT$iw!qqW$2`N9#vfnrk&&N|79&(x)_LSofyT z3NsH@!+WixGD40 zRpq|wfsCZU9{(cYQef>MspAM`L}MPDOoXx1V)_)rhdt6!(Hrr7zE}g|yvY&~7EZ^0 zl4zisYPRaJYIw7d_oaeq_rtlMyP`Z5yM5AkR`x0%1Iw~1UB~p&1{SIX61u(IH+_G) zp_G7wyAF24q9Q=$Gx)33Ka#V>j}M(KUqw3b<4}3N1W1F_$>!G5a~=iUgGk6$;hI zvh^YVPN{_ixtg3mfZRlv{_*G$MHyL5NBl!DLS|*PXqG8m_~8zxZX>`Db7A z#hjwGqbCdq4(HalN-E?x-xa@fywft5b()NRD|afW7Be_Xxy8@DxUCG&!{Cl_CYx1+ zWk$SXo7854sp0cO?QHG_HaiDx$wKVPes0j$B~x{cx5I5I-pTKoQXoK=K?DfLAdSz!rQp%h8{gMewiFgtE;g+@BGVRq{^qqFh70Xm9 zDQ~5GLB>|GqvSzcS=S5F?Y?;qGwcR!% z8XXJ>b1MsfVj0IfD0{8tC1=kJpp(4M)-T5m5y`m|uxDT^O1ibVXJ~S;O6h-lHbpd_ z#Hyg<{4IJhcN%rfFDt~c<}dG@U+f7EEx{ymQ{ z)>V`eS6@E&#KGO_!D9uPX^>3<+jZaPHc70hYZz`pSoHus{RvLRS7lXo;jB-}EtU9l z@ul>Q6@43u-C`sbiq4>EMBZV-Y1+frxhZKZb0|wQD z2BbB|(Og?TPIh#ol_R| z#Iu}TbuIFtTsJqzr@{;trE!DTdB00;AS?5nb(y4PI!+xY?T@0$WU%htzp?FfyB2G1 zJqOd&8p~&qI%#n37mMy>J%`Sm3C=~fP{IKv3p>E!uIO9?r2$8mFXN}(#0ET_DjcuREt~VpA4h&HPM0*J&s# zxDQD}{Kx3B8D=QbN_P&g9<7aVrLh6CrWDDVX~`Pzed%N3ja$L5+En=U)V-_--?w~6 zw<(u83H3Tnp@{d#3gzEdrQFqDYVYTmQp@HWLnTSyKA6?R%+HfuksVOhf*lXn8m(r9 z=Z|oef>=0XRc;h?{8>nm?=+KG-CSRMt;pkI8RpsQZ??bGbtg|Kx%!WM9XUdm7VMop&ankHP7%U=_gIFq z;z7a4#)!O0I`D@>r~W&KpzuAR$Qb1c_7(Xr6SnQ2#O!tachzm)%@jq~Nbm&fTkIMd z5t%FbFA59DsN|%bW9>W@*&79N zs<-j#RO9BpnGdCVf%dc*gdhhcO`9-Knag6?7>MYJp=H5$Q+|Q3HS+cF_gQPDdJh8e*Qm6hr?7${UX%t}p}<3HE@qO7Kj ztElp^Oi>g0Tt9gPO(!A)e@U;WOy>peJEhthjSBjXnVU?^`n_V(bU+F0J9N zF|V^V9hp%9Vx)_r$cD7{kxonb$AucWMXz|wD7BrqpF}&gT^+yJVPU80FW@~Mc5PP^ z_^a3fb6Z&kx@=MxxBCWPCuG%^Y<7LbyV@_EU~Q#mujv%Y1T``u(cI<;}9{r*j?I@ykSOW*MLC_?JMR zpMg_fdRrla2K^>Ak2s6KlKtV&nRADR{0G_5h8&nhmT!V5xnJ1V1mar;8kHg=8mP#S z#HXk;jo?R$E%|AJFg0+&vMKj2g-4#>MPc4}u60m{uD=NJpi1wpddS@m`^ ziOd5JDGBi%qs|kKmf&s$%>Iaq(C*$T(D^5`HB~KbC^fKU`edc!5a;4{F)qsCicT&#P z8kkzCQcF~=APU}FFYewXFE`KEZc-X-+gZ)dk7et6nDWW0jW{}Z@7!qpnU2`>qw=J! 
zE>}LbAqSUU$83xTgJro^d^ZMH^=7T8Lr#9J9eEt{tl9uDa<$j8?KK2d;UUs}Xez%$ zpDv3s@T%^s&0Gg)a@#;uSwm>);M?gi>PZqd`Z8+7=5f56DIQ^=lR0%X2snc_iNo;!8{!*cMXl6eVA8m_2_fU79OpHOlVWa0U4r)>*Hpk$Rl!6o16 z*7=Gf!xKa^L6fv5>me#^psAfZB|7MN>=vg&Ydeg&&&}k0^)CC_$$^vKCOE)hMFaM5 zGu1ef)TjNS_AIP@WAXV$zrNqu8Y(dPI4H38tqW>tmyM$GXrl>x(wA!+p>~WL!GPR% zZyA)bh$rSORb6)Wr>GwHP&9@EWhSbVzcF2#}`{VqzZ1Iu z{U~IPQ>QwvJ|}5)5idfHR2X4B$KsoCn)H8+DL}M-)nP1$?-R^9*1GHu=VL@|-Nn5W z4K;Zdt6e9xe@Db)Z?ecs_6W?@dJR6P)a4fK7zz}k?SZDXn;7!sbc^aT-&Iq6a^@WJvwZsJolMFzTJ3+xq4E3E zOj;M6^AI070Gw(OY?Dg(e-FTZ1M1|#Z?P}r^5Xbq#K06`m?)k9bp<6D%$SQK5%Bl> zJvzC&H&?Zg7A~5;|1asmgsy@36l9`ZF~;5(>X(hEn*Te^|4hMR>I$`ALyv^eae>Ufd?giiDYO3`K%on^zrgnQp^PlF zdo1kEO9uWqolDgaA_N0As2&zwo;jS47+xKwZOU5(^8X;}-=#H(tYuF4+Tt(cNYDN+ zR$XwRM-eJZ3|jZxy3)v=7|ygEJw%miF-KdpUCiPh*SR_B6h$NB5}v9=xBt=4+amO;?YuBUx}y?zu5aYf5^Pw|Ga&v<;9R^dO`ij?P^+~$KY%c zU;noI|G&PdzXq~GKqOHCe>ue(#E%LC>F%p3{5xC8wucNzdgmUr=HJIfXdP zhm)7OMNtb}`^V|Rl!gC$0Cp>6NdnRSvQr2T2grTgXGwOXE3v*J`1f`{LoDO)mcY51 zGbsVoHX}jqp890;>fygA!#3dqgMy7^gRi~^k-;tV?0QlC|JuDD*!=_sKIY|SlC&V* z8GfHJeKp6w*2smNLUt(2*`-rRG6aWTZs_+Rscm&YAlv=_%5dyQIIwqjZxgaB zX}Em#_evGeRVBi2?=i1VH@kI7DI*Fvj4a0{YdQ%dEgbHeuRE;e5ck zn&{so%1DAi3OCS$UtY;K9yt5>x|h8F!LAZqz~Ef=*xk!>^NFOZ>l%wB)8_fVZ^QQZ z0qOfPlhki`IpZ(Ux~-f}m26W8V!smYe}0q;0cT?S8oELDhhEp{-~P`3`peb-&=)xa zaM0!$;j>pW3FSg2hvQk>x_hYi&H_M8yI-yzSyB!Qo1kgiBGK}GO7YS-xi zH^fY#dxo+5#?^a6b_*fI99iiL^0%&D^cLEHVj)s#slUv0pan*;Vm1w2==`&G4>h8; z`?jbZs7l6&lY0zckn)tITigPdSQrkX4MH+a%m1m_a=g%mSUoOUu3WP<56`TFq)uWr#KY~lY3NwklS-N~nh$0b@ z34q7LKS8}}?0#V*Zl)~g>AGVCz*bIM!wCRmuz@C!XmBZb0s*VRc7+})Uu>1v_9Jb57M?1X?Hog8E(e;_;X=q4@Do=+yu zm#zm6(N_FNC)f1F^=I>gB;i-`8&SQ3B&LKKFCIXNU!~R;fnETD>x+flUP;MZ)AJ`a z0@s)nibsv&%9n~eSfziRFGT6cUNmxDijBz)gzC5yq7sCwa}Y|p1%;g>ZVCT5uldlr z=m!QL#Qc#5LGg2F0^I2RUI{MfMt{GPII;X>{QT^MgvbmZ^r5j9FMkRIg&DhnXX3P5 zXMyn7kPkI>KzqQQgNc7`_&U7f5NFH7p1&92<-&h!@?9$Js~3GHa42+6A71_zz|Tsv zLF;(#wFZT!{0ml{i*Z()7^Uamc-C^}tU5tOC~o2;4qsXI+ce$qRJB_ikx}%wrZdaQ zd0MV3k%B|uhs<;+7Ch!()0zg%0b{3s+0|PVp@?)b&p)J&j&ZMvI6GQIi)}R=9&XQh zM9xE9o^%H@0AzYH_epPb#%=Q-X2%t>PYDK3JFjL+q5_J_Q2&_fO3~jjAb{fX)4nO7 zDG$qGomQS<6VuiWm_`#{Z6r^>j9W@?HrSufJP(suv{D#`F5EXQC3>9?*il_g3O4c7 zo6X$K6T<`^H1JxmZL6XBtTsy`b>$j zm3t*nkO~k75ZSCKRzV)MM;q}SY|OlBg%MHxz4tk}255f>?t1UBk`^VeRRv0nodFDa zCSsvv7J0Sjt_OH@P^Gm)UYZeKfZi1h%%|N##v=PTVFx8~6joreU!3|SgrLDSTxe;* zF=CMh%07JPsD5;B^meI3w;OzvAsYiSwaGKlhWLH16*xd(TM}5hp&7I(psS@ zFx!GfqY3yGhkxWhJp+hU1q2BT{R5ckHqZfPQ2-;0rb+{NFy;r&WE_Ko)K^FvZv@n> zHDU9chmel$Ie?3!T*F#U@x3m^0JI2rpZ)v{ROL7`?ST!cVB2jQA*oQ_e|ySZc7Z{F zPGGN&i-z`1hziSA6Ba-PB`_-ZFR6$CvnLQwrSyTu9X#yi0jFaQOrB#fNjc!j5S{nw z@YtO676ky22-Hm|%iNUDs==Kvw#wYeA|R=uy?JA1uP~}X`6Rdf3+R4Vii_Wcb`l!^ z$XCF;9t=VcGbk{@)RVsn*0MvS$@v})H5O!OitlL#UNk)kZJ@r++cwkTp}hcc2;t{z z-Gd2p0T%-B9s)FRvK?Tb4Fh8u0{{YBY$M}*5e+r91#rE*&|O<0xFhE4uO8!pvtok6 zc-j}wfmFx7_jdBF-yAc#O*#gC9K+rpLJQ(qTQACY_^1@H(%H{_cTh^meL(FoR)%x5 z5d!dx8sFe>AX$UUq!nydpegBQSzUR$uL9(;eaI*PJFfL);sQb9^_g51c8;LcKN#2Ps5iXU-gkC?!yT~_(5;-`4(E@q#X;l99r&1seHV6Rvbl8rXblw42 zsUtO)8e}Kf$-&0|{FH}%B8c`XyWfF(=st9mKU9VW$1D`X>_d&fOcoGALxLrs1rQf0 zT%mK&*B*7H{z(hqkV_}t^--b-)q#Yh-5AVVf&tH*GXsU2;ZV3)3nG~I4DeitA`ToA zCt@ka4x|X763W~uF4*T6uN^ZiJ3qJ##m2Z$T1{8V2S-$&|#}xqOI>fki z8Qwx}K;_Nzx6{k8O$dR;j~k}0+(8`k_SRAJCzK?CG$PvfFfYIH*?c|E-6G`Ju5*~S5nFo|hyQuu<-@vMb9-N{wsoF7; z&pwodSGOcii6o@~@-ewu898#GZJ|JkpZB+mP*xi5D75=;^iu*!|30ymE4q&(WlyCR zkCY(P=yV6rK16QdisIu)3vtgJw!#H*PaiBF@P39HsFoRG$irEb6kj*!2P$H~RdtPA zx83WI^=Sex(Tj;b0_QCbIuYVhzI_6{{gA~qB(;1eZ8r-Dc_#>NdbM|rod%OKf`ASn zPDlAQ?)ON+d5|GoTxU0n#B|Sq_ zRU5wBHvmj8xPII`CWeoj9 
zH>tf(Re~_O0k`;|TH#V}$_98Zz?*Ogq?z@P0L=iO06#L2{4JFMd?5q$WDt?s7n~tC zMcr)mZ46u)fZ`B80vo4{&V*iP0X7m7Sr)({LTBK_(mY?>13j9{Y6#lg!^OEtj?npc zii7u$nqLzDG;fa*gk{IpX^LRKUeJEIj7q>yZQOkd^wmWy6_`caM|Jr*4VbpF$U#Ox z=kR}92<4!l*I54ZO+z2zYrHUApunk;evM7D@}>$A@S`u2$>Q2z=D(I!h$0Y};6T1;QGclOl^0qjqc=St z+k_ml6vjC#_Yl<+e(&v7!0kQ7QWMsV#npbCCg4CJO*SiyjbEK0CHGn4lOK!ix(^OFp?M5n|;_5tO4G(c9KR)m8e zT42Y6g3W%D9s)q?9pDxAXgpRS1rP-jRkz9m8`Wd|TTw_t63~EjAw&5eT&p4ozUM;# zNt=E_$bbb}G(QZ59furoC`JKEk(9uUvWiGTN?iWCzil0U^S$lsiNL^D5K*a8f9-C0~KM zwtdV1?Ml)?!21emqwdCjzR-=T5DEyOF%ty>piFZQScA)-VK4zHe-9%Q=xZC0A_nm{2jFEgFh}Ry=U%Wl zMHp~6T$U4GI}Z3DXcm-JNWZARTC3Cyr3!{FCp-ZwHw)bZk*A3|Lba>I=bsOYznJ>J z@dIj%2|V>MMUNt6!NS4W`OT(7Q7gca3j9dsUFFkXfPZD6ICHh~D+%raal$qc17@l7 zA$b!CraV+q+Pu(YWy6_di z)o6*udIE9=fgQ>ABY;Y$e0V|x9Xrdtz*Hqgf>Se-3@ZYsYXl|pYGN-PfdQMO1E)4Y z;Y31<1pX!S1a8<#hVLOeK#@!y0^0qkGl1U!t(pKp@UK$G@&p#Fvs<-S9xU@G+`+i34Wz7`<^nwETKL&(0=|<4>#ib3>I2Hssec%L+n#IT z(i(?01L`mkqiHNwLG(xr>3@oZ0v6b$6T%ec|KsYbqpA$Lex)U(Ly(ki>FyAu6r{UB zQu@##0@B?nNOw1g(%l`>E!}YE_`cs=-@R+`$Kg5WnbnDDxwTY=8>(Lyctw<}v7Y z5ib z_`k~^BQ)0h?fbh-^3iVi`#pXgiu#kcqB9huf8b{o#{R&`Ux8+dpwtNuLj%-7N3N*m z4hoQt1syd3l_`Fu1&!Od2LsZR!YVm%t#35f%h~S(E2xE0<-j25Ty}i0k#e}Y>mXi zM8M3(k+0%89h5M`gs6k>w4rn!FwCF8v=D%vMmAIAcY(*qfRNOyf58b~FEvSwe>k{<#U1_t`3T+f6a?CO9``{%ou5#h~);LSNM7;46ArB={n zD>S-BpxF=9&;84P6o#}@DGz)D2bE};s6yaD_TqsT{(J1xB;y)k|WC#B&9A^f-H7K0{mtM3uOs-p%InhORp0QAG^hT>=r-P>MjWO{|A zR*A2k%`2!*&Tilm$Jjleeu5L@iz=dlCt~pdP?(v&VraayQt2hf(Y~3b z!hW3u_?b9Zu#r)0fzYUt|3?M!q+^AFL7;%ab)tCjFODG(wm&?62(Dnv``Ew&??*5T zL6=!tu$T&QfL$2u5Ono*jKqMVt`$-Jvn%Sz5gXuz3E+|~oJ&Z;0r?|^O1H#nAy}6! z=wM7b|7&0S0JXVpo{fPcy{u1Q4JacbBn0CzqZySkJOuTN`4EsbN*rMv!PkA==SX?| z4}%b-$#6#s-xfeCTcN0dR_H?K?>Rual1uX~NL^hac3bt^8kb|`0;LEpo2EjH>SMr% z9RlD&8!?4mpx!{Yk%9z1b?7mrkO{ z2i>j*WM7$GKVHZ*%rm+N1fDiiWm){l`fi=bl$)t$5}Mz)|Vz!B`(o$CvlwyvIfR&UG7lAsP*FNNPxzfblc`*xaRO1jH@p&Twq z=BIE3$(qeAAUUTMU>!jgTLs%>Y>C;7zTALlS^6Rf1T{BWB{HC}7q)=>sDT4eeQpND zHd%I((%VIza{;JRW*}O_u^ewW?nkt=v1#WS-js@aw)}P_DcL;f4J+^ILG;$baS!#e zGi9Suk#Or!TJthXpU{n zl5A0`mr<%|-P;vI7~C*tXd!=t$a+${T(xWb7pJI^eaL?Dy?4&z)c*uxt2{I{&w*=< z2vfde`Q_C{ae3SPBB+EYtMz%hTY2@VMAtA7Zx95HNDiB=?u@m;3)2>@j?0p z-3NKh1|E*NikZ1W`Aibg*Y2uWectq>KUw$L4zjPuZH+NbE77CKnO$$1s(F>SJp>lp zNizygcWHQlyfZ$S_L$2H6Qc60E{%oQ@1jtsKo0DkvU}Rbu>mG90r$l4S^$(NuRIK3 z7E@nZLD4wQKz=Nxm@5eQ&&adHoYE&OG%=KOD%U*#@Qgu_!)186YmfK|fn2FDEx;ST~ zYVYoLAv0~Uu1xiM3JfhCB0Gk@aS-Sl5XO{L?f1PAj8pfyllo)vj}P;>72ud46~qC5 zRP`Bn0D(vXRi#`PEzQv-?atYQS0L$)hJ70rm1&f+dmcoXm@NbF>8ATNJU~@Ia_i;% z0I}Ekm{}-uU9ThrS{dGL)b)ag6AlrrSH3t%Q!tdqHk!a({~~7YupnqqY2+x$I=A7$ z&vht|=c(KR!&T^1?5tjhfCK69JT=GiTWfF8@uHO;mt6#@Hi5Ct_1*gt1&C)vC3oa^ z=~Q5~a^Q&?t877(PjP-NoUSi`e;In*5R&Zz1uYFCb_h=XR8%NGn{o7!wNu7F!sXyL z>SD0nDq$ab2DA*4ubYmFT0u&#j;pCB3xW6Jjd}f<``?;PW$r7F(k749LX7lEZrk0( zfZJro2z`3U{X?Y4tI}M7)s3EX(YoK>S1DKo<*6;@8$UjJZJzpo7@%}NNIS)TKe}m}Ye5UvZaUcTxTmJ^{87gM{2AHq6oBs6r2H{okilcGFrQl@2wh=W zm9m^FnaGopI<4i`B*(`)9=&e>2sf=TL!roHjy*2JV~7zLx2n4nAA-`@gRJ=Mi?d3R zl!?a&|L9ZO>&#LzH(>2JfHfblO!rJ_7DXr@1IVKs zwnc7*3#fg2dH5^p!?04CM1lJih~0jP{N%ve*@@IhGzTyd!*mV<#g4>HCQC05mH~F^ z5~1Lx;z&@5`D1#SgV5OX^$(50UI1Rju$uiCWV*}om?g7*4-$F2{(f`Re2xwu-UxpN zE&qKdM8Ob{?J|N!#M#?OmG;-{=Tbvv4*)lnYtEU_7P`AJ-*;?~yzV5D?D2ju4#Cy+ zxVV`mJK$AItY7&UnYxBOK(cMxPX1|6<6ti>Ua0O>-%Szt1;44&Ky9v$>?x^2?8TFOBzJKCKerCq&%MtlzLyt3S*dEy z{q&H4Mvj)Su;vx`0k#{}xrtl!BK`o0tp97Cn0DwPS2ZX>`?2^uF-D#0mD+0#yuzwK zJu%;Ne=$wd9+of@6h-I#HaeKCEPTy)^QzqCaa-hxZGfp-UbcQRM!<+q_N2V+hF;ZN zO_Ef$zt2_k=AF>pV^F3*BNt2ja{Jv-%LKK<0ygT1!L)`MzT-f0@hj#I6@5~VBg#z9 z#-hwj?-Q^3TXH@QMr(kXV%CX3z&Q`23)->Oi&1^cC}eDI!x&EE1%l2^a#;zd`t+Le 
z>?tjgbiIz>oE%P3a(+9YKr5>IVyviocAi(idu(g;_r9Usx6@@yW*wwWmUge!3VOG( z_GvH9nl5o%Q_PEw0Gu-BD9VB(6k-)?3KjejMTA+Taav2-@(5KA5HPNe;T8I*zA4tH z8ME?d0}`^2us*vXo4_y8MrUK}?}I z77jU@GBwhYjVp>Gex47&=OttyLIJSC;#mT=@2RZ&oI|{)K>^oCXKb2{X^G?9!4IRF z2JF4Np70OZf5lkbOb_B#3#K%nxxpOs5JSo0OD~~pfhETB7IsJ(N0%qGRxx&aVa`!s z9i$#U^{08??j9{cF>jrI1S~#-rm7y+q=Sb`!{G`%OOrCZ$N?@K1uv8p13NQsD#qX8 zxX76a#a7gT!`g8&ysn2X*??r&x!~*j2aO#ivd91 zh4{^*Ow~gQTT}JzoFh-i9IoZ&aO(HfB&@K5B^}*``2n{BUI1e_C@n-yz4!} z2oa&M*Y!xcUDLMu(#a2>jWX8OlSYp+l5&*!hoVbj$0H7D^Q57MMbjTo%89cMtn40S zPZqC5AJ@kT!d!Y73RpIyQ~x-KnH$LqZBnt!X@75bP(NJeeOO^lL6~muzR-6rVzoMS z4}y4o405Q|@yeX7cBtoH6`t_ug_scy7-Tq?aVIWvb_RxeljS{!IzU4?p)7me>b30E zaUJD$YCju>Rc$Z9zIE-1XK2}M9-1YQg$3V4`D9wF<4;wq z%HCk<^VAZhHPw3wJrYxo@S$=2%y0z-_4S~-`W=I}K_calx`Eyo&Tr0zrYC(rp#O+1 z%neQub7Fm_HH^bIAZ!q+Z<2i)^HYqQIUAadiJ=Q=xaR}m5x4JCFL3%}V=x{Zeaup- zEX)HMWFnOHCuuL(7Im34-&GZ3fHhd-HOHkLQHZi9c!$akYrt!$`_)C+rl)b_LU0|J_;JEph zlv(=H5ogTAPx55b$aWFybMK}+*3VS~p9Gf7sA6yaPJ8I{&AqEqC;qq{qrH?QDBRos zHKvOcVAg)GQ(!xX<=C`ib*ftXKH}2bfg!@tJ=m-mWbN~WL)LIUvvTI5EM1wZ*Gj{# zJW1c{RfVw^Yci#Wnr`J?JD697A;HtSe18=k5%_&`FE)&*KRM>%Qs|^IE=F5Xzij-C-p;Xt?=zH=cttRI z`HpcVSHP{K8Oc>|1$eKzI@YZ-0+6}0o=NHupT-j=aiM%A%Jn)ELLa9OAD!>{O_98ChLYcGS~kDAQjdJ+#rt72LFaiQ#oR$k4r5t*!^5hHMR-sNW&A z8@O`d9&9ujgiElbXWUd=Ol#9ZpoLyf4XX<#2rWMW!cbK1$>!cCgl>Pld-j#x*1bx^ z;#_(CL_CKW^CqljeNZVzovFEQG5%Qb;t>?rvb5jsWzLDT93|sj$C$sesrgVu>MgwS?1!J$GC;zb1kvuk=J-RZ{?*LXtWDGoJrgQhsF+@8ZbhA#_CG= zxcct9d9nOk-~`_vj~DgN#!uQOW}VMJzu5gIVnbc9dE>e^Q+@6Xahl?IIC$fk;V-m5 zCA8vPeS;cE;Nl>XNYZSi+?MgQdDSjEZd27nQm#L)Yd?9)vNRXO94k1{Y+goklyPh^ zpnkvPQ77MzgVZcAJypt0=e+kqQ%&CZviMDz||Hrw#6*BJB@+|McQ6H2m>qowP<}jZ&H!n63k4MeUCKK7c z>tl!&9!j}uSgdzDH0jA-wT0%mG)s4scOI{zZ+2IDrJ#*xAC~W;MU!&6HRg^a{BEq| zFi#hHc1a`!_K=y68T)o03&}jV)yC5S^wJL#t$5?5HM|lmHjeZS=3-t)pMY;umM>am zyo%5FuPb)7zdD}iR@w|*ys zV63^YlA9L@BIxk<<*hj`$#JU{NG7N?K}+IQ2pQfwREt-(pSa9O0`NrIUa=S~^!lm? zXno8no_E!0YP?@d+Eza?e^$jVQM=+^>|*XVBQsk?#aNLe)krC-Vd7OPZ9S_@-3OvR zrJ%zm3x+2Zb-SJMRu`+FlAJ0DzIRvsjLCf>i3hpnkLIb)+YB04&a(H_K|bnkXTwA3 zVR@V}l~naYPIK;MXsnK2H#>E*$!!M!@{EOkJE2<5^_Uv2h`8@-YB`^|;St+}?nqz+nxQ z4VQbP#ug(D(hc{nJE*T|Y64bCwdf4>B6oUKv*Hy#N=&imtDkKNpUSzknq7|CpB`oP z^uLz57%Ti_VtUJ7bpBbm)Jas6Le3?sP5nZaY~$=0U@iytkokCuW0fCFF+<)vOe?+` z+3!yzcIk+aKxwvX6jff)3}Q*|HF0N69vCotY+CCN&~fi`bI0H7p0w@L7)QPpmhpad zb#RP&XVYH6s5Ejl`^oPobx+Cashsy@^}?{;cNJX*pDAi{m*Xk5-y#tE)Z7EE#PnMG zRmJwtr{(3xO)Bcln)-A~ax@8w+^<|z+5ZDN1&2=h zLuPoKx_c^awwoXmBO?y0X|Cd1)ds}#yCak5sAiRt;K`RtM71jmaQ8zu3jVNZmD*p% zon*C6OXnK&mMgJlvPq=I$qNC|T4hB60gwatGIQ|o*?Zx`y4r5KE6+eYHT!0!n)S)d zIr4hOwQqHx4sY1vw{}B-sDSjx1U!?N)9MV%+EkZthLX11y)B?*E&_yJG>7=n&51{d zhhIx_m9(Ps@gmfE9aQ&<8s;e;wF>hM!V7G-%kfHLRUQ7Q9BA`_3e&J|SpierNhO36 z)r$VnN!u`+y1DK~1qc93Sws;9mB;~j?*@CH(0B+)TU%~cY*L9BqZ+Gq6E&TDZ?64p z25;W7BsO5SZC6II!adC^8$goA39!t zu68!}Q{^Ic#}-}J__v~T@ju^?5%YEHUPQWNehRA@czYD$#GU1@`YQzl*7$CIoDZV> zkv?MVTH148F7ZpR9pY!N0ipXHOW39Or$3hiu^}#ZfmK;o(ys}(0U*9(j43o>ay}lb z-}iA7WFaNR93S}e63n1$HhN91kOx8$tQ$ur%3k!b-9``Ybp2iHN?-huUST*o{GpF$ zK6*!jsT}ymNYXYR-#4D_y-prIW*J5I*GjM6dfF;tIgE%Wrcag^(_=b#&F9Lhsn$&3 z8AU+IS{_Io+BYt&?^YXnq$ASGMD`RuGmLzgG8$nNUTlI8UN~F}icp{S1UJ8I-gp$R zvQx6?&FDD)>EF`#Ikc#DVOX>q@owF|2Z@CFB}OHG`lx;W1V%1h#K~p23Rsk$4hr5M zUkqaT5bvF4nr{jQiV)vqHM?wP8cLYGnSNo zsc1Wag=hHk<}^QK@drs`3}vmhxR}H4X?uF#Ql|A?mi-ad0AcY<&Yr{82FOWw_|Tr* z$e#?6^Yb9Qsr;7dX9u@F+wId%kYjHu=K4?SjonK`gW;-7&5glrHjt(09$X>y-ju`! 
z0-JLT0jmXzid_b99+M#Iy4n88q_TqTlz}!q-Tz!P``-9PZ%CVbhVlmU&jH?d$D5o> z>P3kv^GwM~BVJK$>(OsXWzQqs%hH>toi<}XdLDioR%()aSh^%V&K;PDwn2$;Z8aC_ z%b6!+)N}&X^+&DSCIl0NtUbGd8|r=0{CoXDqo~STc$SdEeuo4nip1ENvXDOEr~zF6F<6c9ED(Gupk4)qmT* z6*B4eywz26HqDgfxtvZ<6(HQ-i&b-RNMT>HXuSpCijrH`UI}j;hn7d;9Zw&-!nimf)X>m(ofspIdA0g9ft}Uls)|NFc*VB)cC91q%i@y~%VC_#X0I6&Dn$q28(q_Fr=~rzs6OiM%gJ$T$vu zwA9g$vmRoDWvARw5JIyhhn}?wSqkTk=(qe`V<5>XuE9OliWk|jXnP}8$z`)wHE2mS zh3>#}jeP^;IQPE|Ukce9?`vK1Ln7VD7;rAu;oeWi09OgM3v{X3#21^rd%r~7D|%c-huZJS=*MQt>pa`ekd! zN7#nS!VJM{-K91cdJRVy?h*=n)!pjS8WV^lJ$_&*(@nPD%rWK!d1t<$dJHVpp~dma z3bvjXR}zTIgtpWibUU+X?G_5B842!xo7LgsL!Z1#n4Av%h0|>YN5)@LTDg zW`{NNQ_6*g{>(DwI>$jIBr}~oWzY5@7+~c089>ULf;6Il{da;&f~@UCa(3asO^(KlyX!Obz=d=S=a-a`i(Uo0LZ8d2o-SVv z0@D{|P0Q1#i|k?BnzhRT2_kx1wZU*1R=GgtR{!U$Uv2R*Vek_*j-LAgidm6xY{S)) zpr_{9@ST2g9syxg?$c}?m_%WnKQy^V~-Ihu1xu@KA zRd$+BY-x`XvcoZKqwY9tp-ff56NIvlXJXP~WDHXSc(FI0eA5%D6L<71kdL-W)Mql@ z@x$-fZMd=-`9)aui%3aqvW75(p*K~991XfVu>tQj-Ge%yjN6yaLM0HNqu-a2P*u#G zzkESO=d0Z=B3_NghtR*c2k4uU=#y^O3XJZF)Brx}2ZTEX370gu_52(fX}j+f zVSo(r`*)d-{*gRJhXlRYW*e9vqISgCdvnh)RKF6*F>!f#{lf{}j@mHOhCngqBK4DU z{k78R>B6N<)Xjv8$(6^)cce^LcTpeXCQ{MqMe@V*|JBzR!Rl!+)@idVlJ!tKX{vv> z4UEVA%-{abKLP`r{~H3bu`peEoZ{ywe{^Y{?!RNTjJcM%Rv9fbCNpsg4~TO!9EFf5)7_@Gxzj2tuWHS(NFzcXWOgPs+q?q} zG`=4B2<^igzB_q*;D=B1<8XS}av?AFb0AUwY3E4a^TE-Tgk8zU^~IB>y?xN1x}-{W z`BY~S^GQ<(jylYV@RjWQXZI*;5_H!;drJS8c|McUP#= zUZ>I)nZ2Y>YTScd9zod)w3BlUD4K^P;PSIZ0C}2@y~H@h#rE9wz=Afq`-A!D21Q(| z=tee14YUFWTz~`=9(5rX!6{prkvqm+QxRffg4PA#GBVhwzaIa&g8~f`1E0dSGIc|k zRgNxQ(8D6J8(g5VZY{g$A%9}-yepN^@){0AIZ`qB)qwy;UW^r$pQ7L((bKl(zhvg0 zjP&2o?lWY^PrOal4rd~nCSm_Ovfh58HJG1HJl{$om5JuxpHD(OAHUX4k-4I8PVz=f zdf%L&pZ+?~TsWM^{5lao(;* z1_=_dY#XA6MQ^3sMHL*1TR!5VR+5$xrR!I+cPY@mqqJibwonOkkpclgvZ`~3$MkkR z0Mz|Bz5BYBLsKlY$ur7Zk)Q18iaj*nIeUM@c2^!mBTReV3i#Ak*C(aT<3au=tMkv- zuC5L@36`&T96rqnV7#IKR{A1SZdg;)eF-(^}O^5d;iMh`UvS6|A?}}@`E*qM;oH||aLB8(pmX+~esIi_Yr1Ps1n)lE+ zzw(E)lZy*kSg)@kQ+0BvlsSntuHL`yrfPNA0~u`Tnr-qFki#y8D$g#MIRp6yj{OD$ zIOKB^5nYx&aenVV)~i^;NZ*khZ#_i$Toj$A=Nqpq1dyXT_~6e68|;4*p?p!n889R_ zBz*B8f5lO^1aLd)BOaB#~x!#Zkj zdd!4-BZHBSaV=BtFOZ4gdAf{c^!$m)rh&lRoT=s{q1ynd@dL zCqBfeq3CxnU-pFc%ODncO_2>a-V**jTG5#9tCIDFm+3%ZW)g#P!K2GxbO4g!fn@9jueEwWD}Qq*vr9!z}kct7+T2r^kv#gc-t}Cuyheu z`k%G6t)GM$D3j`5z`3nvwD-y`KK$q|3j#!eTCj^WmS!h zj#{aiKlOvZY#{2^5ZfznIEAD)Vtiv4U*1d38SF}y1RG~q>^(G7_537B^3xjiOYf(N zeun%-fn524pa?%8!XI6-RikxycKxFzGUqd6ars`Lg)nHY$7H1{<4lKaKsB+CUs)_0*h?fR>ok&tK>jh6rnJ{;og5m+Si=P|f@CG{ty1FjB?iH&djM+N?js5m+QK#&t zNm%$XvAwdT8t1@i`TcTbV9@eA%2BW2dmGm|w0MMDeC-BZt3~ZW-nOgXn|`)KE-!H4 z;qu;}8z}l<`0Z78)pq0_jKa>#z*33A(!jc#=~6{VN)kC8&a1|M^CK3be8EwAx_Ktj z@S)McG?F^0@#~z6n<1eoQ%F#&Jde%%M&I|VufLz+|3bqybGG~dCau8=mMeJ`WjJgG~ z6=rAf2a2SgVXj~;;#>2eoBiU)h*oAOPMQ{{F@(VA zvLnXm>9gDWQQ!2nKb{u$ygaSGJA#M9ZjH;KmKWVZolPte*C)F0kPx%S7|mu&-ES>h z&yBrU5w$9C$<>@X*@)}69=x)HT1l!J)G3SyeG(}X z0rxX{&D@N8Zrz1Fie>T8qtmO<@vO z^)T69^D^vV^U~YlM%D6&d`j!+&KCcWHp2ysHL2yjJ4!E~gQ2OS=La5l`{TuvZM8|) z`H2R?cn1E4bk0b{NFU6AN@P!3{cdrZ)~Zyuz(I}@orSN_;262b$gfC+;tDN50hAqw zla=Ba)fM@I?a}3i#jMFw@T%J@?BEesFqiUi%p4%J2kFUaX$M~O)sbC%1dn`vLKmIa-WhmF!b)4RBs@V?``e!KSZtMSV}W^+}h zpIihH_dKZwg(m}|fjDLFAz*8yx5q{URnPzS=1kL_I~W#ip^F z-h#lHU>rcXASf|*N9t)iSCyw_gn{lZiQw^e`?`rX-%*1F1KA_nuum}kksl(R;qjz> z@d333TdF!?qKmYS-%zs!Ls+O8K1Z1BpgH2%P1md)_hOBsQbKjZhf-zbv6>BzgGqFA z=F``O7!Qcs0^TL7Z?PhV0%pzb*f5_XwOobCI+7NXOlVrwl4c75K~bZ?WPTu!_o(1bZ zy!Dt63&pncZ+xeh;I1bXkD|`Luz0ceh%{%(=@nb2_SSme2ys>-i1}V?{00dx8NCU3 za2d03_=g?8=p`9N1q@M9N+0Y4Tso4G(6f=UQ6;9v=b!2RcqBe>6-?{0Ktg;mhU9)D zuB+H^th-=x$XDW=!m?eBkX&_hr$0&6>s5d|&PP zA(xnE$Wgwmu+e+%2DLA1qR?478nC-C)8~q6r 
z&+=~u1u-pyz4WqME8r_4cz?H*y-Y0phtYAmw5P0#x+nv|jmjNM1mdO`lp@t?SNtC7eDTQhB5cju;^8O?WB$B72D zaNU8J3ZZxg9pj7MO4)|Q2IcCx<|~?sHv46*NRLtK-KKnXuZWCIWHtwIKOP60Oj&mJ zKU6u6%(01j3D3Hgjy;AMFe%v$Oqbe7)utoubyIbHP^qKis3H9oB(sOAhJko?m4#M) zA_Ba#hBFI(;GH$X*KjZ^o7(`$p?HH)u8u}?(vtD*)|HFXKHf+|d9Q@rh_OOn5#b(jZB@FG*74Z%6h#>&yoexY6` zcZhnE`r?wyVpP=p1cFY$HX7tYA=cZ`8QMzX!Il_^ycQIjdg>3a&HgEYaYN%DK_t_Z}ABFk(kf`eD2M?6ivDM~@?$d5?5y+Evmb zqmmqh0AD>Dm69*7E{-9G=*rJ$*b7_xs;;1d^#I&8`Y{6rX9_q(|4P9g)mS!%{^(haDUD>70#d z8g$3N8P3AW_7Rs&n#{kzr9n2bN5L6+)A41pNR{#8CtE^Dpx(HaB8G4>SG-88e=thP zn`|7KQ$uWu9G&P@x9(@D?eGeDIBmw2SBz;X%jkMLTC;^X2SNDE`fV+=*s$nEhq-|h z+WF8jbq>w3L2g>+(qo?Xpfi%lQnoUHQYE$_&pF-mHiQx2nr#8lYL=dtOu4h zFfyogU@VY`p6$i!{?#mhXFeTdfyzmHPMAaQe7Y(B`(=S%E9Om>roC_ zMvW$&Gl(RO0TULosgXyhy|Tl=*_6$UV9oovr|w@0;}5s2CP(9~d-st@BP^duWW74~ zIt`vke_9r+9%WZgGYE+Z02d}lTy*5;HxojWm;RfRZ$xu(E4IxDb=MJ!M1D7Ug%i}Yd?hH)nacJs+1lu6&Kz6v zU45#L_|bAKD=hcCs0{I-dw7(Y}fyjW`Yx&90ofW)z4yAbj0!!q!sRv6^x_j$5v13>2`q3$$+gU9Xsod;0$e3&6HP?C>J*BBf19z`~yF zJRS1dkh~SvoLNnjZ6TDMtspSO)m=HOQTQP}AjEUo<~RFKRD7mUGsrSqM_?ygsbwv` zRBKX&MMll@ZMuZi31mo=hz9fFfLa`0>9JdD?Uy>%B-hfb3fhCcLV{wLCc-n0jjwMf zHX+7bxI0-=>vwVsG^Rqo!@Yh~-?ns*s$;a)_1l*nC47jap{FgQjs2Wen{h)N(Rx)P z)kbh+^Sw$TxmBf2e&oE3x@UxL{fx(h{(wr{tjfsc-laZ+syk;M(f6SbI*(r65`Jtq<<7Egy;vhEMVM-m6-Pg6@cnLqm7`m??~J(Fy&XIr zjoU&FbDy&aI|TXV9&?1lK#e4qru+rfo-7~LM^eQcl>$S zIyvATGlWzUkF0f8^{DwrrNOJPg$%tCe}dIX&+QYc#*6Xl_3mEBi;nqE%EbOGm|34E zM%_>UDrou%Y8mgj+MRi&w?8$}_^T>oX5VeSO`T$PM;p5H_Q}NJbvPTnt)g7#F?~85 zVTztGe@vKCVM*AF|JL`XR-|ujMX_MPYic_(vreM@u6X6HrM(^L*wf|lo^>5*WyQtz znGG!r5GxM9ni|f{>n`P=0RE3h|Jxl~!d{2BUeeMJDHFl4>;3Zf$rv(?ALw$Fi43X? z2DP(Dumhn6^t_T^Uf=L<7h^K@1Z^t3c|}_NV#t3?-TRD>>B&D>6^(Px59!SL>~n>U zxG+WiM-7`8SO28!%t2-N%xTZxQ~48 zy?k(CTX*7R<-SaCqL}0wp1&%iAa}Yp$NH( zY%hl4V_G_34OG=1q75Ug(v*Y0WhB@Rc+kZzK+D&WaKcJM&``$4l9LBsFx9FV?AWAF z=O;E^RkpG(Q}^e>b#1rLjAf88NIya_(PC6i|H=3b7<>OWVLk!q(gx9YL>JW_%bjxn4MKZPo`AF?Jy5p1{GK}jC*wgrJES6i_23{B}_IbfOzXqcp#zKS%-Xcf9Q-*NrXbMgvg{6o~RlF8JQ8~ zRxNx_HM$v9PVtF{s8DCOq?Ol!fl=$?rU+>y{fgt5F>`8omrN(k+`Km9-yR}_CW`I> z35kkkQ!3@}lZWC0d(!^jJl23I#-s}T{#-6yy8@d)nyzJ2KkWX=yQkZJf{28=c~{TQ zdk8#Z*hbtNLY8sl&s+SfFVQ!Mxwl_9=`UF(2)ew( zIeOXO;m{Cox4_i<6SVpknd7c^R9xZ65I*XiLIWpb#m$}lf_m|}UO3$MimiKm`<)h3 z`G;5D7@NnPcY4j9mm*x_EX-NyP-!uEokgiZxl}#?dbxGz+mT{2qPp)&|TUR2oIVf zR`9(Y8C!bw)cv9UcP5_|+P7zvRslbMK~;f;B8zta^Zf`l3F)!f7CnnU0S@x7=x6D* zpT7(hfp?9RL_$ybqt|}VkuA)bDXl2s`xXE_h~I+dKkKG6Aa1f< z;uN1){Mj=fq5`Gp)Ba&4FP?=Yf@X9iz;C#8)04g`W6*qU91!D5=aJ7Y^xYQS z2mpbdkw-PHmfEsS!Bh+}M?oMO?cK}2Zm8g^Jm^=uN7nRq^;m7cJqnx#zl!Ihny>J=mn8vDRbjPC*s7+Bge0NNcun2WJ1>KL z>J8q=$I5zi)QYTvZl~Lxzp0ugz{{Y7!M10F*%PV16kJe%9i|pFFLk_2V5Wpo$6jTp zD?4!EPJ!1E6Md`FkW*<*g$xyEQQuouc#Xk#pf7FMdCh@dtpyi~7hshMehstNPWRK~ z186^i9JEB;3I5dx^mUmA>cJo+4s{(%!~uc->qZ*M`f9yN zaMI)kR9J#aU}=v9E091%Ab~?6nB}oR0>AmgEc7SufN3HpXM@s>5&8X7 zW7Wc(1-3m2jP!I<*}$9uAkH)RmzuAhp|e88HwMT@-2pd|0p%r>&L)nKTPB|Ros#WgNmSM3!rupM|#{5 z@H(=-!&>St0M%Em5egVtnc(XGxpp-m3-kH$bm4xk?YX_ zroqFMBJh14!CLD3M`9Vk6mj2{bf9q#fLOIUiKFD8R~ck`7u{TV2$+~J9F8QqLlgspRF&fP@0+#ms(OSAniHRDdY_g*MdSbxf|5A=r=0 zZ&f-@SrY&nAcE%=C3x|T34HD-1dXN5S6f3Bgr=icOTqsb5P>R6%J()>2^Ktz1%kex zyKJHjn!<$=5RG|Xa3;gImf2**7wk$g)+|lqlilR!3>ridQ1lmdtg8{@G1BDbi z0bej6^c_iVKhVDb4D<9$&D3;M5vYKq`jTedfiTG75~dcA3{hA%Kyw1P0;U)+JY}dY zgHACJIz_4|pv|yr)t>;J3!nnAC!e)V2B@kfvt&GLuO#yj$0|7}? 
z++z-*E8$yFD>3QmTgqQ&pbuM0U_I+d%{^p0<^c1YU7o6(saD^CC^|`^n2YIE^%&Kv z2CTbp#29YGp%Hf&7&Ua7SRL!7;O#ncKq+k}oNy}8A0<#h-k)Zw(2>)Mn%fLwRl|bu zx#5f&g%6PTqzdi>jSofwqgoeBb_TPU4F{uIau2(NTJ(2-V#X#m1sh43{|`4BVgbR9 z4qWR}peA`Bi?_*Rju+dW8QAJ=t!LJ}P^o~266!x{#_JEvG|5=FH9Q~(F3?ITESgXt zT{66&Uz}wGPH4<~9vrFhvjh;^R53+@f1}}RsuQAXLk!HL|_TDH}@Z1^-TuDG9wcFk3I~SI7;$VVS$!4{#JVi*O9&jJQ+`E&5rUOcSc-s#P>^QcR!9W7@DM%|D*cwWWM`lzjd7t?? zBLvF714UpYo3F?Z|D#ZQ?Hcb*lfekos6?49e(Pl`fX^XN)A)R^Bo=fr4S)Kp2^Ih) zf#;Pp_Is1`$^6bm^2zK{6@nHgMu8huoqgzta+E~O>f^@_?WD%8}}Qs>_Im31MT zU|`Tiz@BhJBighPG~oiJuuI~a$rlv5z5yLRkv1ETxb;`RhzlJ*w4?g4kcjq zR@BP?osz>8W4nRdMNG$Xq#OW_OIQUD8uP#e>O%S;$p}~~4?jLW9{1e^WNhzQKk1d4 z%&t<=M~P(8t6z4l_dLZQPfTo9m#Y<_f-QhD`hsFQzv)QtKXEho0L$8X9+R7&KLGA- zp_G-C#jlMq1LiqO)EX-8Um4|~t22fl4y|G@*ym9DJl=oWq`T8llmq?BkVD0I?en@W z7~Lfm@I!>|!qsAddvFiL>y!Tc&DnPGc0qpQWiuR^hT zA*Z>whbcUEdR&mX9J6sI=)1v5;3vPHYeH4z1=O+YQvBXWwJHr;Z9bwFG2azboxtAa z^gafSWR$`hH&#TKDppuC_lQ-qD))8_;1On446KL$BeNB7wmd*4E~+|0FqaKfybTb* zMAdc><^dNB@L=;5(_=t|nM{Tt#ALq4QjIOt1*(~%AE6c)j}{eJ{t7H$fo&!=Ipg6M zWmdsDr(XRvky-mk%1X;;=UN_{p--EX-IeP>58ZloyHze-imGb+)1}m0T&ka@O9(&D zF#+;|XrIC=xUDi8oTH+o*N)K)oPuX1(!e-=5)$`-eimIFOy*#>7$xtEXH1mO6kbB1 z8AJR>4I&^b1=%KIU`E_fXH!8S!YtPJ)pIDa0lRZQE}!kekV?aQ@6Fu9h3`_6sgE~l zs-G-3L?Y9pJa10vgP6^jp!<+0-}vXyn}DkfI$<04%bIf?w};i(!=VwODA7qEBT_G@ zQGpEGzw`rH`PwXP*G4xp?T^&pqO*`Lt)4CLyM|<_B6(y&<0GTMu1!&0L$#L>_7%7F zdpXi|68c6PFJrrTDti4olR@jnrwLZ+IGdDQ)|H$+i_t7Uhk0(xaVj(CD&S)R*FLZV zT)MIWidv%uj$2eJR=B~jA6WCD+8XN~3-us_@nCqZ)@lN)@fEe9b{xBc1FQi8SESGp z1akw%d{>Cu1e5!sunGVhKJ^(77DzB?|7cjtRY({i;{G;bia1)-$ zR~2xH<@;>$Z2aH%|3pL;w!HN+WiV4j9~9=J@6Oj+eH<_|gnku82Sg?0^@uxAkOP@# z1LVehCQJcgn^DPtUossLz$zciTGb@~GoL#p;arAL0mHr_RYV_Tb9Z_8UGv4jKid%n%>E%rKw?>Haz@?N zGXtw6SZJXRu~GRbboz`q9WFvpk+q!oAqzyd#G1hdNGLf>CK!vS(V8rjM40?mH<*Bq zRB;_^T|rYZJy5N2`B<*3c&M;YWf}fuKCoZC>_L%6UMW^L87Yog$m8;(;sW&M)wQ~@ zXP}8IEH_Z1Y`WY65jQWryUk#x`G597C$D`~GZixg3u*Ybhf;a;rm9Gx{r+#Q?E+f6 z{}9+cSTQXLd)`)t%mdF`Yf-%yUKKcA;>0@RH!5@ zp$_npH>v}qTv6!zJk&0_EZB^k z=lu5GGxN+d&&+1DzSyvBni%~_P-jL@AmF@3T3A@9nkV<7e^#LjY;~HX5opw4cRkQ+ z0KH*BSW;;dbv93t>;1qvC2&u+g)k(SALQoCr&ht-WlH!b&qG~Y7y%vxXaPh*91P(_ z5D%8wL3Yqs(lw(8G9y3GrKk55x5_t)P>9~`bSb!DEj}A?w;lS~-N0X=$OuBtKL=hVvRz9 zxtbD+E(99c^(UrcjC2(p71u0Bz_(un{GZ#PSK? zSC#`Fc}pk(s6nlml3SWUuksxVGM8&T!j5yh<7x1_sQI>yx9)3b9Z?%`anH~DQhCZ+ z@Wrgbhp-V{2KTUj24iRC>{_qPwkL`cnQXqsfF~aS0Jb^gZC(t1>aFrJXu*ii>7iF& z{SlRzEg9a^DmDo$9Dqb%T_~8(tN!{&67WvvkAw5UuaMJ}1DVKr!9t~JIDLt9J~jKm zbZTwxO1X>J&P+pLwS}HIWItEi1Zui2akc=xq>V__g50QG3=UWJ&6lApI`G%MK@_=? 
z>_P!wQlhiLd_x>g%+xweCuork7W(D~4 znv6MV)mqong_g%*-#~=1taN>NGw~hNkBjagOmZ+zT?Ga0chsy_ml#d&<^CNNLj(MW9+oG4(l1`{(9fOV-0|5<)ar;;tP2LBg`V1|MOl;qqH zE(*;aV1*-cfU#LfZy{U5^OFq6rjxdnk_x7`FgoP6vY+*IOTSxE44HJvGks0b_g*Ju z;MOY76TqU378?QA142BtMUa$;z#_m9$#w|!{ow^JNed)SB_bvoqlcf2w*bmU+!NSAF6KMGy@YJB#kKqBOI5Mg?J3oer?z8Ei7m*ul0m?-`o zdI3KcdV6)!SJTxfd)hM*Njv8}bAM;xc0QpQKgJEBFTGj;x&qsPIP?-Qz#kEyqp^U` z5BRS0Or&!|LK0pLkSUQiIIgQcFF?FvgX1A7o@X2!7LCkLs9^TL4ahZZBOtI0Pv)c ztB~r6a*YL?-`;GEJfCGEqjolLwT;eeW`a1%iJ~|sE}VrwUmgp{gi}k!?f=-WH0x4= zE)p`gT+^ws+P3ioR!QoYg|yYdC#4D(6b=Qgw-VqftXkkKczysN89W6TVFbw`2sD3r zHY2-nAHS<3h`@A!b9`x*yIjc*c!fvEX;eo**xv`{@v4` zZKrLSmklC3d?1S*4%P(U@xAr`sF*CU` zX~F6A(Ry{d5w9(o=P^DQvz+_&s~kYep??!KN6L27XeWv9sNj#NK(T^P#=;Eb27Hi7 z`Z2<~pAcV`tYZP@83ep>pP1OZ`|va}c~Tf2D9es)8RCu=35C-r)hi^d5qQ7%%JpLR zmss#qr35kf(-1Aps(?q?F%@lxA!FEmdso)*@_kVD#%D8EXRj;+9!a?z>r0$=>J~!- zoSqD6AHWgWC~?;;je^?v$o27nlUSHNwc$bj4(!v=neX+3WFn0`1x!BHa>l=`06FOq zsD$+a^tBGkf3cucCO>Gk*>5{_8PnSHxz+8|u~Zt)g37Q}u~(?zUrB%7VAlfOwK71CUt}=_Y{e zpN!Rdl93N&ZhBziw`wzQ9l4`?TU9zGx;0p$3JQfg9D!QKWVkdb-$m^FbJ{s^u(ag> z_P$VTvR@Kb|2^VIv^E+Zo2|wl{tHfrtC;RXWM<E^KDT7vY5U6`I|u{xk-qf9 zh737S;?d9y-RrMN-|pIa18`zE_+=LBU+=@QWfR0< zhbX||N}tNN!GHjAKTBI27z1wrU9C_vaLkqH979mHN0%r6;%Sq@BCep{sT@vXsca;r z&wX>H<(ysb&!5p}XU@Ip0z8R=Pl+jDJ1u_C)4+-(7#Qr6B;S7j#3g z?#Ju<`UK~hJQ&`PJCS(Az;*}5b&VElve0Q)nq-wUgCG1aBw{6^jQhOn7?6``e-8*A z>n7%$4oY+{Sa5=)3t#8=g}q=e9%c=wjrMUKumkL*R0HG4{ &f2Z+ zO;Cem2JIY}M8EdoB?oX*ys^HYN?go#r-AbY{2e$iJjdnOxbpbX0rZEOrXQVI*hi0;qjHOhX!=&m7((vl4uQ{GBk8Lxkk_B z%rRClX;n(fDQgPzuT6fS-I!|j*d@R8X-yOM(%G3P&d;{(VoJRPcw#!XJ@_es$?3*m zL`jYLQ|4|?wEpZMZjaH=5o0+IzFAstsREL@Z42b>x&PpkN{7-hN5*J zWb(~tPm6{pzvE2=B5nPg1xeoAJ6q!baBzNuf(?ss8pXGuYZg`i z=1jOWdEoMf_H}C`;NC(oMZ+XVml-$!w@OaTatgq5%~niVa3D*FEKgS$Lm(3xy0-={ zb5zYcadX_|YB2>MLA8CFA->lg?g{o!8rzhqguWO3GCdiO!S)%{B<}ypk zB8%Di1}E)##}q`^Y597^Qyq4@KG@`|3ArmEpB40&d4DjU91UKF5Mn-`9U5^*{S!B( zj8@ec&kfT=s_-$(k)bun&9R_Hri91}e*x0Yox%rlxSw;f?O&l{g5{52HXy@&WVZOW zcNM~?3zJ!Prt7)3uksyfyIITNk-$somJ&@W0@EkM5>|@cUpkBj^WG;`tG{VgWW$%_ zf;tv#1!-1#O$BZ0Uosq&!ZMw%P5K^uBEtb^!6pDG5+2KhdUa%AR&YnBe8_8r-FScu#| z&w}R_IYd#-S?<>T(uBKY!{NI)C^CH!lk)6{0((yUkDDW(*68q?g80jy$ln z8E|RRMjHdkCj%VANN~C_o$zbd{Z0K7iO%tu-A@F{Ds28=1S2OU^);*-J_UG}XmnsR z;z%~L)w0}n^9A>%1YA~Y(Ha#HqxlP36sFG?v?15Se@Q$*(ql90(Ly7=oDb%;`x045 z_J2cFFW*71Kr>YKzt*gES)x#R&6p+gzRRZRs|6$gx9y8T;B80XoGIZGTgD_HI_43yI^NF; z9_AMwY+7#qS)9vrAwZj(fp3}N9|(NCcQVYZ>OO#63bcmX4zPeVOX!WI=?9FA3DAT( zdN+J_Je|Oi>yzBid)0CK%w8zfoZ44r=i=Lr%vB|oiFuE5C4+HPSf!BnZM_aQpJA)# z#vW8vD97*vq(DD#eQGAQAWYbr@F@a-3#vL030-5J3`}*8M@JFJ*kzQJHQTOYV`KBg zf}X5A?|^tP8vuldd(v@C#|j^^QRBK}eBq(8D{Q(57UZ^zHOl}4xIb4{{Pz0ND(~Ve zQ}j!F%I*_`RIIJ}FJG!HRdjol?xoCz4H>{qCWT>@in6jX&xrV^n_W0F!RZi@Rt0}8 z{caeLn-H0mj1a{IoD5VnTh@csK?60`2CspH7$8*~_qqRBescn%@rFMJb&@a z8GIZJUiY&C&p#_6CtwERPjD$j?)%6mjz6Ay<+;{ve5O&h!-+9^eTVVWS`d*bbutcr zqOU*03lZceEtOC=P_x=M9rY(T8^k^L)5n6yS}~6j8g<&dTgHnul~Xt^-%4UxLhAPj z05rRC%O8j3-#dxWR3d{Cdpk&6O~6IMpX1^vUodE(M#nr?J^kRPatSF?Ym`{XYZA*RWa+t* ztN2hwVWytzHJE#-xL&QOsQ5x+mK}snEl^TS%c+{2y_495s$Kw;Dvs-~t2{Imx6SBm zV*LaZsTJjU*6B?OlQR$4Ki5m2+;92!;c=vgG=AUFm)U=QTp7}lB{hL4;{{PAWO;Jf$PJ8(%LcHU@yyivvG}sy#>%UMbMNJW-RI`z$pW?~&#tBM z0KO|B*7qrr%|fF(G`X|})p6{rqOB2|B1HSd0pAamiorr0^O3@AqPN#_gVa0WVt=k5 zOix=*3$CC7PJ$R5CgSIUP=0L)cl3b)nKBe0Yb>~oiO(YCaLZ}w?51IiAXi!VJYU)x z_La%7R-#;zl>|9WeEGMqt)M&wi*Ad)Fz$_CiGH01y6GIaxyk9|6si>dhDzkeULq(m zqy}J_eoM~kpM#1%v`qg$6$RjiR-MpMz*|&g^|8y6as#wOKv2pU4@9U&plAC8jEn6D zH<>QhB3vV(GsQ{CzLJpkST2YG?r=iZag>$-aUo35tg z=Fq;Kh>Z}kRz!QP?cDh2y35XZjM-p{_5gnhNWMWS5`Zl7UPn(*899{<+LUP*hGYFG 
zKtUlsTWiapUZhqLwNf1iKp83U5WXKiu!(_Rz<^!br$ncL@>47e4ROdnmcI#5$PmPHd|f5v`XzmaXQLV;1x3ka1R|F-DsoBZdRWW z;7|SjU@Q}x9vcs)$dG10t0kN2qVjNz2Wl{w%W@3=xTSlrmsb>Brxj4nfl|a63zsbo z-1INw-p?|mZ((Va(xPQ!se6CMP^?3Er^ZpcVgFiqv`v$tH-bdkyut)Igvv8lMHCZ$TJdEdVcid)21xgH7Fyto+^z8%(FZIi>vBP zyAbou237<2vDxweI3y!&SWw}M)^B<*3-Z=nkmM)S#0CRvfdi}_P7+#K&nIxa_NFUM zpmPZXJ5)b!WJ%@V5?BnGL3cwuxV^0q^YwKd&&5|oOd%(3xruUIOeC2suj0nhQd1&4 z4$IcR6b=|xXf>lQG4p`gZ#l?sK1Lj;vUWLO{*@- z0uPur0E?ieM~nvc@HlILUxt(u?BUZ$9H&^3apYT&5U|_L^8#usf>6j^4S%9ZBzVa= z#(94_^wVs?n3-P(dix`hDyfM)gMGQmhaX*)3{IKLKfZ&@!Pfmzn4{lzhrz!_Jq1u- zBDwt&k$?#Zm*9x7Yhp|P@6|Za!ODp^e~5%D5jR@zi?4LrRI;2Xir0bZe;~#25nueq52p1c~U)X|%Is>Rj zaz)iUtWP4k*)8208TIO4NzOnpWvSIGfH?#YK&EVQ-%PJL^BzN(dGRLNmt@eqb3b34w!!^bo8h22_4Y=+jl^Q4p2E*Y*k~A)SbuSC(Fr z7Ay8zixycz>@Qks)KGGvSyX`d?^h-26`g2+!7GjQC;p+GFJ>9CfF+;JzLRiF@ z@Et=(8Lh7*08Wf1nn9DAFq#>?4SHa z=6rW*^Yg;H{ihlm8h@{bDKTi#0YwigCY@>r287yFI2#D8B`j>X=M+BZJs_d`!Xt~I zh+);*%&3CWnWZR-B_@DP;B@E99xW(=)!ZA`t)8$1&TF@t*wlkj-Uw7r*xgP{xJI5) zcsN^;cOg*80zQh-n&T-2j6JNEM|CtelU46@FAC-Vflb;~W{WddP}L?3TB_%7Drqr8 zAq@eqpBWb$L@>Q>W=w+<1N7%?v;!LlGG!pJE3xvbQCcXYWPb(DnkYI5vFOoBL2%?` zzU%ZAYsVnDW`vq~0QnO)SGrFTa9tbk4}T)5%6UlT7l=-)#@beGuP)t}$Ur8*;mw|H zb6G&uQgO*net)vQ>XaV+=}q1znxnhRrT!xIVzuY+*>p)DxR=O-6>9Vn@9%_Cdt#nN zN!VZ1-af_#J?@E_4lc6&4}jhyiyBa_tPx2J*G zMQ=Zyue4Mw)c)4Ix9BSK-t-`zNzA5vk6J{3^FSvTOrncqqB+<9Ss(cNU=avAPZu4p4~#@alvOu!1JH(h7a()nK_NI zkrlKs!I#?A#uf=)$czTfTHww&rnb9_RIBgn;{3hQ;j=S!qSa|YIrk`p$$&&jmv&58 zmmVrm0f4Ef7!N`xf(mRE_{(b&Acx(Ei<$zi;I}eZrTYPG*Y0s zt^40fcK^?3XH_K(1Gn}O1QkTEL7+IRPE~86D$Y#unQ?-d7AFs13l&)_^AR;owUvf^ zkFq`~L7hn-@KxK|_zfT!OxizUq67-Zks%#s5AzkJY|BzeBGt%j9^eJy|0zsRz6`0< zzbRvcl~D!QSs=leD}Tk=qSK7sUa=yfO08ylR<2i%8uGJ^(0>V5c8EyTe|n zg4np~~43`cDzuZw834kB0 z-DSro6bE0h1@>p_%YtYOi7feb;NI`gQUO^=0?6@;$OT+2XdwqM!`_{v4&P4(GlyAU zuS{SG25Cm5-$pV52-Wk6X!m2K?R5DU+d0WDSqr0XUl5 zs+AX*p$MR2gQrdKG=(t$&YuPtpdiDB^M4{FLIMb!Jk?CtfrW)qjax9+Iu?{tWwt*{ zt|wSC$9;LsFS8;BuQ%cjgmwNDuc~E4x2L~mTocS3cX@+~NgY&k1_}-;2rmh`@tV`} zeg^b6Ie#U#d{~|3Sg;Im0GN;}2xde(Zy&tfOliMGc_fm}e7y>{?JOlY{hvX0I{niz z1=LJ{<1$}r%~?N&JjKh&lHLY5Ub%z$R@45s5=H=5(2F^M8edT`$R+#AJ7iT0jP7wK ze!i7$zlU>owj3@{o#G^`~R{%4I=yHabS|4W%irUM@gV@r+am7}N$ux?v@rGP;izdzuZhohNK{?-F82a#8dY;enhV$Mw2 zRv7yI(5dqR-+=hO1;A+y>x1hGq@o@PDL~MUPyvDp>|n&i6)2mkGlV?bBTY0&TU*Ai zvZO>8fZf-b#Asg3fBB^x~z!#=X!NJ-PSYw)HMPYQ|FRAx zhHVBrQ)MiX1U&DRyJl1&?D+{KpRD1oP;RX0`jUVI7En3jzTG7UpN`v5A&k=fdm**pA81BB;o5Ot1CaK`3r;yp!j)>3z=$QF({^RR(yI>9rzzBAU$RaNz1wQCB?C1 zeEUgh(1nX@sr?{i+hR#J89iH9#E^(4oA6{^UVF?)M-uiKX*7RUz<;d3Bt$K>-jwR# z^8xMJ&UoRfFE*4}^TD+2>90{$Kx!;|@Jfpkf09Ab7+Bs#gm%Cw#Gu%+rA^h@sN6zX zIS|!Tf^`voOu3v1w(bu23LMP##bju#W7q+MPQ7nfZp%M}aTbbYc`rndQ-!@qP}(Ob zXsZJViR=EEYrj7{3fyB1tqplJh3`}JlTxsge>pNBq>4vjt{0iDv$rSevOq?jP6Ov; zP#}6^E(Lt?ttqO9#;WeqOLB9X1bSxWPuBa*G2|@~kiSQ@A1pMP z_I{>P1(Pwhftu}>-b|5lSp>Y#vpqHL-V4a^mV#_V2$8^yd2z5vIo?=Ypx^%)SLG8- z2MDNao>A%khCukxwtOw6Bjn3LSq)I70JioQsN_IIOA&m)O^y1mYCw-vT83M2BlAXy zX4b&b5`15f&^ zG$()r8=^G7B&GnWbh@Jud4vEU+X_OsfUCItohghckpK zKODc6nv@EnMtMg7K0KtHwRQy`{{5loEC~ZFbdAbibUs-^2dX;}>RgP8aD7EbvCrjaB;NQhom@BwT3Omv;8M7Xm_K;&+I~^E zu_^QfLLw`cIZ(`kv>7?GeLYYMenjpOL4^qoF2SY@dfI>-1C1_60x*c zS}e%Zpk|)Q)c8^Av#&DvQRvYq5QKtHAD|8+BfYTmqwI)#QjCCQ`gtk)bn0HhFk3t{14T~$0#ySjsi+z<5MiQ4NWr80 zwvO;YbqTy3B+`^l1g#+&#Ttq_b#_DxouF*{KaUaX3&AYmtfM$%pszRi@t%nw0nqT^ zc&Xns_(~-{d%6x0alm^A0-|Oa9Tw-(l7dKQBL0$QFxIrVu6XOpWp`56%BqOdW_pC| zn+b;uxSP@7;^46Dff^*zFfW0f-TJOrVmg2n_a&kWbt)!1jDnJ6HZdw5k-Y;XJVA72 zPL+avfEcKpWdv~eUBp?M$|E_gW}txyfz1D~%njx;Qmxm` zr-U2Qa+h5VJT9xaTQK8g$Lb#;t_htHV|*W^SF(X!h$WB*(wFrGWW)aE>c5CdG|pB| 
zE(|ol_sm3^@9K#{D0Rz>n;mk@Z$ws^p!EgrX6=K3RGjWQGhUjqgmfYixyuCcG zGr29079#9uRPJ-Y^WM$-%wBH&O7ge;o*&JkhTEH^iLPq|7sK4-i#-9-c>1`f4N2?P z?|=TGLope?7vEU~H?}k6+XC&L7;`vJw68i+iB63i#9ml}VJw1YBjSJcy&|1R-^Cx` z=XiG{VqO&U^q&o5|BQyw&o!;8y7_TACr7 zXeRR+A-Bd$db<+u&jS4L_phIv8>+plWDUmzUDRKiPx=^uqM8v%E$Ry5Im6-JO1H~h zv%LRi(hERz2c&3Acz!zL2e8Thh`U(*7pT}W{U>C7=x`BGK zFDT$JA-(Cs-7fv;U^Qdaf@t8DU0?Xh^UJ*FLRK`GfqiD+bHhRWn%~7@gYVO1M_C(D z{H6gBOCgTyDX;ZdVrlG~DIxmhnEBSiB6_c0emC#$Auz=)7boF@L$i^(@B4h^A7Ud} z(jw>=SlEq6PeL-B9+{aG~nDyCi7K8mr)$aEGC+x4Q04ObB@F z{Ck}5q%*e8Z3lJZQ7jeLusLx9-Pa+V+i%UsLuw6|U#6&xSMz7iELm$e;zItisRwY7 z-xOo1e7b@u6L=r)@(PZ+rsd+|x#h2FyOG%K!Q0<#Z;o=JHj9v*E*IV_4`Q+I}IPj%oDIp3hBaG5X6ly4t;noi%VfrAX#-EgZNq3C#CAZFp#5SMt8w zcLmajXu%JM6B&ZXg6;L3Ju)%(hX)Lu8Mxd18)?*g$ut=67467`nl_r{h|e=wMg`3^ zq3*$&O^Z5+29_N42D-s8sT_CpwW2Tjxn?ydD*3Suo6_qtCHkkZ+85_dWCE%=cF|F= zWQ@^LQGZ@G-1ptH5m~TlK3wf}`)^4bZdW`Qlo4jOmd4X-4>-ARaiP(*x-JN+9W;0) zrD-?a@BSCTzM~WU;qn(lp-;1woAGfW2Q5-z z&*^ZvRQR1H$=9C`Cm!ypE<0_oXm+B#L?}s_J+BuP!dj7-NSO;nx~XNd>PL)ii-CAg zaA(9IDBkNeRPr%I$+n-sIagzWzTrc9d|YwZV~{U%2k`%TS?Ky>@XUh^H$8Mhx;p3m zKq^|VuW)bQcK8cDRuzm-J;aX>qX!f-dlYZ3ZKJ1x_=24@XcWMxQI@Fj;@9e#VOtA{ z30+1snA8*6Tnl&Y#a3L&T65BOIM$tvFF`{Xv4k@j7IJ-%NcNebpg*~{@SpIMv!EBk z{`yx6mmba3sBdlTZl$Z%PQKvKoA39==C6SpQsNGMrWsDnT1yA~nA}fNYKNz8+RBKt z(rRc|haNoE3H6VHDpW~EC0hiFUi=1m!|&pxf^>Nff5q#(MG9%32M*n2B-Ja2Gvm)Z z_Rjh=BR%T$<|K98nI7hQ3S8L1#~l)U)@&aSBY$a&VomA4Q&CH+=}?&N`m?3z`f_s{ zu|%)RazHN zu&t}?>^U+GS^{n0(Rt0;Jc$o+tYE|G_gg$sp&y^pUk&H59N*lK%mh{D!+Z@e{-KOC z`?|mgW^Bd@ZFi!SQhCwmqe60p)>p4q1<(y7_-rm5+$}Xv@F5tEQI0g_;Zm#mBXIh# zn4xv|C>9ah*)N>QThZQxxUr}8O60vcn#oj>;QUVFL#TOqGbXxmlKqY_Lh1Vjv5cnP zsDQ2-eQmGI!HAk9wb3YPwr+)@G$$r+0<8gM)?$En$iZb^UiDM=iRTIz63;)n`scvU zRywTslWhz5=-6FM^}{{(h}@YvDwSg;b?+RBm5gY7o9sARf?Lo}x59(soV92`^~$?k z#4w4%+1Y!mZIifFY`g8_7^vlQ;}!T|~zCg3|h7?529tsr$6QXq9D?S>=xkGFE-iiwYSt-j3E`Xm+>bp(n`2kv)e$G<WNwnPcv^XtZb$*Z6z?FkN01f~geGR2so!|t#ewnF=rM1FwLlg;or;ET$s1@*C$ z3lyI2aTSjnAA`@~%nh0q_;(%ostxy#+-~la$IX#K9c!*-KBBnGd^t(alNuwFvw=J9 z+V1MD%3BTMk*knf7{5zWxkw~?-aK}^>eKrmbn&$)Uu`1ebG|d8vFt_@oP0Xz3K94A zjQO`ly&U0H{Ju720?i$tAe>sdS+%0K7Zf$2UzKrU7KfUxY zdQ+Jk#J*EaSX7#W7A@6!TA^m^T;cB3^e%+vC4z6cC&_i8bB%#&v!g2>@1A$>{Ap}o zm96yG{0tm})a3@W{_k0K&E*jxN(IuHZPyz#d7Is?JMu+k4*Pd#FEuxNlr!@71`N5i z&xtZB))sjP?VrvA+jF{(EL@u(r@Faf znjIXlW?GG``FL}8!HdN^Jsf6A$}OxtV@fTaycwp9wx|gOo6YzwR7-9>mFqoDxtY>} zh!!X|5}l72g&ftc|@ej&tj87WwE{s`Rn zwpPsvGzrk`I>5DM2!e~2DsAVu=Q|}}6KH_qYQuctL~2X~GZfZOVyn$)P^nt!`>5L= z;viu^r4HA&B4986}I@bZIxFSrm>t1@H7g&NGNitbu1KQge*T=d= zp8KsuWk&>X*Nd8+7rgejhX^|_4IMrVC4kT~u^%E@9 z3#HB_mYx)U*%38dn&WuK6xL9W^7Q1P&Eg1)Oa?91wkNY9%fV%d&xrjvu(xNV?PHi_ znc@-z+C1?ZoMq75ZeAF_ou#m+eUUk}3tgw#Tv54tXiO_6t?RbmZvn5d$M;rD+x@X>bN9&Z&L z_+nHzz2NZ-_a^kK>|NtTZhl3z*5FP&++(;)q1#|m^0QB=suPKv3GjZ#XjOX^fgjt> z8rU2hyCb8D^DHn1Ke8C?sjQ@vE;phz>tnO%mYtCAoNWDiIZT z=|G>4LPAM<7~2&rC38`+&swPKG-K{kQ%QgbJ zH~Uj-6cN3Z$8EXBJ+&=Oc*}8^4H)`2Tk^dPlV^U{#VG+Q3>i0FZ~Jz1+y@_f$rkFS zs67)~OKpoChzxFPwj8T!f$BvBv*R99`=qj}0Nn z)QRmPB=mbo27Vs3w&kE7(qzbClLjC@qVX?9$`Pm)-)x%-eahbN`OazaGtX)1SH9q( ze1d&+I7^$MmjPfTQY6l2PrD}M0y7lI2yaiUC+Yn*balAOXebPuryOqaRzC+OJYE-bGa1Nx>fzocx=msEdWM^4 zk_y2d%g1#SPNAE7mE#6b7U`iO*HOQw&LdP<*><#M>)5+Yyo3A9J)(@7qK}Kb}2=? zSWT|aQss*$Y2$lUru4_7=yg?C?qDB1+U9YpM44Pb{G(q}v}hhuIfGj3tm6`QIP`2A zb+CR_50AAw@Ihm>Fw)+9!)qY!UjOLbR4&iaAV@WHy1urWP>|8+u4Pzgn4f})5c+ET zv8H?Z#@NT)6p4LJ^XOtFU^? zMD*kshWx`sd5{y#z*!&)8mt$vT}Q*oq3)! 
zhky!uvN3Pd&5n%d=et6|EJnl_IVBI`Cc4qqG?J!OQrg9R;sAQGyZGH+ngm(0g*{6=;v3Q=GjcI ziSMOhdpuLw4h=eE)4mvbB&;ltBGEhT_?}w#LFRN#PRPVa;RjcCg^mcBDiC{Sx0THO-NqE%1^dO=XJ-hBTi~8A-+`2)dKj!H6OLhZu z%bd+qTiNX_vazUi$-J=ED}Z{l@ePeF{TaToqXpAh zN@U~7K_?t$`q|U%Fgb%Cf;K}pRtD#vH|WmZO%@*a-V|}V+)&_@zQ2`W)a_~8O|A2a z*u!MRYb^9AjSOD_YhM3ToVgDu*G){J!7Op$L} zO$v*tbvX+x`+Q4zq#+;mDxCRb1d?wyM7^JB8Pg9}R~VE)rrRIykJ%R2j$4*4%FlR@ zgWt5Hw=72`*4^)1r~c0w3%w}zGfR?^;iQV&{lXL)<-0ip(Q)TzaGgY*Wmmy&ulj@;aiQ*kdDjy-QS$_7i$C^jp%Aa2uovl z!RX9BU}I!hpWVpMs@BMGLwuqna@)K;vtARgb^kCAQ{GF^l7sJXhCg7js$-*(iFXX4kSDcw@ncI~1 zF`Q?0)nIz>NM@uc8qS67Qqte7;F=#VIE^WW`<%%+MBJlpU~dQJYOF8#gO;AG^-lFqwx54kiQoD{Ub6(6Qu_4(5?S8_)xT548rtRu{V}Z z&|@R{a^N+m&HT0i>vaLUsqWS&o#@Tr3#QjorSbcC?d45xo^|{goNJxn$?%we{V=W> z>Le(CqkSrqDLpkzr?+WyK)->L;D{RIDB&S?%1lIH8~43LFILUrwzfdH?00}?YW=1c z?vRcg9f)~HkyNB%;&fYZ=+{FKHeGFBCp&%(=Txq1fnFud<6lJ^%Vb4Vs7c43 zRaPR7Ay7rgAPg+k6Q$#v$Po-Cg9~&ToO~sQA~!?^D{cK$5)JhgQHMD3-LLHC(_3@$ z+Gz!w#U*y%@X0nkFlm{;Zif$P{K@6M85T zOgOKLx`Z|{hZq>LN?J|l8#o`&@n$>(9H?{cJu)knM(~{05Y%qe$l#&ZBvp|TH7`b? za$McqwdoBH1(xwR+C+VNjD+?u<#Pv%>OjQUP;K$zum%$AekI2Vrwk;L9QKK)72z2*RVRH262M7C5OvDU z(6;27nT1m;NcbgqpHQc`L3-!0#g&!17T{P?wp&n|*ioCSPTk9Vn*2$B;<9Nd$U$l; z$i_v21gHI%5|zvaSJRtn#@orkIh%>phA0m`6ZtvL=&vjkGWn6%kLlAu;}u`T-%$ZL z&?oGa^zgwZQlJ5jpUdNZIQh;cIrTNXUaw@bn$3~%&^t_feYEL8V$Kq;;uJr;LZ8ba zCunXba39~Qhowx*NBMY~JnSem!7A5HZ#5>cUmAzwLq)N*j|u+-r(U4QZF~3CqrMTXVJFeO-*O@5Ot*&oQ)D`JA@Yx7wo~>)Td^5%AH}L+A*;4V3Czzy!?s2N~>b$$E0rB)3 z>@Iqi=O@%zs;nJE?w=k#kPOGpnJ$EEO9Dz7f4qWuRW<%*m@KUtu_eM4M1uxZe{ z?tScOcqCNzW|Js@P(7FE=FP}^m7;Re9~H5;;!JsUiIflQ*s}BWIR~$dlSv$m$E)6N zUFB-_2NaWl^ zRK9k4$Lm4|FL1~m2dLJGrTT|~#hHZiHnEPl|BK?(i|2EeW|9x~h6SbSBnH#v8;8;L zvlWaq=Vn%=xwa%5`HsD7%y6_@Yy>8GH2Osu1j9v{^qi=hB3_!FIlj@14*sO9BCr*I zQM^9GpX`P7Y%PFO$zi%-ygvPkWZSA5qZ+qJ0lkEHC}3?`T3fdv6fPCBBQ}pccQ30@ zZ-w_lro9j~^_90$ru-@OA@w z%g>#~R<^J01Y^bX6z8n{-*<$~6fX?V!pejo&o(G)wD@YFzL)}d2}?PocXr^SSjP|< zTsP&jz|00*RnIMc;Hqk4M(SASYP^!1++?j^p_f?I?LohCM)r7MCmy&fI0l~9Ef{}r z`*xB-R@-ff8T}>nTGj2@kwc8DrvhXt4Tt-S>4M*VPhRcj zvu3o$rvYC8%l(3VH-^)`bH`dZ6K&Fa-ySN&zVNd8x%c6ojix5g784*%2xo$4A`dq63X6p zi>J`FYrDXOK(Fl8x#+5hjYB;v>Zd=G+uw>#aMM=iF`4Hq^AhOFB^_fwf*Why-S;c3 z0F%vQW4pJm#mDi}RZg$pk-=pm_@=1%5y+6ayv5eN$A{j;ck`z)mYWnx>sc;mAr$w_ z5?y*r{o$Ku2D}{Xn!gfRgee;KpGPpqefbr!-`G2S3e^N}*m>(3MQqfD7gvGR#qI-= zexsRcWuW!^>WX_A-8#;DpZ%Hn9s|cF`+7Tl-k*P8P5=HayzDp)$P? 
z+#^uPd>s6;Wyp3zqtTpHUUEOF+3{y`vxb&;n7KwFxkhhkQ=IuJ(+lNZ_<)~>AfTzT zbk&l>+^aF4hqKqM@-u(Aa!gSnl7al;BxGAd?Wn9Y<6{p#)UVg0aQXK1t(sInxwu(- zD)72l$icKmux<~%MgWi8IW*~9{D%`E~L#F*4<4#Oyc=WX!wH=k__z{ZrF=|@}B zD>wdf#qSHFh6sMu#k;C5M3P_aU!zTzx<<)<&i3`7?=Cc78u&~Km#Rq@KP_<*#c}1Z z>Ij`A)HmoaJ$|mSfcNIToABLnOroZNM|hok@B2xHvC4r%qMq;?{enhnYlTw$^s~}Y ziKBCf3%aUY&djfs;)UEhv(AcXKUsb9)1znd%v5YcOmNOkVd})2V>5@jt^Lp zXKHb&76z~KzVimo-2F_|*U${LpFJE2mbHfth?ajM)Dxvh!!$Rl3G|9ji}I=2lDYIp z@#<;ASy~RNtM{6}wJ3?~IwstYyQOI7=`z@xHhd=9&BA)Ee>2LjvB|#_mN>2_HzChD zV&1Sj^tI}PsG@~>wntM>wCq@$LMbWh*~`f$Q%BpS$MCXLw1+)(?=h{^dAD!2ZgwT3 z1km`r!V4Hg3(Jv09xcpTAZ~^G;AyfNpaP<`*nFyA?tceIt#A`0CZ9Miq zp5KhrX1M#&R5tp&;FB>ACRaJnqVvRW18cT7UMyTm?jW31R1g6MIj5 z)Nb@WHWLv_P>O3>HhfZ;!uNs4pS3rvk;>k>{bs9|n&M{Qi)L=;Na_q@9;FZZ7RN+^ zt(17hwaO|NeQznFMJSi9H;BCK5E)EToMCpEPV2iV9uQ?qjt4wO3m$3dp7JUdtVw5n zk|`YxwRw26aFWK3OtO0V*{WQ&M9uzoreCCL>;9iIQ^98mnsZD?7Mj1{QL=34>c*LnoDe$ zjsz9IB91o60iO?`MMb$g=NN(q18dUBIYmPCw4D3+^&(*R|m8 zBYGD##0J+g^0t>!YipG&BFYZ6yFqhPLg&WfH4@iyAzJZfw4M0FI_9dXE4|$>HqHcw z#!pD}!p7qFv^g3_uL}^_7#;&3gHjk?d*jc`J;hzvh^c@^{Mgmt3ma0jqvIk*^KpZ# zHuc6R^I~_n?Z5a3zSmVuGvIFeR{qDj+;_rOUUk@SBOZ9de&LV%#Nsnp?bCLQA6X?9 z{iHHb*VY;vL_?eqD8%lGwxQAaE>fse|kY4V$6>WDXIo*vRom_u^ z5!x(HmpNEk)kkdpP@|Jud=g`Wx>9mnfrC>EYpcsBDdqqO{vX* zE^b7LT@uQWE0Wf-KQWQTFv63{>b!T+>rCN%$EDd>k`LR}qGP*mmBqnl3uEJu$!tNm zMtX)lg+({LhHs@uiw;#2ysB3=G4iBjm>|!miLP?l=a%L_F^jjAj4btk6b|w3X9ePq zE&WX-QF|6_(q&S8H0YkFn0n~vg`XR0CZbtKsP#%k4o2_AS ziB%TpjwTtOA~ml{N6VAn7&?e+WEDNgE}FqzP=#5sYBC-T+%y^AHI1^16sQkOw$m=Z zSfz}4em)sB-*I5h#-zl*nXtj#jQvpSsjMC>Ri)G5gw`TD$uL>6d6(`qS|l%_EJ#c7 zMeNADwDeG_?~;~%6@Er8d?<7XBS_9gkgk5HoAkp@j;x28A}ikVCmHfoi(1XB-qlU1 z^f48U(2=1C;q?eUqc%R^P-;$}7?h^Z&MlqwM1N7eIEzA;t^)POnj@4FXzz9!J<0gM zsAWUd??-}~!#C0rTEn{GrK6b;%RT@(9FRn$UR(J7W{}yFI9~W_p(Zl2c6YycEVW6 zPi-Jj;f$e!&AK{XI8@n-7SKyS?iiq3Prcx_?CrEa=ULAw$)LX(WLp0@J6!`qqOM+P zqNe$}j$xagZ&jkH&{j5pvWM&24llBl-k!#F_Q5N^_f}dq&$h*!+xj}$mvvYPx#lzN zxJHF{9fvUmZ0$>3tMmEL(cnwsl|}enbc)AP-NhRdB7A~b!f&?NtfjfJMe3pMWw|2|3%eVheg$OfB!}ikW>Ky zkx-*MoP`Z0ST88fT%>8?B<+-l;V=g#z_TFpf zS!;bihcCNr2a=ons$2X%c#cwzE26e5HG|9j1Ft0Zoy&4qXmfiSj_0ZhF807?6sLRB z7RF;Ttlm21u1tQ5F!?7>W%zh*Ncmrxf3N}T^k-o(qpNOm_*LvSY>l7(m=6n8dcysB zaKK_Jj<+`4dBd7&`et6`CIA_r@cH;Y3B#{XXqLsx5euVhH+$c2=Ca#X_06QM zQe)k2D*=uAgn@LsXQ7^}#j`55L&66GoqH^H?(!2Ssf~ne+eU*{-i%T@k&>ct>csUk z7SFOVS$wEx7ubGiPF-_Z7@?6!ZCC9eWBD*7-((#~Plz{Yza|C;VmMHvu^4 zZhh~T!X@}jk@EV)u>5{>eX4zfR!Vb~OPWKMJ%|53O&hxXGTr^93k%V&Gb$dG^d&FC3Sz{ywP@2s4IhVvVwc>vk<81>2~jP>_!-liLo#P1?Fvke z;2d{3?nLN!9rv?Ql*3?v=1%2uIU$u>$lWFaq3sGMs<7fRqHd-(Gb&)*VCigRxH_>5 zX`K<`sfOI`lBh_ar9&RA%7`6?-PB_}-Fhl!D?3bRklhPb(VBK73`|0B!K)M$&>45!0T{UN z*Y3?4+pdF5}B@}%JgQ$Kk(A`7Ii4p? 
z#j~t|>cheR&+-Cq9HU>pHdTkKKLTWI7D;e)cWf(d!?!nt1NPph`hH5jX;8 zvY#5~3&`&x$uCzL@>Jr!&a^$@U6T#6C9Oy^&f&;sH}RUQ-)0{?9_o!F{Q;+Pp6ifA z#9d1c-b*F`xkU+wZ}iZb7t)^qNjMHsH*z#rkCI78l^pVhpENq;N~7XAjQ#vHRPfF$ z+o~Rd)k!godda1YMnpR4FGlw+^q}EXl?eKiP7ubq+y!YH^PJX2P+rc>e47D8!+50U zqgeSc!8VxJ1Sv%(y?r1Gyb|D9Tn{yvevhlq*IS+=`}1Kq_YeAim4XO858^bV#OLVdj`DIn9mBJn)&XQ4j_46>Dbq z>vB$ozBkx)9yT=Z2;!7#Bi(%^>{ZBp0jwRIN#AlyR%@Ps?1Pth{WrsF2Qo}DHHXyI zA3k~}0Y)1HJM{TB0`yeGB;6yB7u87IdWn42n=XQS@QTZ7jG5l$M@eSn=i`)UO)A5wxXAq zJ(I_R@htW0MVwfUQ$2UWq2}jQo2I{C0*)BZYfD^8Ph{Y}Q+EkZew4@=O^!o^$VzGD zN7~5sp7t0{y$nQE#XKSB;a!N>}=bAx`FhAhNzba#RR`?_+XSq+P zp{P^!{5PfV<|~{Sj1ITQ)#kCz>1uMjW7MmWV1xRe>>0peTx4X*D`#0Q$1-TfyfZy} zs5TBaWU_m*GyCOR{%}GcxdhQfy)nD`fC+}HhenU2I7^^Yy?--Q0rw*=Cw4d*G0$#O z$6`!Im;ZKODr^J#J`hy-!G(-?nW5D4i)f))rcqD4c?9SA$WW&cta3Tc z@J_Y>o%#}vIdV400W6?PzRsUm*Nf544%NPc1mA|yXkm0dP$Kh!R)jXArvNc7V&BUN zY6^sEpej4fT}?jNrA&%Z z#1k+ivJJH39Wm`MH&*}O(xOzNIxhY!v-dT5BKcTO{V*ov8c}eO1m#3i+m# zNRDm10!?c@TqdCxaYv+BeSD1*!&n2N)jYGsbXN1kpoE`gMS)y_{MA({KeI@^xQ2_K zeUG1b#&HuKu!NAlh!KvW!rkQf5g(!xU%oY*cT69hI$M|?SM^vx-llT-sZa|v&|gkn zbdj#ni<;zShT)z44KlH{$xNZ%k7K9!a$@m?ttS}RDWm3<9__yp3Jt>EO zw7Q+xufC2lzMDyKe4NLihsrMk3AbIOqZ-}L5Y4$WB7TYMpMsI&_&+$U)mXs+f zobnZ2hr;g}uRT3Yzd@{oA08kZUbq%i1?iWS&<>9B)z55tw`emZ*5;LT5fIgQM4wi< z-Ibs`3e*dz@SCBvIU>Hcxp#6Noet{v#0|sci?#-t zjH+0bM(8sI*}m5Z@q1y$Dq#x)DrH=c^pZ9rfLO&wvk!c_!JohMp8?`(HQqHBWNRN7 zkKVpdaeHSW)Mg2u%#T^P%5sEXXYGW6LUh^@BGYq!z+P^tlj_}cn^g4jc{-72GN%9GL7`WS1K__>#>J zFGbUW9lP}YxDoqV^vyCn9&jamI>fIEUu~Eb@!3!3ZjV=!n*RJwMfUn3;l3>q~hyd%ar1r{aUqq^8$XWesu9lh+yUA;0OLeMn16 z^;+bhGWtFju_cmrMt8j6dt)E@c?AM&)BWggsWkXBXWVM($GR|!lvQ6Mf<3f2LoRcb z%&n!ui5yN8f|lHOfhbXBt#{SCq7raYeVRmS&c4!RkD_{9ofep`H`Qu^*siRXs6IAP zAIWYrW5KwUq@6^*$~)a9tgxH>sV7PxP(kYqG#z&5#plR-sN!3-{LON z&j}Uh_Qp?@nhx$;+w=?Iki8)62d!eRD*CKfy)M7Bcv;SLQgt^J?xqf5ft&2S$o(>C|aK4{%WJd+NfRkj6>jM^mf`sw5|`A zXma#@`JrzIK$J7(V863zJ_~<~c5h9H{}KIp`PeCqC*!W3<2~mL{5dUGrjp|c#31}l z%YQ)}*mMwW(`8t0M}aXJ$xBSrCBmCePBFbs9D9Afu^ z@!f$xM{~~99`_qguDp`?o011oZf27Amo&-m)W=o91gUe@-y|0h`+!-`Hu0Y*KRLN% zqx$jlRmx?V!O~DBdiSM(&jGI3Gd{%;#w84p2Hon=NXBu^Dqr;zM~RMN{_GNja;vOX zhSJ=YyK@`cNup%}iF8M_#pGU>$2 zK`Z5jPYWW7VOu{ROPyH}R}Ph%XcB+9@h!PNp@Njm;;U^V$Jya;G^d#((g<~R`dd6C z`AhM=PSnQN(z!3Q9XtO?ymn|k!)Jd&w5H=6EAGIKc?0C<-~EL@jJ_s-hQan$Xp4>z z95?8P;cQlpZ$9Roy5RIBnL(59VL>DaT!-7s-Ye0RitD%G-3M<PR z92T-~szQ3^xXK0#kuaC!O||KGg<;I~TP^-sYWF=5sZmPkX&vKwXQqGEW@lIaMjORh zBvp;MOm>yG;edDmjYt1>p-O|Hi6%`GvKf=#n=?A`iJvUpf zMAtXk)%p<}6#>F~GVuiCp((Y!(rWLO`E!1U=Qdqg+-v)`Bckhj82FSFB&qP+{~H2Pt31fGprI9|uOG^biM0uLjCk zm#yK?VOWd?ho4D^IYwym5<2SlfOzAfqB}#ERc86u<*+_c!JFD4$Nn!h2buaO%q11# z6Lvm_!bYB9{TsRu>N*HOO^X%C*`P;k6s(k73l3XIpS*ZYro97l`%pQdI@UeYto)|v z;QXc`yH4RJ6(&G1vX5$Po~>@yq4u~QQL%wb`_4VdW0&gWDLf<9N+%309ce$X$Sm&0 z=({Cl85u8FRn*+Q8T_0mSCU~dO#7im=RZntE+h-l`Ud;SCfdF;qB=`CHCLc3)A*%0 z!=?vI&`H_C`9tiBfYW9mHRco89Z%Bcn<0Z!c#^yB^u*ljp0a+ypyslLv*@(c?Fl_c z#ks)^@PaN@h~pCiOj}?*^u_wI)@3y!b^ncdG9(OBco8jf7Uul=q6+XU!Mts1(%NyR(|0Bed#Tn{vV;ZRbH7ED_^uDhfM9BI%aR7w zaO;%Qh+^@0ZEImP#+mYh4Gp#D%TOT zZ#nj9FO#U*noh`-I>aUli*q)aMKZ-*vf}}7UaVAm}+%CYP&@d z5L>JEmh>_-cPGufD2Z(1*+dP+No&Fm?H%^3UY>F7oJBi2Tu$GSYlD|ejHjuxrjQ+l z;aWf=je+h0xBzn_!pPot9ZeV-qT<<%nw5I37TXU>On7ztV@n4+6=KcwDI!9rSk%*c zwyzmoR!knw@V&^R+86stIHR5`MRkuYU zWNrBMnQ;%diWDLcJp~O6V8%>8ywWiIb<_-=L$or67nj1`*n(vWi^MGB8?^;=JIBB2 z+s<500>N$h^)c>FtKjD_Z3WBMy)mB~y|Q~02lBT(9?h&fUyTz46-=7czp|8{SH~!8 zLc+@gnKY|ND$~_}+TH2cEG}nd8;N&?0YP+Dp{q&T#vLbJ10a@Hi1cB#dr-zPwr9(_ zvRZLlkY%3eXVy+fC~d)Kj{p6ZuqBJdUvj#|=mePOagu(?@|7ZXVNPihs-4LOmhmR7 z$ucRb0W(F$A9A;OR&HThSf7k>WiVhB 
z7w>9!k%rV2xH{x%?%rp1ft0nJTg@eCEms3?^dYgAlH`Eb;HM69nn=vQl>!x076%h& zH15E3?ExPgQ6*UF$o@2NBu#Fb z$pM)zCZAN@7%IvecNXtZhSQNY+a10tDvcxV-ln6%36)tVS*j8jGL8DH)~g?gUD zl-Nv)>uS{hS;!hL`mK?`{r-cuXlM{wcu~2Vly~r>)Yk8O3&K0%ZFRExQgb>yId4?; zj_P*o@`m8ku+{DPzxD>=(0w0rwn2kz;!3wvkqQ|8l5{jFQF*|A;DnVm+upgfuo0$~ zm-*9CGojnPGEW^g;=Lhn_@>N!#&Jkwgv&0Ygb&vZRxB%r9q?1t z^=g?`iEQ~}QYOJ1As2!+mf&?|-NudFY8&l(Pf}wg!HwKyX2UbNGb{wzjIu9~cP0yw zWd_M(PT>KmZ{ReDo*pnmr{X@J0^+CyA3Ffup?|FCTf?&cCSF#QYhR3R5M7>G@ThSj zek}X^akRi%lK2eK$~B&yo5Y*%^ILNU$AG#+@8NnaGg_8&*|z2Db+811)e$o|EF!6P^I;!!_s!O>qk`R@c%ySa(Cu2 z+@ah%iGaMvA3#M^&c-G%9M9ZIUMKMQ5g3L_N#BT2(X3AQ+^yg6!X~1%|0tvshEjih z7_>73r@)%#Ou|{hq(SFD@6F^^-Ek(8f-jQWetoush;BcyvGbg%X`Tm84I|^Fi3T2) zl_>Vj;3;0m2zI3KSYP97-nn1JiG zxFWw~8iTnIx`g;x7pE)Hdmi`l84NFy0s0#oB4_gvta^>0WM&XGUarmz;LW5t24fWe%DmdPpzmrOEJidJiEZeMB-U@5v-+M~ zn6Ggf;LrS>lnO>J_hnek0$A5~P|k%v9|by;Qg*=0u|zS8-oK$#NR`rx_!*oi9TH0K z9->4cvaO@zGAOYAf|iJxTD2%!PwZj%XBR*?)UCsb(iW0r+s1CxHCJzTUGR}YtM@fuZr9{X((ed^nn23Mv*VOkt8Ly z7HReuNG-L`cEeVggQmfOu-Hx>1n6y~_%uE10 zM9vzzEq&+$@QOV)a6333+?43JSs^1c4IxKHbCZnjhkOh=6^V#7nE5AXyVHAIx03iREFcl0eL6R$E*5Bbz> zyv5cd;m>Kb8b$L^ZEtFH51^_D zm4=xiGBt1Ll%HI$lqkVmx-$XrL*1(wzBuzqr>N&j+&oa6K~G4kMUw2&Xtys>B%lD& zRU6eLwz0V^bezdgTL470_trppG?#mT5?8tp)?}<} z!LcF1UT+}rV5Brc#7DKYQvDy_z|(lP!KLqjq7Vwh1pUeigqPN?vkT%+-yZQ%SWkiM zzDuz->iwEoXlMt5kQ0dNT!_G!YuzXo)xPoM`buGJkg*|&!2*GE49yvv7`6p1Jx zXY;?shnvO-V#B560(ejUY%%})#Z=-|+$kVBx321Z2so7g>xyhFz+ObWt+(XqjuNi| z9e`QGcS-SwD*%X4aey{cB&NL={@W`?9w-q!C8+gJuN&Q|Mww8jYp3njW!AuI2`F?c zzdx5A_}c|f0Ez|dA)OHC-`ea`XNd^_#YA7ee|hWoWms;I9laRWuYViI{sv{+gaYqd zl6*2+z=#pcwd=Po<3O1kveRjnOk?%eHnl*#OS??u|1-m%8vOT#)DT4=XmMXj`a9rW zkpo4-vLSW1Zr_7m6VN_GJ!CJ%@Amg*Nv3yVaex4L6j%GVzdVRGZq(RR^z-=x5CJ4D zaef#Gz=n#O_&|E$f9mn>XaIQmy@j9W!xqih}U`RXrI z;y*k6H;i;%WXXB~#fJZ&fc~w_z&}8pE&eHL=KgcNfAaHxyEvV({r^*E{%tf}AkbnZ zDN%uhTg83^kPU%YXZ3$?81WFL(qiCxN_@-M5_i9{ri_vwu4-!~ofRNfj#`M~I~k%RBvvNSH1n~E(i=Qq{ACh+H=27nx< zpYBHm{4FBF52!PZ-&?l7zx}UM|Nmc%c<;MCTCg5=(Z1DEN7OtLfF0jjprn-A0v%|i ztqA@uF-Fht1M}duc`D7V`KFE<1NTBvIjjGb;Ds7z|2uZutRGeA@u${MB>b)Gz*Cet z0CdqUtHluh&YJCwnU=U^Um4U?h?!cGxiyrDfDxf2W`gtgGGN94gT!v0-~3j)Qqfb z%QCUFS<{s3H){Tj{g-Ayy;xUgU8nyPNEhXYiJ)Wce`@@<`biH_t)$W3DZFJML6m_g z>hInfzDp?E3JbMb{#{HCLj4f!XV6ro@c(|=CXQkSi4S@g{F&-TCik5L(?cU0X#j`L zpOu;28&#@z@bS0*IuC%v`L~&@JhrV<F{`PVk`Wmx8a30E;1fK73 z`a=$QLeg3PYmfg2alpzSVn>t;{?w_7I_n!hux*h{ZM^m{kbuKSODyIXTa(9j(xl7C_oc1z2_l?gnu?J_RRZl$yd+PqzZ1kEB?2r z$JjsxN#vA-(*HE}Aixh=3v>co)kx^y$^B=&`uB_WA~5>&7w@(8@%{IYq?f?38r1V? zUZrLGKOd*w9&n&xL`(PBEZjH)o-ITXI^;M+I{@6mZn}x{Q5}$OE3I}p1Z3r9TsI%r zmb0K_pczB-SKk2P{duQRlGA`>1|H!MY)=m$uY~q#0Saqac~ttv!4L?PhXC@r$s50Z z*9-zO+CeWc018B(^*sS6{-QV(H*T0pZu&Wd_k%Wp+Jkygh>CF7cv&1Bcdf{PY<0Ek)! 
zoOhw&yV-+J5njOKjf-RwuR!Jtg}LUvmJ649vI45kOy0)KGvHhw17ELeIVxRMsOlzZ z`TfDCoCin*UHFas(mKR@^!-am28(;?6Qvae(&42L(Zg&FUX<(~ZzIq!unnax<-Z1q zZcoS_m|=8V82~YmiSt7Ch2Oz^qoN=HszFaZl#T+Pq|-zTS8XuU*pmddrvot^1IYR3 z6^*UEW*{$nHUD@?xI%*xCM$=V;Tpxi{Be+ma{nW5xnjhO%Hv$&Xjv>m!gYg(-T($7 za~RogcwLnk=Wgc<;A>uBa3l#HTY8UIXT&d`AC>!xbR7X;xZQsNCtA&$V90Dwp4TJ3 z>5%5*>0(q*lI0H$)OWUNFF%&s9XTEN#;988TnH(WCzC5X$MUML^pE2h{`cAvwJT9^4k*$Ec&iqPL9>JiS+0uvv9Jp9PiJ`pXs{>%*uMbcXM5y$XB|(gdS*& zCpXp#U^r{b0UG?!OM}a0k_picPEGD3FWxXrj<`C_js8^}QhZKmx$)#8XhwyHQESyS zj@={}8G#G{&$+CErB;ueh&B{E3WI01_;mMp&TM<*1P7rv$qKGB|*J$M}<&)KoLxLj% zeblA#!}>wj`_Tcd22#SL?8R_y8)V_&P5uJaUaT+l$j4me&3W@oaSI*D5CHMyYoS1_ zB;+ME5gLF-_Aaym*QWZ_U{H`jzAR}a)B87&%1nhxW`nEk zLN1TKB*l*BH(-KeoMKCF#j{MM=6N>!F^gO(!~*Y(wYoQS`A5xF8qz_VyVP|tdPX$` z1-L3~5(OAtWk(546M3bH1H2JlBQ>^f)fYzT^1B`AlGFyblE8rlrN-e`bVnvfa}5iM zt>l|^bURNMG~@P)zq`sjEk;VWHqa_t#5r)z!9H8g$Ty|b#JFD;cn+UO*I(@v*LTr3v6!VFplTRJ0aOegN^Z zgBz>gNY~aak$cgA4kpgx0_fFELTM8ijM^MY@i&_s6CT@|ZjCOZXk8wO{3|3D0*GMY z=SuZw09FVrd{n5cxB;QXI&+$~ETGN1zy8%0;G*>jG-0jhP+h&<4>!DgK8Ko>L(Ot0 ztKJzCvyLzl=7+#;Qf~ryI$!n^+gtDuFSUPi%TX!AA0A#ZV^2+Pt9WJ?1^Bk#bSY6j zLoWXAAYgl}V-{mF2G(Nf2hY{}t@epwQb3V0>@qz7mSldm(a9HJzZDQKqtHEJd8JdGe@|ze8R=M_<$>Dg+la;=} zW|^oic&Qm`|$@sT3gb+HCfkPgqTg zw^<8D?a~6fT&3JUXDsq9c-S)Fq_Gv9$1pP=7awWF*I?J$ zFw&8vHAUiXr9}1>Aj@_W_$cNg4Rp$0*5q0T~H zN?hTD8?1fFiIQ{I){>^l`AxBIq1K-XXlB0*c(yps%I1^3(Cv-(u_>PYB}Yjpb>t}} z)s}HU%TZL2nh=-2^8gWk{u&V_JP(}3a9Zw@wGb~mz!?j21;eXky$=CApWEWePkGI6 zd-;eEuL9duU3$It0;P6*)_O~%_xpL8jlP*+m@zdKOZ{r>JxGj43|GcJAz4GBDkX2s z{nP{;IOY=PzCD23Rouba!G@4u&>25m0H$(Cp}~tA+Jm3LIorL8Bj*jWjTsj+YX(<% z>3l7K^f8xHK_~yp3m~)Uc<_B0WTw!xogNkHly)H7;Xh(HiaS?zQ*{5Cn@BIbbYH*9 zjrJEKXrG=&i!6~8YA33qg&m(RV!D1| zc5*x@%xvqF^ihY!q`#nNEXhZh307&u!L$}6M|jotwRGk6b$9>`t0ogBs*%OfR=#_{ zsOmFltm*xUqJKDf0_#IwdgrIogMH3OPnoIkm3+>c_N3Z(uV??q<0y{Rxo803eS%i+ zhwiVw=0bm;FzIw<&nh?IqZs-_fZ;5;mgD_``}9c0_>t1CG=sFz*HAIPH++>8_DJ3J zmDRmXQ9(A^*Mk(XB0hW^g3>QBu;UJXeWC+>9^pqgr|Za*axga$&TzPxz}y{%bow|s zJipdNqn#)@aSELle3i7|6^Pu~i>25YQgj-KZ63tH?{Yh1wb1PTM`;~6!WvcuDuO!P zrLYsXfP+z!5Zn#>D&S}K=3q~lwZA4A zl33DjA(1N_1|i>IbbpP+cc}{dRywR-(!J67%BoLh0UJN(ZpQ4xg(R``K_X4B`tDbs z1HLr#-lsSJnFtZah=>JoR2Xze9~5vvpMChwch_Q&FCiUnSev#BP#yKt!1H}M+EX#} zJ*-ea#;5TTm#E|GWGGrEC#cVGIlPcLRdKk#Q4x-_>mYHJ?@+?o@0i{rJ8LGcnmH_a z$#Ir!n+cFK1=DO~&fP-ncE)w>c4`Nx+LTp&}9udtewLrFzt}x=kMMs)jv()35c`@6m@BUbh zz+KA|>;+|mK5QnRkxbYNdQ?)=gSU`3yU*&3xqy9!?YaHFXD|Ou10!J$_u@(wW2uNg zcAZmk_<7yf7W)y;0xQB05_c@0Z+X7j;$fp^aiC<-`iU=%NBY`f$greJ)|*yvL!_OZPtLjlWa zeASR}T1!00YpecHHedEGYP;U3;ub8Gl}@~JOI4llk4 zRl_VvbEOyP78o6(-5#|1tcPhHQ~(E|8oe+0;Ohr`tF^qwg|=-3gJhB|n)O*PLjstd zX=(>RGdj ztI;PAt!LXQilJ3ie!=%^VA^UP7C+^1mOpVaRbxwit+as~@(_n!q~!A5`CMmEZq+QcvT7N4qO)^z^_>Ch8UE4>GK#uNn%86=BYP?f zk0GS>oXTvt719`4Y4$^z=bVcDeNJYD`8IY(jlU+O){cs&1NDv{zmDXhKH%f=kgk$d z;8h$L)wH}^5?VM@LO$YUr}a82PJ#bWT;Y~;eVc6a_mtj-P8alQz+4$V?Zs{<{#o?+ zr^Y%KEyG=t^T>9roG4Av(~s@1cYpn8Mo~pOt$vh=(PG@)$bGBw;KZp{6rf?|MrqI6 z=N>%d8WjS_i6L?zED~B4DGH8ulYfdsf3``M49gFiUQ}@ob~5vw%3uC!f-f4RKgSE| zhDWdB-=naP4XrMXZ6@M&`n69WDPub-Yw-Z>Ui9Qz*oN?|wtsT#ymO}OQ!@U9?6~5l zY{D>Aeuo@?>|vTfT|DgWqEK#`NDzaaS<&6`s`W5ke)vbmKmrDDlY5d4avZ>96^s&2 zY$gg~;)61iT^hCd9m>cSSey~_T_WVTH}RKGVRg+c0`cuwu4;=&KQY!>+55DTSlOXWk+LO4{KhYc)@~{o`4_Rfy9fongdJ7J)3t^>8sNB>{> z3z)Xuvk!l-v9lUCqn zFz=C~zR6a*ZAZ=E&_?gO2*@Yt(JS|QHBSWrXd#aD5b|Bz9>$3k--~jS5b@~KAN0>e znG&Ji)2d`g`gx-^2Nat#@QVqB1RA|-ld*yT#tZ)<^%USVW+O?a;xbaR^L?wy$L0mF zu4>g%EQgR+S~y6O=j`bb`t1`&ZGt|M*-+EDV95*>+Hu&J{W@%ac~f2GlHf1If9pXC zAC2*y>CBG}w-wiSt`**p|5yN6Upj&Sph7o`kjf;@YH?0H^Sfk<;*(!y@Gpb|Ttqsg 
z7;dY_y)@C#ks2oO@tGeC3K5Qrt??>c!V5fTVnY-f(0FYZ%lSH+tZY*QIKzWMA-4MH zos?m_JR#WnmP5~C8C2o)+bbx>1nFYW+s7?X_f5TaEjf92CdgN)P}NZRCV!aYwF1Si zozFpP-y3%ZJ~d9O_iv8BO8<%ivw|D$9!#3vES@jQYb*lqU+2j-iM*(|g@Eoo<9h>Pb@9L;X@6}Tp{g^8DP2J#&G@1iQmyEb z3MFYTc~W|6^g%=l64n6aehX9-@xBc5T4Z|lFfIu9~4KlA~9QMMyJks8RP0z$xDbQ$IkE7rLc=%yr8)^ zloijJ%cnKY8_$AOpfyv(zpAwlp3AW)q9S(nUJrzawzOI`2epp5$jAkGxnj-6oD?y? zP0cGQg*{YkjTY3&Q#|`wm%)7Vv7lvQQmKBd4v^W*u;FTd(Ob1)u@?aFbR*St08EY( zV4G(9-&pCFA~_j=u*&f3`PdfVweX7@$(L`{42x&+V-u0@Q)Y{CYgET!r3Xkiz=iS4 zvuzJh$oCm4pC~|A$oAW0Lb;eAHbRVN+~d6Z2DGS8&B{$o)yi02)N842^R2fKV3mzWw!TacrK)aIN@V&Jh%~0DC&k; zF(`)XS<;p|rW80i-hZCaoIv5X`$X2Og70Kqu9T=KFnn$pTIG1|B|+)OID;0Qs07!IC(MB(Jc3&rPwvL|i9qyx z(}V(5zivoJ`)1>&A%h(K+0(AHc`Dm!5sFT+BRa3o4gh}(%ng83CJz$9cM2R^f&0dn zONTTFH^w`*K@G~%z!9NQW(~msc^|(G-x2+}uW0dPDY!b>sZ8BL`+$Hi=~r%kbpn4N zzhIxF0$tK_LrP*=Y50AJMuWrnlVGm)A{6;oS(B;%Vz1TroZ>FkM#QV-K{5r7S8DIC zb#R}?um1=}a@FTG8HqFg=(7X)`~s=zTzle`3CrT% zrH=d4`N|uM)A@trp6)t5N)Qf=3WOOPZ1Fu71J}YveMr4--MRoEkN?zw^%|BYe7~Hk zI0=Nyrn-FOvf$wOSy*h7v#Zi9Rv#xu*Z1AphbpMJ0c=n0g$priSO~e?CJCM{0r1u} z#ia=S!C@P#X?T$0C9qz2Sg2^HiA9M?6=ujP&M12-8$N?#NXDJNiNNTS4_i<$m>G9> zwkOlr3o>DA*0k}hgep8Cl-+E~fa!yeRfJ|W%(qDV^PMG9&+`r4&Z?8m-azvA^snv@ z`sVF*==2;ao|OrJG#6rK5&INQ3#*OUHtn53DU9=7tdAtOKMfywYtEn{>X-QjT$2)= z<7u^5G00f>P>1m8Fqn;9ydpROzEbXaE;H(kir(OyFLKnEFTT{>7B6J7h%8$r4!*ue zN5@C!`>WNomw9?v+dZ{Crg75!xriN&L;;EU*@8*+(~Q8ssQ`ek6$6ckB&q0*(G!F7 zF-mlm;UC#X|IlgOit;BVg$t`gWZcMWcCk~+Vxqjbz z+0U?7OP0`y?}8i7Ow~sf4s{5&-9MIa6LowS*Bez!@hk`qbxi z3Be36AO7sn(Z-n{mAAc$2}fu{lA#inw6-46_(&jH^rlm;i@WccrYW7L^AE;yQjF`| z-^!1tT7jf`KzCBzmk+0u_h#!8i}qFQ__+l=(7sCZafp6&N3sRi`EDE+yB7)Y>HyPk zuY(YYhAnR2a15v|5MX%p@Nz5Pxoy$$m=lbp+-7(tziD+Jl@L5qCqLf}{}9kb+e)V3 ze;q3Jg&@AmmDFA50&TkqVKN0i0noipAp^{T+W#{I26>)?=A=r57?qf(2jFd+h z?n25Jn&FcA^S=kPDm3WMy_KQf8GME1#I0h;*kP`?iH%YBu!$@W>C08oRwlM$XPsSh z!IS2eDFb&M7Wu1o+c$h)W+%?(q7_>!E}HrD^8=C3T@08^R2rA%p#&2lqEC zko#7Zu>wbNq#++Ew8MCJ_^%%s(hYYX|FlFtuM)Y6Xkv|`2pJ~DU*V5%&dRlLV9Aff zO*cqF_yXZ`&i9zN-U~DGEDIlse!6jraJ?WjdjI4@g1H@>hf9pZ>E$nmtbcNg`BZGR)LW z>Pj%shfhxmGL+^I2o=7WDw-O0aszOtHSjz4-q@PVj325qQ-y-U$T1v3F|T_btABt} zI#qDw^1!|_$2U)A|1w>xYVRsb{P}X~vfg}}*$`=g+xBBh;-PfL8ig=4)m{qxZL?Rf zD;p}fx!q@oQItyheXyIwcoxWiWHIa*n(Qvr`;N&bm{KA~%Vz3XlIte0=yx}E`TmDE zqJ=zq`;Im?cd>N;;ZLoTmgu_=GP_b~rd$1Hcy^i^vhE?Y z#r~?}CJd>sSS*jPE|&#y?ofi(M=*?vbM{R_v{`#7(86!)C^aUh?B zBdb5Ddz0!m=7SR$c1=^ax%WTtSE@O~6s(&wG)<~wpTW4~eHkvuNX3oae@c?jLRgr8 zpJ}?m;7rBPpQd?k*3y~KgOb-aCevOsikDjm;Q3beN^CCZa~g>5refN!kE2q=Ud^CR zA1%2joHwEYx)&e@@xJ@jxT=?WggbNdlI-XN*o3$X%Pk2l*BM>M0QjOmhJQhv=oq2m z=C&ZrX8Uc=>005;**qfwkbLV&&K5hvTO~<8L@OcttjIMLs$BoQ{7UlxJo8L5QJvKV zfP;t^e?pKr5S3^edLKQ9B|@DioJGB<^zU!&OK|Q3sk(fUym4MrD`Tu%bev0H;9Xf2 zHH2u)FN&RCgfW{rn+dPh(j7UExHRMR;$FQ~TOaHDV)L1XEcyB$xt7_t(j?w-XVvzt zc5nC?Y{nWs9F9%#$e!qN=9SCOT?Ek!XB%NM9PzCrCDN&uJCCe3s-}Jswb^}W!{W>y z7oM6jb&e0X`ZsEb6=w{(h4M}RO^Ytci>^UIEz2v&yV)Pcagd&Okt$xt8y<3)Zf(9; zSL<1G@FFLBbNzkIIc5rX!1!;zB9QHXdMDG!%Ea+bi16w&*~-Z+5p%TuiZJwhRLt77 z_n3y36{=TX2Z~=Bz4!NLfZj!kYCHx(nqC}95*8noFyM^P_9xbo`yB~6mt&>V%pzgr zn)gg61?vXCICgFpZTittSdu|QrYYn9A75`B7UjCO4=)u31O%i720;m>QA$!$rMp|2 zA*6>ANf}b<6cCW^E@|oRlA57w==^T>yVu%ly}$i_$1#6A3^30;cV72-o!5CbJq<2! 
z`1GMgn3>Wf(&>~*Lp4(WrF>imrZJaK6!u9gnOlbQ_oz_*JBd@OR zxCt)87a#fT!yo*3mChhCfq1Khval68_HkQm>tgDw|T_$_Qv4H0O-2R6y5%7En}s^ zV#=h7fo+ul>_Cyz*d0p$k%iL-5A3+C$DOWLu%Z$GOzg^hN%7|ddgg;&G^}fZW&P0S zQ}hm6`kBY8HA^MaEa9xZFd}Bs^*8I6PeOUpr0$umJ8Dg)WjDqXMpojNG3RknwU?O( zD3g@L>oG24e2o$u>Ho!h+0c|ZFPW6bk+iSWVO#?)Q&jjW{xHUZt`n4}SH0FPkX_Dc zNVvA9AqdUU#=Z|9bzmTS`!)2of;O#?Io8C#rce?agBFEAM7PTQ+D?^~cLZ;8c3x(q za(2ot^UuQHs-0SkQQ$uzd!zLKS^*G{K3J)#>c>DP9l*T6i|<`+D}F=}G2nP}vsWk&kr3j%~Q!WimH2z07n_O_52? zR=$7R$sa)mbdH0PNL_-Q=`oeXG^5NQ?0DSmrVYzRE3_0x!t?FQb(gM(V>fU_oSqp+1bK{)@#C| z;yZN`nd3H%b{pJTE4;VGI;uqc5o8;aNNt&Cn-nCZ54Fa5 z__7_Sx;AC8-#LkVPTJHb_p$6W*S!+iOBrfc;TP?sW6jderne+=5A!5l$p_S^;lCSK8xy%B?Pqus)Zm8iC`v zHNKvo*mcO`+Tq#JQ5B%8cw9ehlcH47f7%{iSb^i*?n1aEHF~1_D8+oT0YddgGj7k% zThU!#=@qEUDDe!zonY1?953|@p+4DcAZfeJ7s^ho_wdJ*7h!l~J6v~qw6FL4)qPjb zM-|^86IDd@HWEAMo7!n=#X784^=%#%(Wfr`$j^p}QJppqnFQT?s~~o*_)a#zYrpJu z(Kq0ICBm=IXJ!sE*3gXb?BbwjB!FEi5PBqj$VX~_$(*nlJm);-s*<-MS#m- zCF?a%Hs~BA>p*+jVpQ^y6vB&X7-#kBSQ<@u3Q3)&zgn_%b_>kqC(jd~ZuEgt@8Af5 zP5vLOeNgrF`9?3N*)PO;@g*%evssgIRP^GF0Vi9?as0-^#XIXI=kf;yCyR1u)E*Hll_-7u@FJ5MK83p<3?GM)&u; z9yl$9a8Ff)G4D(xu1vbG8?qfdi0ior8q$u*_YwJuv&+xAUg|-n3HIGWuE{*4KX~k4 zht21{LEcHQIGhNbNl6>;{r1`?&wU%`V&~_M?lsGT7jNEF)v_}-RI1P?Z9G*^%e>SG zjhT^EsQtooX}m@fKJ=>EOfe*E(#T#8Yp4&SA*)VO^W zCVHmdX}Rl__^ZX|)5r<^eS_iaNz{iME4|GWnf`v!Kj!-%HetaHo=LI$>QA;XKpZ-% zO}o&F4yddl-;_&Fj;cpn?i^|oBkjuEFNTBII(c_-<(}6K_3^~W)7^7*y=XyPub%Vn z$t@Dd@h@m}MPK!^UQ?)XKq7a;l4IJwG<-|eL|pUGL|6aORGf%@9hoKZ=v+}0FI8?B zh9Bhe#|i@1>zAmWQJ{P_&s|Xn&*Q|V!4StI6%L45EzjoR@D6*PlEx8_8uZEQl$!Ja zCyj;+iA3bDGnSQiS}wO9H#AvIUfxpC&2ubR^D#n0i&gZmj45&5U0qD9ytOxgs`}O< z=r!@$Yb9jkqgw(MQYseNjZCrHk=q|UzS94e&93wv;wAifH}a9k($=Ck9j`tVlG1|(M%*t!{~_LPsTXIjMnA%DYBp zQoYdzE!1I)x8=`nqt7SH$MdAOnjd64CaFETnd77zEIqE|W_+x(ReSpn&#TZ8`KyN! z*ZGS5C)rN<=gtxE^0NG2hfM-esJD%%^=u#Xu%aq-4+qd*f&KI{aJ9jbym+`1O~^Awwf@5Q%W zqe2;B^9$U|D|(PsW>Yh7acQF#7W&aesU%HRObBiIj~H=sPwy*$W@lh zI0{!?uA!#74mgQD+Kar>Q-*58=CfGQ3F{wRYG=>qKP%>{?K5vsUu!Z!BA=kgvf5o2Ql~IJ1mRFkGn3G#I7V#HnZ$l`Ugz~pr zcnWT6-X#&3A0uTgpNg9AD1SpfbZUfhU7tk=68X_>q-d^u31chROZ}>|aX9w1U+-Y5 zk4kOFY4Dux*r9-Nn|h6Z_Hay%(aV0PDN>^>W&f~p?2wxpskrPZQ(GOdH``RtlEgwS z&=JkkI96vmJVpxrndzagdvH?32lXr+(>klX8}jkVv225>$15KbP8MhR!JX8Tp@><% z6ImnA4On3l&A4R7{Bve@&ktEv2?QTJl%Mwx$99tm0y1P8{p6QxuSF~!YC-+V-9ckM zuUWG`n`T5P95EvP;xrUa<9BJD$9XSkrV#E~sAj105=YRzV|P|#V)23c$+hT}D)=GM&nVZuda@;BR}HNS{zl zVD3J|VU5$T>uHmMMOhhM(Iv(@s=QLmxEcS#ZZtpPw?M7DX`m95FDaD1l|f=F`;;I* zpORivCg%;BnDC!3{ZE)x9Z8?krPAS2Q3JJX1soAIsH!V2jzc0 zRsQ40)~Va!k;XwZY+Gf+nhY84Rdvtqf0A`7p~pkcm)gyJN#&z=Gkf(KAJ1Er_U9!= ze@DO2HUEt@-~w>8vmQNDK8bv-e?04qdt(?;b$xZ-5>72@WdZlRJo5u2Tb|ZxbW4%g z*havH(&3cTn34SROQ8 z&(jrKU;yl&Ea2u3_G7z6;eRe-1JRz2$%;+ny!*2|gl{D1;N`U0B9q_-cW&epxz?(mx(myVC`r_>U z=&ufYe7gGr|_&$cpMBa07H}q z=oGfXs=67sZ)8qtRRQcxq9xm7xiYGzhvS2eTQHKx1O;S%%m4_?%$E3unZ7!js|IF; z0}o)WwgAF26(|O(W%3eWoO$ApP4>_qmkLNPH(pQ1-eBl0s#;JYlSI3ASWph5O+(7Y zt13T^Fmi5|8Q^~Dw9Q(7ptw4eA$23r6LRCMH6>C$qK9oiDaSdgya4#AIoH22`4?l2 zLNRq}op3JCk69VBGdeJj8+RJ71+hulzU4un|L7TEH@e;0ld#YkjwyTGoUOlxz7nA~ z(1&2Aa9xft06c4qtYp|aq2uI9XkT9wunxO)V9W`??q7#RuHHRf#$^*S>J?7p z1n9~+-AI>>{L1H$t1O|*&UWucZ54JUM%q614vGZ;NMadvghDeO<&bor_5nMTh4e7N zE{^hc9u2cpzh<|cdXfVM#+E#yzmLn#j~|;K%iT-*q4d|piT!{f9Td>0^gt)+*(au> z4xuadU@9R&{)^ofx_gv>A9sYO>nrvxA5ZwZbO-HR2xvYRIHwIL>M1I@aTU)ou2Bzt z!@rSNDqCV{{8fevQ#9Z~{)y{vZ}hy&I^SD`oa5T1>!*aX-fUYUTi_tl44%w-@i)*M zgMMNYc?3Dlr_L;Q5F!yU?GNC7!J7Yg#2V3xu20RhS1*N93qu>8zAB|T#KbKzy9eP~-;!3p^!_nAL8=>;_lpW`~= zE2q3G`_rrxPmTyfe=-w{BtGW?)yRx%F!t$8Uedp0IDC8$Pq!o~L?H4_}9^j^Hx!rdAG3Fe=zC5S;{Dvx`T~Mdn5COJi|Gk}Vcks4;p+ z2nfAuUi*MgGI;;+) 
zG6~H7$Vxt_f{j~-bXyqPC4rg6?H5}B67#nTmFs%(mv4%3B-&_{4Z$d=WwtEO=B(}k z*V9T^7fATP$j;I##Hf@u8um|@bf zE3lBgjtP2Sd;8!G0|4C@^2*k0uT$H=SicvN(u1L!WLiqQ21a>G{*t%{AT1l8?;S0R zR&Cv^<-iU48577Z_dZb3?RQy#^zgnk)*f~bTBe-FD#&fyWa-#XnA^wPgwvBMyk%l@mA|@ecnQs0Jj;id|J*CHS{7RNTN<4@h#F z>V%k}1`=L2ix3w}LR_B$pCMvzm@A|pp&UCt_uc%s{ut#qApXx>I?1KUq+-R zHVI^Tnx=ZHsc%EvcbgEerF0STt*8gnrYRnqu?8*vcM;UfE{HmZ^d3N)=kDP}COTOM z5(3aRX62^ZglQTu6KtkOBDlqZ!SX90B?u-JGpm8+p8T>wRlnOC9bb!#de6|2-j;*R zJTotR%|!e64GOJv>jAvd$*u^p+Uv%Rea9_*lP2-2%0zZ^saQs(f7+v^$Sq+X@SbiJPq1K?ZLnddSp2z;i6aI2iCn8 zAVJAkA^Ql`#6g9KWj7kKu<9Fvonzt?CJUO>{Zx|WZ5xskx^$2$p)5^36=60C?ymDVSqNAFmR{U)UDxN0 z*QK-O3H)A47xcc~;|lNB#_5l%H1Cm|42;+1{p7Tr_c&JJf9gdU{&@p&wo!6 zVNqvGVc^eu?{(}_HTMS_7_j}SpD3Y43JdM)i0g)~yh^oETxvr@kRxVJ8$LbZXRTSK zF?itFkPxBPT|0?H_<3Y(+F*PrnTlmBO!gSaD{9*^pv^Y4Mt6s9*rVkFnR zJ8I~L*e0tv;~7tej2FAbDsEGw=#HlsM9l)_gQ-Os@<{SUFtn>{x~uC2p&mx3nVFsP zvcL+W7F#^KNvOuO1cy#r*)lSq6_a#Qa2h!oHiefx`-&Ym?rUk3k%yp>r;2Ur_9UWB zfn5_*TL3bHba7bXrveB@+3ros@25U(BF2RO&iBjH{+Cywn)_-it6s zYVVfISRW6r$+IAK+V%HBbxS@TtZ9oSW?q9ak!vXo_f44I!VP_*sOs05`hR-g>0XE9 z9xw~^K{cb3txtGwQtcTGVllrL55)&(OC<20k~<#|Y=xojX*B?%&UCw4gAkdPxyC`d zmV)P+i*aHb+XSu4uweA}J%gD6o9{1JW`o9-PCYtL&#Gk4Ibg+_ltmxT!ANArVNrj@ zQl0!;xR%Gim>g-&ywaa*Gv7cax8IJ}r0d>ByOiQSLttiUq41`{BUq`jOy)b$0tYpA z+8>tCZ;kOKJl6#)A@;kPWADbZ^jH&L95andXwT z)n7^)_m7dlSf z3&2FD)em8vv2ReYCv(9D%6sos46B~D*g#-mk2s(6!GT+h%~AN#$2$*-&=;a2Z$sDk zt2&;4eqHh8s#9o0&QID!f|Yk^Yc24}m44zY0RaJL*6A1w67loyHcd#YJ%xlvAaNN{ zDwaLI01?_7NhW$K59MAntQ6(QV90kTiY07&13#D%&uSh6$(c>{N_5H9`^4(D@N`wx zDK6v)VWUHxgq#@T7nC!8;L!j*=~A4%Ooz-MEbrq?3aG^H?m`uBi;y_(e%4)d5<9pm zhmuI5ntx!$&?mbXs*1vNSprK@;M`VGqG-U!$56S*iY^w`Wrb@ra5r(aDIo(ntsE(3 zPj7X;0?Od@=& zYjrp)jLJmn#C}%(9s-$og!&%&okTwjxe%D<1lkPVu-W*9yXKd;a_ z@GL(w_o>Wf=s)1w2Q})yiuAtG!L zzEtQM_n+{hjN~@tUUhw0WqsM#YNF!CixpPyQ>s}EXHxM{JgW97iUM#rc7ObG_cj)j zzQ&@B71s45ff6e|!KX)}(6<=N5CPbGa|lbhqWYVD!ZyQ%9rmDNYM7^MiC=LY$n}aI z{PDnd#A7uzz)DsIcl4bEV)MyMIwzndI5BkHiatY3H<)1Q)mK`Ffuj^KOuMDs=)f zVmJJ-CqN@Vje$@d!w}I;JM(n2m?2v0M1QRM9>#ER2K75;>Qhm&!y}T*9n{Ff0vDhb zTJ#Xp=d(CFLo50r@gu&V=xB*Ron||ZtY@hbxnu@h=`Ih`j`RIgLz`>-6hoTz0!0ET zkAQ>v#25KQ-%pND_~|d-URo;%V+@`Tax=TXAom@0%#EQk5F^obG2lx??TqHeKOi9Zk*4+w7 z{cvfS1*5{7a-G4_m)#Hj3h~-{@%-A@`uS1s*G{9-v^7878n?S5cyfC=LD_LylyZ1H zScSX&PF)qN`0LTHP;1p@0zDukOAVvBF|Jt%Q`2!A6~?gK4pWcVFe#`;l+^hyhO&mJ zpFkhRUPp%`*Cr~1JE=rQ4Hej!5{$STmi4z5rxsvR%=I=L{Y>qo8XzDmwPMUK`;yNm z*=VDdkj}}+Rv6(BxX z989&fcV2$Xhhibvzg+$*S7CJ0avU7fU5|z0Fmedn)P4KvIPcxP>9nQq$>&s#cC3z1 z2utQ){J^z*AlF_(PF>30(1D2_VyCBTUAJ= zbiWPrh0tuep-?%fy|?8}Ik6=d%~RY9{6##GIH8v?&Pj@dz$6UKKr)Of(q=6SAm-s! 
zV{ZS)7rgr1uAyO^6s)jUC}IH$(SvBLL9K2lW}q$1rKT5;yNAIZa*B?RHe=>yQI-^N zd8m2tu}pNIMkQINe&vf0^vPm47Fys^u*Y@PKw^;O=~?`+UiKSt*PhA1j7z_7!IuDj zC^J52Ey;kxDijuQZxgm(g!PO|P1d3P=AT50V0^N*D8qWq3Z=3)%4~O!#1QMt%?4;%Cz<&HC7D zI?=l3ckE&}p261c_3zkAj<2I&RniPTt1>p9=+aEi3>p^F+TDvuZ>Q;*HXOvzTr?~h za|p)T#9IDAO@PlQfm1ZwL$xuEfhnWIUH5d!-sA}yOT)>claRi(x=q4(g5H^Se&Clm zlW1)MPXV?LTZ^^w<)ulz<~{eG>4f;kV0;1g;pLzWLA3(7dtiAi{7U;c7{49waO8z< zUGu?RG&!l59`jJw+F=h*%2OLXGoIAI*#F2IH$m#BR?#y}R~u<3<%#6=xPiNLA_|%h(%C2WjrIzfQCuP8_Jf z-2LMb3Ox&2TfUX$cqL5bT^LX8zSv3HBCAOZiG1?qw}*WvZOnTSEf~My>w3gobzt^r zo>UA=MXo9Y!jN^IFdS`h__dGpVDFcc?hiVg?yym1Ve4Oe(|TS!viEUkX7OyEYpi?3 z&(9}bHIs9fN2X-GxVQV8{H2+^aCmbm9vnpmiw=_Vb@KIWm&0Bozp10M1yxMb^nKps zyNfIw59)1wBQ?h;JfB^cZN8&$_p)E#Gt&3~VVBN#ebC`*awj>e9(Gs~^N_R!!;ejp z@HMGbXmzE7edvm2y3H;rSb5x9B*>wJixl~E(#5*H>kAz0q{^0ASxe%d+lpG^kl%Jt zw7R3DY3twwBZ4iOJZtpIY13>j!R05-0~(Q`nC(lQ<(j5xf}J<~#`ws+k+xilTc-;F zR1a<-@X?fOAK{R_eV<1C@cyrj6~pWc$3HBVW{x}9)>N4wKgzKq5-sEQHqzK2wRO@> zmy2NO#0F9ULU|Y@b{k}@x|MGAXHK4L#}@~JTdw@QytcA+VxI=L%f~bse?$2I=o7%R z^@%FF{tuu$Luq$+kk=ox@5?$R{o%~@kMCobjlBH1AxWA`X}pK;KbFz_30C|Idc?f- zxf~Na|L@BH{m(n9kOEbh{M6X=T=X;&k`!p3z4sePIXYM11%i`i5+>Az5dRa;VFfP$ zfj5gZMc!*#=R1;!&AedjB}YzRyEsw;eT3+XF2=&xH}QhfbnoeL>oLwJ;lD>(zfu>yBi(8XyVgkP2w93%L+HtYTJGlG<{F9iCZa}@p7r*kOo(|i<95}APE{6zgQqXxYYY46 z_JN~)HonFhgR}1v+wMi5loeHh506})ADQG#9^Kf{=H#$Q2Wqj0rFL^sW$FvtRAAWl zf@Vc&9}cb;93#XuFQBuR#C}9UQe)@=-)6WUuIUV3!zDm1i(@!Rhh2jB8ett$4SnyX zuXGeUTOLtUpzyBTEA@f=OQ;O`@A+p}AHD=cWa8o_R$mjWG%@$JL0QvXPE3b3Kt68U z%Pv{7umn-U>7trrYdETuZT+tt!=LNEv@TjsV}X!sQdXbDf1+kOtlRuEw81pE%Eru+ zKLs*(UH0X(B14@?P=kB=(?|w!(l=BC9LN-*lOyygf*}sEx6N;9FuvnX;P-I za3+Yz95~MhJMMJURq1ZjEUaL)H}=mdP(2qh0BlB8~GEs8(l zSMl)F?m9R%y_~^k!kiBN*(oK^*K~YdKP-w>dO%{aPRXiklAi(Z#%M8TFxQ3b?O>9O zT^c6oxyP{ns70^484O*KX#slAg6?H7;Au<&IVi;-+)`)~KRc7n zvmmaF)x)Ta2ncfeWvHE}lvG$6jBNbm=giQO&sE96#XVys$YDpodIrN#bXv!kj3nTi z7cCISILF-hP zLPH|&eIa8~+A!yn6=q^e+L0siq&zUUGV z)JoSDy1LHz6u-Od5$N1`eb5U;el(q|0J+HMbm_Fjx9TCXv>b*y{&n&tFaue5?ZDI; zK6@naGZwWGX6zn8mrtV=#`;)2jxB9&Bq%iIgeEAesh-;A)a}*$>yf)10FTQc1Q1LE zH8V^iCJn<%-8mG{-_+1tS69 zI|io5{8L2(N;IB#l}>?d>t#!0;o?QTn0!lj@nvPF?Q7JK4Ui?ORjFsZ>gN(7B;ANL5~jDZnGXxag?KUdK= zCzSW*gM4LTHCVhDcvt#-L|%B)inoG}sH^sW3wivY66nbbUCoFYU0`hS$LVgpDGE0nSl6-3VES;o8~&BqTE!$1q{315}8OMf0?-5aK2q4y1(=guv*A|gL5Z~ z3Lh(p>GRJz#!*kn>qvilG*{)kKjn(Zg0LKN{E*D^L*GI^0#HJEV0PV`-^ewozV5pY zd^}g)&WT1`1bzIh$AtS&zNwaIj`xBlQvEPE(4BtGD_02yP6N-p-BC#pRrjG^QGEP- z=};Lsx1)8|u~*=6)wAf)B%(NkIZ@~D555<~X8dwF1tztYhpK0g&iUc=tYQzLrsnB4 zkTdEnC0(>hpvvCeU|+TK*u92leUZ=O=bm|70L6zu52|qPascKi{d;VjUq1I(%fjfl zI9A@ty*RFdmj^aki?34`!pjGzCvis=|Fii3NHX~6&I!3!S@+-3NpMYbtC>JTVHEKw zSmZVFLk>RzKX2jH`R&`SdVS(*t|#QKSSr&`QAtxxravOo^-9O<2X<{Z4b+NT=R|}3h8-}sf0NKOA$*$Q_EnI z2B|7%$=Br;TkhBFdasZV+a_{OCtjPG?{i4hIBjXZVCCB&qp!K<{H#Rw_lBfbq zed&YIwSOK7Eg$j#fqN0`EOUE+Y)u%b65)=;kw0AEfIaae)h*>rVC(EQNtQ*x-b1DU zcy9frQW6Kx`$$-LT*hscwp`&KJt(HHh!?KT_v}Zm>_29>Sr(ch&m107r+jQFwR}JX z%MWBN<|~%mOQ`$WNM3=Alx%;2Q)vK51%t1Ej)Kr4eQ*Uy7b)V(affvAW1BHoYvP*G zKg#O;^LF3f-p(g~lHa)=_4fK+cek)Z0AQ?mdWLIERCJ2KQhAb zvXH>#y$EP_1%Ml=i$L8TRatC2ee!L?jpGzargs?t4f#Vn1%QXIgCk~k;WcvRj8u(v#|Jh0h*a(IMY99Y{k9@h`0sWed4@TvMyk$$r zyn;jjO5gsM3-8X@n=Eg8Qmzug-?F3s_{%?(yl(&T2Q~7o*lXo~{*75J;4W0mHYoh> zss8r?`}b=)qnp9I)Hj;_|My6Ld%`ju@DJ10^FRJY&3Syg@gdM^4JeG;8x3sQIFv_b0+o9@uK@DTS)XTkl{(aU|U6uCFW z`5!ik)U*wZkRZ{PuWzrVsGS(&XEK&EZ;`3+tY){@2#-m7UAU9 zEAyLqXhwx5czt0{@VGsMoCE?qL4Zsf4$aEyVES|iW|=KxBiXSL&*BWSnMG1iQFVWq z)#Fhp|MjT^)&90umy$uuRhX~qcrA<7Va$0;)t*okrB=U0`B`DzAx^H_3AxewGwZbF zdLZ8q-DIlz{fTpm6!_*09m3y}M129kF8_a0EFGft!J)GmLKY6j@9 
z=+0a{eN~?OE!*{hRMFMxN}CgqxJA__%t$aTYbMz=oW}P|>vBA9*eM&9Z3i@zF|$1r z6iru>^BmbdY*KmWNl*o)QU&I53uqw2OD7-}H^q}YV#u5(F#c7yD7#k5@$U`u*Y!`r zH#aRbvG&UNUoFInc6E>?LWw*ZiXfpq6jTyLy*#kHosY+S03qU`vd~8AHhMp6kTMzRCjmgmioYtX5 zqhAZ&!W~isDaLKuBydjFkb2DvNPd#OYDARnY`AkXZ$LgVA~xf&ty=;o?iVJm&_&T` z6(F$4EyrsW)_@w6-uoR`_Bq*_$fR13S{JlxI7QI$3js}r)F0eyznshY?W+0KFx)&|h;SM4Y@Di@g8Du+x*5n|uo*IAH) zJel<9?a5~Mop?7~h z$E&)Fu~@bb2hO(?A~}eYz0OMN^nx!1`L9z34W?L>wCdOTY#Ps>@t-czr)rI7=i_Xi z>$i?Ch)>UpH9<#d!6NTN$g$=|d4qrVaBSm4ku*7V3c zw}Kl0$_|@sDm*UZ$#i31WHl1`sr-Wd=kR9hup3gJLCqne>6Xtsz zkSw($QQJ8O0c-0QFl>-OKd0BT6fxSkg!&<|nZGudk+>EEY`r`|LG#Jn0zYXP^Tvbt!} z>&NdE+d;7xbfNwR;7>r0BEbuq_!aTwjzLmZg7vF}^s*s#7ZXKXR}!EPSv2&H@$UND z5E9B?l0Oy!)uc^il&0Wn0oMxy(AFxAX|Xw|L;8VPS#zotrdM!v3=$C143c}nc!K_P zd&!9~_1N#E7X^V>SF1ikr%mesBxA8C`gwoa0Z>vJw|@-t&IOoxQ@r*M0&`sE*N=NQ%kyYQ5+w00w zKBA~hTT|ct& zW>jmMafj{98hYWE{L)_f z`%xJRVLRGE3*O{gwTMVvCw}8yvb_P>==m_2n9F0$f`i$C+0qL+?75yUt+EbmjrbT; zyUz4f!&+w2t7+%g*PKjSMYA!?%%!V6AyXwwbXYO%xRwl!$MGnxsd<}o+WfrLXO`z_ zY)MgDhiCf%3vTZ_=H0jNX*6`-%hO4dS#v#omDu399Tr`wdYMo-J{vlJc8sOWw6T+Q z)pd}6rE<<3WKT5x8~Qdz!8b=UbSE$k;2GaC81A`#VP0}C3|aq-DvM52H(48m^L6*1 zz^MbsSB@(v?0eWXbOC(MJ?E0X6=rDjtdCWzn4;a`;73nacKHV|inPdXOo7s^o=5~U zwAuC(ZLET_o;5&w>JI%(lduAZthaYeV;AxqXRpB198^_%H<%gAm8a}FD7Rusb0!s2 zToy5>LuuZB$u-~%N~j1!Im4SRo$B2Z+^gq3&!dv=bch)YpG}}nID$TAFzmY)R|Jcc zjaC9Vqv;`3JFjl{eg?Thc+Y+V0HoJgv>MlQayLDfjZeRzLJVCrJdf-O290luv2RBJ!k?DY1J613Nc+#0_uzBC zqD{e0QVzXpl^>!#^S@w9$FMxV(XiH<0sO8WHtXDvzDRX&L9Dg*#4%5Mz^2m6~uBZg|oQnL{cg-=6%2;<`Q# z7rGx$9hAG!*Le5DHzMFu?q!!gPRKKQ-K@X2*kZ z>6bc^s#1MqEJoWb6YuUok$sBz{V2Iit!*2IfJ0j1#0YVhm9733!44rv!_`p-p5Fr7 z>UYN$75W153hl;#Eo5C`^%7KzRZ+;%VQpqmggwP$)q?u|fYT<+B^CRQ$@GtX`YW@X zk}SIqw74DaEc^y>rVeoCieF9(x2nPBG<9?fokCxC>L!mKD58zniZKplR0zh})^Ocz zLJweQh|gNH$jvvIS?&ke z-oxFRU{11S5qDE>@|opSh6!Ibf{T z6GTI3M~Esh+zSe&NjYTSdg{B2!#$(#@llN>xz5EB5hA~?@Uq-U8MHI5G_O`ccn$E z;@9CWhzB)2kKfBO8ZoiWk+$rKlPonKek5_7Du8t$01!&`;MMBm+@J>Pp<7GX(FfjP z)u57hNdb;z^tdC(KaB)1xqC%TMVuCI1De6yFI(doTLB60asKg*=hAUE?D7BirLXwV z3^%A`is9b}EI-U;e#vVMy@vAiK;VQ<}>X||L& zY>vKgmg(HqlRK&OwfS{YbXT8Iy$X`$a{ryr_-?~Ar%QEy{bHi=ih@asF3wzu0$$&d zO748zV(R(P?6mReUYBF0vv!q1wsH&jus$l!lhlE%U+RGiZu#2`bjFi=-!cST*fNR5 z+w+pPHKg2fRsgGh=S5i8$cq>En^Ye-)-K%L#9pMRNw(@Hz2cmXfvHCupE(+BP84hC z)`ctGq}eK1cWwoZ7mQy6De5T$@F}pwC!CD;ZI(xX4FCoo@lJatlYdKjox(+T$=HSl zT=F^e-Fa}cA%la|&Um4osuQ-irMU}J*3nX!P;^?X^?6!s?AC{V%{r~6nVM+GU4)fa z2GPU-gx_Du$m(}Smmt5VQj5=I`mOIB5ixRv=2E(atd}nQZ~;A~C2yk{H?I4sx?-II zmcyv`^yAU?OogdP z%fu}0pJ}V;Xsw18OD0p9cZ)j1lO3yF)hYw$vIMg>(^+3Gm-h6Y_@F|o+Wg|oVt2gYa;k7Wk%ybWfm>?%S`Z3*9qV*6Q6AFO~bx887>&1%0W?O}xj2?3o7 z>FXQghe`fsm+f&5{`c&NBbhC5lIleaK-lz!Yq}9#0zjgd2}f?JZcw*yJSI6H@RrLD zL-xordNu9ldpjCu(XIg~|6@jb##%d_wxXtrPCNB_qpzpY#SF>`$2ri8GgCk+E<24R zLtk@U=!K)dgcQ0wENz^st2TEPAG`6^3-9CZnW+|p#HOp${dXW`%TKuDXh^ECNa zyy&;{nc1!f2~dQ&m0RlRqX5NZyE9pX5#)Ru-gle;e!&9*t=G$W?{k@7xVa!Q6c$2` zEwCb@LS=@Zmm44HO`pyl3uR~|Xon@Y8oMHrJv0g8WAqH8i+cUBB&b)^GPn@&Zbt)WPQkN3LT1gVGLG z%5T1W3=Dca0iesL7_)GmS1s5$P@0S95@r)*rdAktk|2KJ%hVwPNqFV6wa(mw zPA9tyw~E&Rb`a+G;gEljd{m8*#hl(i4?q8=T7wU=M{p|!uhm*6ilF`;GoxX0%2%q5 zW?E_&MzT-32bnlgV&iqY2@qUHtL0yrdJ?t<4j@W?2mfJ7iBCywH&J9j4Qrk%9&~$Y zRWWHoTrkmss9BZ0=X;PI`YI6xz2;K)(F--x3 zKV%1|n|U0Mj*|L%SWbu;>$n!6fv)UCF@ambqRsclOTbRnh7B|Z|` z97ghBoif4GBM*3P8`C`Vqq(;O;(c?+3fU?%?t)sF$i%tbXf(*1S*7cEhCZ|;PgVHz z-(nYGasYaP8uifl{}~-@3urT1TzZOG+1>RRlk>%*OD~7w3M;JpR`TvFmAC( z!6THqD~3bJGB+Zvr_Pj>F6nZ(rZi}8rE>Wj7L-)l?XR-72ZS6|(__2kYm;iau1)rk zpLUXfVrKb}$hgcxmTzM`JxS1+DL!SHFS0fz>sC%}yf^Wx z**+c4z}lOeUu$rf6^Pr@%zqE6x4mExcJ0#7<%hw&q+F9hIpZ&)V=(Z3o1*}ewq?$3 
zXG_N7!&)Arao)1RCNl61%YO$8y^lnM4WVvp&;C6tziFWHFWW@;c%4;i3$L@wRqoDp z7`6Oh$1#XN8M_~Jaz~3{tc*N&O%FXObBdilIQd-O(7_%SCl4p}Soh9VS4V6M_pW@QIv@2`GN2`w?3T+!y#Vxuv7Qfr7(RK0!!_}^yR!A6)Wn%2l}BVo2BYh1 zVs@-Oyq!0#bj@9h9&ppAscMq}>g34EH*oVZ2r{GBcM7-z-SA%@sLla|gkUi-BTju< znRmPCEuXp8^QoiN!Ir5nLg+TZDWx$BYe}y-J>UL3EH!OS+yf06}gXJI@h3WF5`{?I)VcqOFSQd#(L1nb<|_9^J1rOnpm}JSpDW z&&S#|?5w^{mJ}U#|Cj6Q;_Nel5w>wr=Gx1nX&EtH^NgtHCb@*k)6;(6J`ZT`OgP{! z@gQy5UhVO0kL7FW^VisI{XX>oc?W+Ublb<9T;sCw9VpdD^elL8$qt<^{ciql(>bnj zSsj)-|1F3TMw_}1$Rp4U`jH3!I)Zw0N$M9vUib$jU+Pt%BCh>sE40h@$i-QUtdRB4F)a*QH4$uGg+(XA8KNG5y`V7rp*P zwXU}kE|?XBR=)f5aGDK#dzn4C0V)X-Fg46$l|>#I1*!+WyUE6=3erSig(jbXgr2%? zMVt-SA>C6U-AZ*@?`MaP92ZFURsq^uX><{WWk1@r=gy=QCXJBR0U$@|Y$CZqO577i z@TuKH8m}h~tCe($Si0qnTBao#RyEt}8YaNM>P7d~TSTFo_4>OwL)H$Db2D!3f_Obpo;=ND#_V(lI#PE9X0vh6cTlgloCP z#cg|e=Ru;?cfKSfwqV%Syiev&EFq90;gwrI^H8o?c7dH){dO|PUXU-smwh4~c^W?Y zi8QWKX~4)|UK~+v$sHN*c;C1(#=5miYm`zNN-}dZTUEXf2DEWFbtc5<@P6kxb5$co z^gLnH=m*!@*|CKX%F~bIY<15AM}5aj%!rKfP8x9AIGW6}8GFZq@@}5*6>jAk9Vn`0 z?SG)5X$T9HFCc^ZB?PACcZ2u4WD{s{4MyDj9%1ZqHUj`EYg~-lZ`~&jAFI&P(QP@P z@LZg{!EcNKgH#|f)IJbtmMWO*#Ue%S*13<~K` zdw=`Z1qkf*rebEqix-Zd#?kWaXQSQ^0u{ zl3gZtSDiU5+apDGZ^fWJ&~1H0zLkQr6Cx8N*>O`-E>K!8Uw*B! zb*2m)_y$CLR7i0I+gi_(X-AV^FMsLYaB!jZ1F2)vZOx%TTx2{(JI2~5-z)CPH8=J^ zdBryRSJB=VChil5q8_U;2gA6_69mY5qqm2_4?gK$ zez~(R`JM(YJ5iGw0@V6~*6rA$!eE%d4TOIk#7>=81Hexy!VqD5Fm$%WpZ##74$v)j zu#eS~uO>Nip2fj%fe{mRU52Q}Cp@6X%f2I)F1ttub6$biiqoHJp%(0_#;PWxWt+^C z$8EItSY9+EJ;O-g_)S!wTR7x6ze2C^Y{ zRT*?G#LdEp`qfwRf|QsBMI3!v#E<6EHlk+CG`TM4=mXgwVtl5@k={W(w!r~VDyNn; zr&Pw@G7deNR$6c^39cN2DSbkz;ggp(y~VI2RcZ8J74#$8IJC?IIm;W9@%uyS=yIKQ z`I8AH(T03s%YwU6Vifx!B}`m?i%5_wUd43H+zwT;CllHj$0$ju#-tFXPEyHZ047#+ zg}wD`^<;Q9zZS+FVjH(ls}BGt=ytJZ=N;F0Je{R+%ex%J7a>HtXaf%rjHV5L7z#>Q zp}*}%0t3^E5aJOkT53@@Qs;k7b*lK>b$Iw^5`Zw$5ULe7T7mIkw~UGjf2vMwSVyqD ze0_Bx$-lFaXE7i?mGeKFK!D@HX`-bLW_c z>%uZto-5x%2I&hP0Qc=j2bCHWCFWfdC-93?->XON6aR5tOVxMSX;M3wl8#V`g2HPf|~F6s$*YJyJ&ShKTOc8Hy($8W$1;kxC|D%{wTOKYc~VRJKf&7 zn}z;F*Tq3H9YVP6ZobrY3YCYq+GAXpi*xL zA~mhE9Sno&cEaqYWgxt57SDUtM+y>V)5R)7eNFgJgA>>H*bE1Nk6QR@_GQ~1oV>E) z^&X5S+NkwVs|MpoMnM$m^T*kK978B*VKT#=#;UYXE!&_<cj%zP(xP64Y2pSRcaWP`x=?fP78U{WeDUlU|dBk z?wt%FvD@sqDy|Ze&loE9o0D$V6DbdDJE{qv4ocLM2!zcC+%&{#D#*y;TaSNOvglH% z@INvqt2Jv-ymwb>c^x5@Mx?Ee4Rg{Uz+x*qro9CCZ6)%YEtA3lrv{}`W?2uUk?PhW zOaEU@)qf~~WHHy?hI7vR3XJ@=JrGk~6Ln|U`EnC_D^a@oHax%==YfJQ%!~}Mk9ba; z;3V!K`0iF$Q4iGCdtf+4w)a<0dLDZf_BYDRqD77NsI~C#k2*7STi%2XKZ$VAd2z>= zR9tsqKTGsG?Y~jtwY9^FLV3*0n-k{O`b4zbmIrv7z<`6T9K+8)hznWJ(;UD0~3Yrak7(w%9DAv=w;3 z;FCK2<`=jJ5#hxuVgKUuv30Xa$l0&L<JV!82=n|Co#XkV2%)AQ# literal 0 HcmV?d00001 diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index b87ffea2b81ab..c3c0f19319f11 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -85,6 +85,7 @@ + From e7dd02768b658b2a1f216fbedc65938d9b6ca6e9 Mon Sep 17 00:00:00 2001 From: Abhishek Das Date: Fri, 5 Jun 2020 14:56:51 -0700 Subject: [PATCH 007/131] HADOOP-17029. Return correct permission and owner for listing on internal directories in ViewFs. Contributed by Abhishek Das. 
--- .../hadoop/fs/viewfs/ViewFileSystem.java | 27 ++-- .../org/apache/hadoop/fs/viewfs/ViewFs.java | 41 ++++-- .../fs/viewfs/TestViewfsFileStatus.java | 118 ++++++++++++++---- 3 files changed, 146 insertions(+), 40 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 4f02feeebec8b..142785a88e4cd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -1200,13 +1200,26 @@ public FileStatus[] listStatus(Path f) throws AccessControlException, INode inode = iEntry.getValue(); if (inode.isLink()) { INodeLink link = (INodeLink) inode; - - result[i++] = new FileStatus(0, false, 0, 0, - creationTime, creationTime, PERMISSION_555, - ugi.getShortUserName(), ugi.getPrimaryGroupName(), - link.getTargetLink(), - new Path(inode.fullPath).makeQualified( - myUri, null)); + try { + String linkedPath = link.getTargetFileSystem().getUri().getPath(); + FileStatus status = + ((ChRootedFileSystem)link.getTargetFileSystem()) + .getMyFs().getFileStatus(new Path(linkedPath)); + result[i++] = new FileStatus(status.getLen(), false, + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), status.getGroup(), + link.getTargetLink(), + new Path(inode.fullPath).makeQualified( + myUri, null)); + } catch (FileNotFoundException ex) { + result[i++] = new FileStatus(0, false, 0, 0, + creationTime, creationTime, PERMISSION_555, + ugi.getShortUserName(), ugi.getPrimaryGroupName(), + link.getTargetLink(), + new Path(inode.fullPath).makeQualified( + myUri, null)); + } } else { result[i++] = new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index 607bdb8d423a0..df10dce50b78f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -917,11 +917,25 @@ public FileStatus getFileLinkStatus(final Path f) if (inode.isLink()) { INodeLink inodelink = (INodeLink) inode; - result = new FileStatus(0, false, 0, 0, creationTime, creationTime, + try { + String linkedPath = inodelink.getTargetFileSystem() + .getUri().getPath(); + FileStatus status = ((ChRootedFs)inodelink.getTargetFileSystem()) + .getMyFs().getFileStatus(new Path(linkedPath)); + result = new FileStatus(status.getLen(), false, + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), status.getGroup(), + inodelink.getTargetLink(), + new Path(inode.fullPath).makeQualified( + myUri, null)); + } catch (FileNotFoundException ex) { + result = new FileStatus(0, false, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(), inodelink.getTargetLink(), new Path(inode.fullPath).makeQualified( myUri, null)); + } } else { result = new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(), @@ -976,12 +990,25 @@ 
public FileStatus[] listStatus(final Path f) throws AccessControlException, INodeLink link = (INodeLink) inode; - result[i++] = new FileStatus(0, false, 0, 0, - creationTime, creationTime, - PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(), - link.getTargetLink(), - new Path(inode.fullPath).makeQualified( - myUri, null)); + try { + String linkedPath = link.getTargetFileSystem().getUri().getPath(); + FileStatus status = ((ChRootedFs)link.getTargetFileSystem()) + .getMyFs().getFileStatus(new Path(linkedPath)); + result[i++] = new FileStatus(status.getLen(), false, + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), status.getGroup(), + link.getTargetLink(), + new Path(inode.fullPath).makeQualified( + myUri, null)); + } catch (FileNotFoundException ex) { + result[i++] = new FileStatus(0, false, 0, 0, + creationTime, creationTime, PERMISSION_555, + ugi.getShortUserName(), ugi.getPrimaryGroupName(), + link.getTargetLink(), + new Path(inode.fullPath).makeQualified( + myUri, null)); + } } else { result[i++] = new FileStatus(0, true, 0, 0, creationTime, creationTime, diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java index 0c31c8ed6a901..29fcc22db1fe6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java @@ -29,10 +29,13 @@ import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; import org.junit.AfterClass; +import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; @@ -48,6 +51,17 @@ public class TestViewfsFileStatus { private static final File TEST_DIR = GenericTestUtils.getTestDir( TestViewfsFileStatus.class.getSimpleName()); + @Before + public void setUp() { + FileUtil.fullyDelete(TEST_DIR); + assertTrue(TEST_DIR.mkdirs()); + } + + @After + public void tearDown() throws IOException { + FileUtil.fullyDelete(TEST_DIR); + } + @Test public void testFileStatusSerialziation() throws IOException, URISyntaxException { @@ -56,38 +70,90 @@ public void testFileStatusSerialziation() File infile = new File(TEST_DIR, testfilename); final byte[] content = "dingos".getBytes(); - FileOutputStream fos = null; - try { - fos = new FileOutputStream(infile); + try (FileOutputStream fos = new FileOutputStream(infile)) { fos.write(content); - } finally { - if (fos != null) { - fos.close(); - } } assertEquals((long)content.length, infile.length()); Configuration conf = new Configuration(); ConfigUtil.addLink(conf, "/foo/bar/baz", TEST_DIR.toURI()); - FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf); - assertEquals(ViewFileSystem.class, vfs.getClass()); - Path path = new Path("/foo/bar/baz", testfilename); - FileStatus stat = vfs.getFileStatus(path); - assertEquals(content.length, stat.getLen()); - ContractTestUtils.assertNotErasureCoded(vfs, path); - assertTrue(path + " should have erasure coding unset in " + - "FileStatus#toString(): " + stat, - 
stat.toString().contains("isErasureCoded=false")); - - // check serialization/deserialization - DataOutputBuffer dob = new DataOutputBuffer(); - stat.write(dob); - DataInputBuffer dib = new DataInputBuffer(); - dib.reset(dob.getData(), 0, dob.getLength()); - FileStatus deSer = new FileStatus(); - deSer.readFields(dib); - assertEquals(content.length, deSer.getLen()); - assertFalse(deSer.isErasureCoded()); + try (FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf)) { + assertEquals(ViewFileSystem.class, vfs.getClass()); + Path path = new Path("/foo/bar/baz", testfilename); + FileStatus stat = vfs.getFileStatus(path); + assertEquals(content.length, stat.getLen()); + ContractTestUtils.assertNotErasureCoded(vfs, path); + assertTrue(path + " should have erasure coding unset in " + + "FileStatus#toString(): " + stat, + stat.toString().contains("isErasureCoded=false")); + + // check serialization/deserialization + DataOutputBuffer dob = new DataOutputBuffer(); + stat.write(dob); + DataInputBuffer dib = new DataInputBuffer(); + dib.reset(dob.getData(), 0, dob.getLength()); + FileStatus deSer = new FileStatus(); + deSer.readFields(dib); + assertEquals(content.length, deSer.getLen()); + assertFalse(deSer.isErasureCoded()); + } + } + + /** + * Tests the ACL returned from getFileStatus for directories and files. + * @throws IOException + */ + @Test + public void testListStatusACL() throws IOException { + String testfilename = "testFileACL"; + String childDirectoryName = "testDirectoryACL"; + TEST_DIR.mkdirs(); + File infile = new File(TEST_DIR, testfilename); + final byte[] content = "dingos".getBytes(); + + try (FileOutputStream fos = new FileOutputStream(infile)) { + fos.write(content); + } + assertEquals(content.length, infile.length()); + File childDir = new File(TEST_DIR, childDirectoryName); + childDir.mkdirs(); + + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/file", infile.toURI()); + ConfigUtil.addLink(conf, "/dir", childDir.toURI()); + + try (FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf)) { + assertEquals(ViewFileSystem.class, vfs.getClass()); + FileStatus[] statuses = vfs.listStatus(new Path("/")); + + FileSystem localFs = FileSystem.getLocal(conf); + FileStatus fileStat = localFs.getFileStatus(new Path(infile.getPath())); + FileStatus dirStat = localFs.getFileStatus(new Path(childDir.getPath())); + + for (FileStatus status : statuses) { + if (status.getPath().getName().equals("file")) { + assertEquals(fileStat.getPermission(), status.getPermission()); + } else { + assertEquals(dirStat.getPermission(), status.getPermission()); + } + } + + localFs.setPermission(new Path(infile.getPath()), + FsPermission.valueOf("-rwxr--r--")); + localFs.setPermission(new Path(childDir.getPath()), + FsPermission.valueOf("-r--rwxr--")); + + statuses = vfs.listStatus(new Path("/")); + for (FileStatus status : statuses) { + if (status.getPath().getName().equals("file")) { + assertEquals(FsPermission.valueOf("-rwxr--r--"), + status.getPermission()); + } else { + assertEquals(FsPermission.valueOf("-r--rwxr--"), + status.getPermission()); + } + } + } } // Tests that ViewFileSystem.getFileChecksum calls res.targetFileSystem From cc671b16f7b0b7c1ed7b41b96171653dc43cf670 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sat, 6 Jun 2020 10:49:38 +0530 Subject: [PATCH 008/131] HDFS-15389. DFSAdmin should close filesystem and dfsadmin -setBalancerBandwidth should work with ViewFSOverloadScheme. 
Contributed by Ayush Saxena --- .../apache/hadoop/hdfs/tools/DFSAdmin.java | 13 +++--------- ...wFileSystemOverloadSchemeWithDFSAdmin.java | 20 +++++++++++++++++++ 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index e626160b98d8a..74257cf697d9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -479,9 +479,9 @@ public DFSAdmin() { public DFSAdmin(Configuration conf) { super(conf); } - + protected DistributedFileSystem getDFS() throws IOException { - return AdminHelper.getDFS(getConf()); + return AdminHelper.checkAndGetDFS(getFS(), getConf()); } /** @@ -1036,14 +1036,7 @@ public int setBalancerBandwidth(String[] argv, int idx) throws IOException { System.err.println("Bandwidth should be a non-negative integer"); return exitCode; } - - FileSystem fs = getFS(); - if (!(fs instanceof DistributedFileSystem)) { - System.err.println("FileSystem is " + fs.getUri()); - return exitCode; - } - - DistributedFileSystem dfs = (DistributedFileSystem) fs; + DistributedFileSystem dfs = getDFS(); try{ dfs.setBalancerBandwidth(bandwidth); System.out.println("Balancer bandwidth is set to " + bandwidth); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java index 1961dc2b48396..a9475ddc8d0a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java @@ -263,4 +263,24 @@ public void testAllowAndDisalllowSnapShot() throws Exception { assertOutMsg("Disallowing snapshot on / succeeded", 1); assertEquals(0, ret); } + + /** + * Tests setBalancerBandwidth with ViewFSOverloadScheme. + */ + @Test + public void testSetBalancerBandwidth() throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + addMountLinks(defaultFSURI.getAuthority(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER }, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString() }, + conf); + final DFSAdmin dfsAdmin = new DFSAdmin(conf); + redirectStream(); + int ret = ToolRunner.run(dfsAdmin, + new String[] {"-fs", defaultFSURI.toString(), "-setBalancerBandwidth", + "1000"}); + assertOutMsg("Balancer bandwidth is set to 1000", 0); + assertEquals(0, ret); + } } \ No newline at end of file From 3ca15292c5584ec220b3eeaf76da85d228bcbd8b Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Sat, 6 Jun 2020 08:11:57 -0700 Subject: [PATCH 009/131] HDFS-15394. Add all available fs.viewfs.overload.scheme.target..impl classes in core-default.xml bydefault. Contributed by Uma Maheswara Rao G. 
--- .../src/main/resources/core-default.xml | 110 ++++++++++++++++++ .../conf/TestCommonConfigurationFields.java | 18 +++ ...ileSystemOverloadSchemeWithHdfsScheme.java | 9 +- 3 files changed, 136 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 0d583cca57cd0..accb1b91a937a 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -952,6 +952,116 @@ + + fs.viewfs.overload.scheme.target.hdfs.impl + org.apache.hadoop.hdfs.DistributedFileSystem + The DistributedFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are hdfs. + + + + + fs.viewfs.overload.scheme.target.s3a.impl + org.apache.hadoop.fs.s3a.S3AFileSystem + The S3AFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are s3a. + + + + fs.viewfs.overload.scheme.target.o3fs.impl + org.apache.hadoop.fs.ozone.OzoneFileSystem + The OzoneFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are o3fs. + + + + fs.viewfs.overload.scheme.target.ftp.impl + org.apache.hadoop.fs.ftp.FTPFileSystem + The FTPFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are ftp. + + + + + fs.viewfs.overload.scheme.target.webhdfs.impl + org.apache.hadoop.hdfs.web.WebHdfsFileSystem + The WebHdfsFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are webhdfs. + + + + + fs.viewfs.overload.scheme.target.swebhdfs.impl + org.apache.hadoop.hdfs.web.SWebHdfsFileSystem + The SWebHdfsFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are swebhdfs. + + + + + fs.viewfs.overload.scheme.target.file.impl + org.apache.hadoop.fs.LocalFileSystem + The LocalFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are file. + + + + + fs.viewfs.overload.scheme.target.abfs.impl + org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem + The AzureBlobFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are abfs. + + + + + fs.viewfs.overload.scheme.target.abfss.impl + org.apache.hadoop.fs.azurebfs.SecureAzureBlobFileSystem + The SecureAzureBlobFileSystem for view file system overload + scheme when child file system and ViewFSOverloadScheme's schemes are abfss. + + + + + fs.viewfs.overload.scheme.target.wasb.impl + org.apache.hadoop.fs.azure.NativeAzureFileSystem + The NativeAzureFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are wasb. + + + + + fs.viewfs.overload.scheme.target.swift.impl + org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem + The SwiftNativeFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are swift. + + + + + fs.viewfs.overload.scheme.target.oss.impl + org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem + The AliyunOSSFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are oss. 
+ + + + + fs.viewfs.overload.scheme.target.http.impl + org.apache.hadoop.fs.http.HttpFileSystem + The HttpFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are http. + + + + + fs.viewfs.overload.scheme.target.https.impl + org.apache.hadoop.fs.http.HttpsFileSystem + The HttpsFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are https. + + + fs.AbstractFileSystem.ftp.impl org.apache.hadoop.fs.ftp.FtpFs diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java index 1ce23a0eb81f2..3b9947e213512 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java @@ -124,6 +124,24 @@ public void initializeMemberVariables() { xmlPrefixToSkipCompare.add("fs.adl."); xmlPropsToSkipCompare.add("fs.AbstractFileSystem.adl.impl"); + // ViewfsOverloadScheme target fs impl property keys are dynamically + // constructed and they are advanced props. + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.abfs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.abfss.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.file.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.ftp.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.hdfs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.http.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.https.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.o3fs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.oss.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.s3a.impl"); + xmlPropsToSkipCompare. + add("fs.viewfs.overload.scheme.target.swebhdfs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.webhdfs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.wasb.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.swift.impl"); + // Azure properties are in a different class // - org.apache.hadoop.fs.azure.AzureNativeFileSystemStore // - org.apache.hadoop.fs.azure.SASKeyGeneratorImpl diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java index 3860fa423e386..b3ed85b45827a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java @@ -313,13 +313,17 @@ public void testCreateOnRootShouldFailEvenFallBackMountLinkConfigured() * Note: Above links created because to make fs initialization success. * Otherwise will not proceed if no mount links. * - * Don't set fs.viewfs.overload.scheme.target.hdfs.impl property. + * Unset fs.viewfs.overload.scheme.target.hdfs.impl property. * So, OverloadScheme target fs initialization will fail. 
*/ @Test(expected = UnsupportedFileSystemException.class, timeout = 30000) public void testInvalidOverloadSchemeTargetFS() throws Exception { final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + String mountTableIfSet = conf.get(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH); conf = new Configuration(); + if (mountTableIfSet != null) { + conf.set(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH, mountTableIfSet); + } addMountLinks(defaultFSURI.getAuthority(), new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER, Constants.CONFIG_VIEWFS_LINK_FALLBACK }, @@ -331,6 +335,9 @@ public void testInvalidOverloadSchemeTargetFS() throws Exception { defaultFSURI.toString()); conf.set(String.format(FS_IMPL_PATTERN_KEY, HDFS_SCHEME), ViewFileSystemOverloadScheme.class.getName()); + conf.unset(String.format( + FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + HDFS_SCHEME)); try (FileSystem fs = FileSystem.get(conf)) { fs.createNewFile(new Path("/onRootWhenFallBack")); From a8610c15c498531bf3c011f1b0ace8eddddf61f2 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Mon, 8 Jun 2020 01:59:10 +0530 Subject: [PATCH 010/131] HDFS-15396. Fix TestViewFileSystemOverloadSchemeHdfsFileSystemContract#testListStatusRootDir. Contributed by Ayush Saxena. --- .../main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 142785a88e4cd..2711bff0ff2f1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -1202,6 +1202,9 @@ public FileStatus[] listStatus(Path f) throws AccessControlException, INodeLink link = (INodeLink) inode; try { String linkedPath = link.getTargetFileSystem().getUri().getPath(); + if("".equals(linkedPath)) { + linkedPath = "/"; + } FileStatus status = ((ChRootedFileSystem)link.getTargetFileSystem()) .getMyFs().getFileStatus(new Path(linkedPath)); From 9f242c215e1969ffec2fa2e24e65edc712097641 Mon Sep 17 00:00:00 2001 From: Mingliang Liu Date: Mon, 8 Jun 2020 10:11:30 -0700 Subject: [PATCH 011/131] HADOOP-17059. ArrayIndexOfboundsException in ViewFileSystem#listStatus. 
Contributed by hemanthboyina --- .../hadoop/fs/viewfs/ViewFileSystem.java | 2 +- .../hadoop/fs/viewfs/ViewFsBaseTest.java | 22 +++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 2711bff0ff2f1..56d0fc59e90f1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -1226,7 +1226,7 @@ public FileStatus[] listStatus(Path f) throws AccessControlException, } else { result[i++] = new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, - ugi.getShortUserName(), ugi.getGroupNames()[0], + ugi.getShortUserName(), ugi.getPrimaryGroupName(), new Path(inode.fullPath).makeQualified( myUri, null)); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java index d96cdb172b702..90722aab2f8a7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java @@ -56,6 +56,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.FileContextTestHelper.fileType; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; @@ -69,6 +70,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -1001,4 +1003,24 @@ static AbstractFileSystem getMockFs(URI uri) { return mockFs; } } + + @Test + public void testListStatusWithNoGroups() throws Exception { + final UserGroupInformation userUgi = UserGroupInformation + .createUserForTesting("user@HADOOP.COM", new String[] {}); + userUgi.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + String clusterName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE; + URI viewFsUri = + new URI(FsConstants.VIEWFS_SCHEME, clusterName, "/", null, null); + FileSystem vfs = FileSystem.get(viewFsUri, conf); + LambdaTestUtils.intercept(IOException.class, + "There is no primary group for UGI", () -> vfs + .listStatus(new Path(viewFsUri.toString() + "internalDir"))); + return null; + } + }); + } + } From 0c25131ca430fcd6bf0f2c77dc01f027b92a9f4f Mon Sep 17 00:00:00 2001 From: Mingliang Liu Date: Mon, 8 Jun 2020 11:28:36 -0700 Subject: [PATCH 012/131] HADOOP-17047. TODO comment exist in trunk while related issue HADOOP-6223 is already fixed. 
Contributed by Rungroj Maipradit --- .../src/main/java/org/apache/hadoop/fs/FileContext.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index ba0064f0813d3..e9d8ea4a4ec1f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -66,6 +66,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import com.google.common.base.Preconditions; +import com.google.common.annotations.VisibleForTesting; import org.apache.htrace.core.Tracer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -507,10 +508,9 @@ public static FileContext getLocalFSFileContext(final Configuration aConf) return getFileContext(FsConstants.LOCAL_FS_URI, aConf); } - /* This method is needed for tests. */ + @VisibleForTesting @InterfaceAudience.Private - @InterfaceStability.Unstable /* return type will change to AFS once - HADOOP-6223 is completed */ + @InterfaceStability.Unstable public AbstractFileSystem getDefaultFileSystem() { return defaultFS; } From fbb8775430666e99264f357599cf73ae93313377 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Mon, 8 Jun 2020 20:35:27 +0000 Subject: [PATCH 013/131] Revert "MAPREDUCE-7277. IndexCache totalMemoryUsed differs from cache contents. Contributed by Jon Eagles (jeagles)." This reverts commit e2322e1117a2a3435aabecd49de0ad3c8d2f64b9. --- .../org/apache/hadoop/mapred/IndexCache.java | 95 +++++++------------ .../apache/hadoop/mapred/TestIndexCache.java | 56 +++++------ 2 files changed, 56 insertions(+), 95 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java index 80cbcca4e27ea..0e24bbe53307d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java @@ -72,12 +72,11 @@ public IndexRecord getIndexInformation(String mapId, int reduce, try { info.wait(); } catch (InterruptedException e) { - Thread.currentThread().interrupt(); throw new IOException("Interrupted waiting for construction", e); } } } - LOG.debug("IndexCache HIT: MapId {} found", mapId); + LOG.debug("IndexCache HIT: MapId " + mapId + " found"); } if (info.mapSpillRecord.size() == 0 || @@ -107,91 +106,63 @@ private IndexInformation readIndexFileToCache(Path indexFileName, try { info.wait(); } catch (InterruptedException e) { - Thread.currentThread().interrupt(); throw new IOException("Interrupted waiting for construction", e); } } } - LOG.debug("IndexCache HIT: MapId {} found", mapId); + LOG.debug("IndexCache HIT: MapId " + mapId + " found"); return info; } - LOG.debug("IndexCache MISS: MapId {} not found", mapId); + LOG.debug("IndexCache MISS: MapId " + mapId + " not found") ; SpillRecord tmp = null; - boolean success = false; try { tmp = new SpillRecord(indexFileName, conf, expectedIndexOwner); - success = true; - } catch (Throwable e) { + } catch (Throwable e) { tmp = new SpillRecord(0); cache.remove(mapId); - if (e 
instanceof InterruptedException) { - Thread.currentThread().interrupt(); - } throw new IOException("Error Reading IndexFile", e); - } finally { - synchronized (newInd) { + } finally { + synchronized (newInd) { newInd.mapSpillRecord = tmp; - if (success) { - // Only add mapId to the queue for successful read and after added to - // the cache. Once in the queue, it is now eligible for removal once - // construction is finished. - queue.add(mapId); - if (totalMemoryUsed.addAndGet(newInd.getSize()) > totalMemoryAllowed) { - freeIndexInformation(); - } - } newInd.notifyAll(); } } - + queue.add(mapId); + + if (totalMemoryUsed.addAndGet(newInd.getSize()) > totalMemoryAllowed) { + freeIndexInformation(); + } return newInd; } /** - * This method removes the map from the cache if it is present in the queue. + * This method removes the map from the cache if index information for this + * map is loaded(size>0), index information entry in cache will not be + * removed if it is in the loading phrase(size=0), this prevents corruption + * of totalMemoryUsed. It should be called when a map output on this tracker + * is discarded. * @param mapId The taskID of this map. */ - public void removeMap(String mapId) throws IOException { - // Successfully removing the mapId from the queue enters into a contract - // that this thread will remove the corresponding mapId from the cache. - if (!queue.remove(mapId)) { - LOG.debug("Map ID {} not found in queue", mapId); - return; - } - removeMapInternal(mapId); - } - - /** This method should only be called upon successful removal of mapId from - * the queue. The mapId will be removed from the cache and totalUsedMemory - * will be decremented. - * @param mapId the cache item to be removed - * @throws IOException - */ - private void removeMapInternal(String mapId) throws IOException { - IndexInformation info = cache.remove(mapId); - if (info == null) { - // Inconsistent state as presence in queue implies presence in cache - LOG.warn("Map ID " + mapId + " not found in cache"); + public void removeMap(String mapId) { + IndexInformation info = cache.get(mapId); + if (info == null || isUnderConstruction(info)) { return; } - try { - synchronized(info) { - while (isUnderConstruction(info)) { - info.wait(); - } - totalMemoryUsed.getAndAdd(-info.getSize()); + info = cache.remove(mapId); + if (info != null) { + totalMemoryUsed.addAndGet(-info.getSize()); + if (!queue.remove(mapId)) { + LOG.warn("Map ID" + mapId + " not found in queue!!"); } - } catch (InterruptedException e) { - totalMemoryUsed.getAndAdd(-info.getSize()); - Thread.currentThread().interrupt(); - throw new IOException("Interrupted waiting for construction", e); + } else { + LOG.info("Map ID " + mapId + " not found in cache"); } } /** - * This method checks if cache and totalMemoryUsed is consistent. + * This method checks if cache and totolMemoryUsed is consistent. * It is only used for unit test. - * @return True if cache and totalMemoryUsed is consistent + * @return True if cache and totolMemoryUsed is consistent */ boolean checkTotalMemoryUsed() { int totalSize = 0; @@ -204,13 +175,13 @@ boolean checkTotalMemoryUsed() { /** * Bring memory usage below totalMemoryAllowed. 
*/ - private synchronized void freeIndexInformation() throws IOException { + private synchronized void freeIndexInformation() { while (totalMemoryUsed.get() > totalMemoryAllowed) { - if(queue.isEmpty()) { - break; + String s = queue.remove(); + IndexInformation info = cache.remove(s); + if (info != null) { + totalMemoryUsed.addAndGet(-info.getSize()); } - String mapId = queue.remove(); - removeMapInternal(mapId); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java index 09db2c680fcc7..dabce770e820d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java @@ -21,7 +21,6 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.Random; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.zip.CRC32; import java.util.zip.CheckedOutputStream; @@ -217,32 +216,23 @@ public void testRemoveMap() throws Exception { final String user = UserGroupInformation.getCurrentUser().getShortUserName(); writeFile(fs, big, bytesPerFile, partsPerMap); - - // Capture if any runtime exception occurred - AtomicBoolean failed = new AtomicBoolean(); - + // run multiple times for (int i = 0; i < 20; ++i) { Thread getInfoThread = new Thread() { @Override public void run() { try { - cache.getIndexInformation("bigIndex", 0, big, user); + cache.getIndexInformation("bigIndex", partsPerMap, big, user); } catch (Exception e) { // should not be here - failed.set(true); } } }; Thread removeMapThread = new Thread() { @Override public void run() { - try { - cache.removeMap("bigIndex"); - } catch (Exception e) { - // should not be here - failed.set(true); - } + cache.removeMap("bigIndex"); } }; if (i%2==0) { @@ -254,7 +244,6 @@ public void run() { } getInfoThread.join(); removeMapThread.join(); - assertFalse("An unexpected exception", failed.get()); assertTrue(cache.checkTotalMemoryUsed()); } } @@ -272,9 +261,6 @@ public void testCreateRace() throws Exception { UserGroupInformation.getCurrentUser().getShortUserName(); writeFile(fs, racy, bytesPerFile, partsPerMap); - // Capture if any runtime exception occurred - AtomicBoolean failed = new AtomicBoolean(); - // run multiple instances Thread[] getInfoThreads = new Thread[50]; for (int i = 0; i < 50; i++) { @@ -282,15 +268,10 @@ public void testCreateRace() throws Exception { @Override public void run() { try { - while (!Thread.currentThread().isInterrupted()) { - cache.getIndexInformation("racyIndex", 0, racy, user); - cache.removeMap("racyIndex"); - } + cache.getIndexInformation("racyIndex", partsPerMap, racy, user); + cache.removeMap("racyIndex"); } catch (Exception e) { - if (!Thread.currentThread().isInterrupted()) { - // should not be here - failed.set(true); - } + // should not be here } } }; @@ -300,12 +281,20 @@ public void run() { getInfoThreads[i].start(); } - // The duration to keep the threads testing - Thread.sleep(5000); + final Thread mainTestThread = Thread.currentThread(); + + Thread timeoutThread = new Thread() { + @Override + public void run() { + try { + Thread.sleep(15000); + mainTestThread.interrupt(); + } catch (InterruptedException ie) { + // we 
are done; + } + } + }; - for (int i = 0; i < 50; i++) { - getInfoThreads[i].interrupt(); - } for (int i = 0; i < 50; i++) { try { getInfoThreads[i].join(); @@ -314,9 +303,10 @@ public void run() { fail("Unexpectedly long delay during concurrent cache entry creations"); } } - assertFalse("An unexpected exception", failed.get()); - assertTrue("Total memory used does not represent contents of the cache", - cache.checkTotalMemoryUsed()); + // stop the timeoutThread. If we get interrupted before stopping, there + // must be something wrong, although it wasn't a deadlock. No need to + // catch and swallow. + timeoutThread.interrupt(); } private static void checkRecord(IndexRecord rec, long fill) { From 852587456173f208f78d0c95046cfd0d8aa1c01c Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Tue, 9 Jun 2020 18:45:20 +0530 Subject: [PATCH 014/131] HDFS-15211. EC: File write hangs during close in case of Exception during updatePipeline. Contributed by Ayush Saxena. *Added missed test file. --- ...tDFSStripedOutputStreamUpdatePipeline.java | 64 +++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java new file mode 100644 index 0000000000000..8e50b797d591e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; +import org.junit.Test; + + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; + +public class TestDFSStripedOutputStreamUpdatePipeline { + + @Test + public void testDFSStripedOutputStreamUpdatePipeline() throws Exception { + + Configuration conf = new HdfsConfiguration(); + conf.setLong(DFS_BLOCK_SIZE_KEY, 1 * 1024 * 1024); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(5).build()) { + cluster.waitActive(); + final DistributedFileSystem dfs = cluster.getFileSystem(); + // Create a file with EC policy + Path dir = new Path("/test"); + dfs.mkdirs(dir); + dfs.enableErasureCodingPolicy("RS-3-2-1024k"); + dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k"); + Path filePath = new Path("/test/file"); + FSDataOutputStream out = dfs.create(filePath); + try { + for (int i = 0; i < Long.MAX_VALUE; i++) { + out.write(i); + if (i == 1024 * 1024 * 5) { + cluster.stopDataNode(0); + cluster.stopDataNode(1); + cluster.stopDataNode(2); + } + } + } catch(Exception e) { + dfs.delete(filePath, true); + } finally { + // The close should be success, shouldn't get stuck. + IOUtils.closeStream(out); + } + } + } +} From ac5d899d40d7b50ba73c400a708f59fb128e6e30 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Tue, 9 Jun 2020 14:39:06 +0100 Subject: [PATCH 015/131] HADOOP-17050 S3A to support additional token issuers Contributed by Steve Loughran. S3A delegation token providers will be asked for any additional token issuers, an array can be returned, each one will be asked for tokens when DelegationTokenIssuer collects all the tokens for a filesystem. --- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 20 +++++++++++++++++++ .../auth/delegation/S3ADelegationTokens.java | 14 +++++++++++++ 2 files changed, 34 insertions(+) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 6d2b3a84ca702..fa0251aa73aa2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -114,6 +114,7 @@ import org.apache.hadoop.fs.s3a.select.InternalSelectConstants; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.token.DelegationTokenIssuer; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.LambdaUtils; @@ -3377,6 +3378,25 @@ public Token getDelegationToken(String renewer) } } + /** + * Ask any DT plugin for any extra token issuers. + * These do not get told of the encryption secrets and can + * return any type of token. + * This allows DT plugins to issue extra tokens for + * ancillary services. + */ + @Override + public DelegationTokenIssuer[] getAdditionalTokenIssuers() + throws IOException { + if (delegationTokens.isPresent()) { + return delegationTokens.get().getAdditionalTokenIssuers(); + } else { + // Delegation token support is not set up + LOG.debug("Token support is not enabled"); + return null; + } + } + /** * Build the AWS policy for restricted access to the resources needed * by this bucket. 
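For illustration only (not part of this change), a minimal caller-side sketch of what the extra-issuer plumbing enables: a normal delegation token collection run over an S3A filesystem now also gathers tokens from any additional issuers the DT plugin advertises. The bucket name and renewer below are placeholders, and delegation token support is assumed to be enabled for that bucket.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class CollectS3ATokensSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder bucket; assumes an S3A DT binding is configured for it.
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    Credentials credentials = new Credentials();
    // DelegationTokenIssuer recursively asks getAdditionalTokenIssuers(),
    // so ancillary-service tokens land in the same Credentials set.
    Token<?>[] tokens = fs.addDelegationTokens("yarn", credentials);
    System.out.println("Collected "
        + (tokens == null ? 0 : tokens.length) + " token(s)");
  }
}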
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/S3ADelegationTokens.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/S3ADelegationTokens.java index 5005436c8242a..ef0b129e6e0c2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/S3ADelegationTokens.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/S3ADelegationTokens.java @@ -40,6 +40,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.DelegationTokenIssuer; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.service.ServiceOperations; import org.apache.hadoop.util.DurationInfo; @@ -447,6 +448,19 @@ private void noteTokenCreated(final Token token) { stats.tokenIssued(); } + /** + * Get a null/possibly empty list of extra delegation token issuers. + * These will be asked for tokens when + * {@link DelegationTokenIssuer#getAdditionalTokenIssuers()} recursively + * collects all DTs a filesystem can offer. + * @return a null or empty array. Default implementation: null + * @throws IOException failure + */ + public DelegationTokenIssuer[] getAdditionalTokenIssuers() + throws IOException { + return null; + } + /** * Get the AWS credential provider. * @return the DT credential provider From 56247db3022705635580c4d2f8b0abde109f954f Mon Sep 17 00:00:00 2001 From: Eric E Payne Date: Tue, 9 Jun 2020 18:43:16 +0000 Subject: [PATCH 016/131] YARN-10300: appMasterHost not set in RM ApplicationSummary when AM fails before first heartbeat. Contributed by Eric Badger (ebadger). --- .../server/resourcemanager/RMAppManager.java | 13 ++++++- .../resourcemanager/TestAppManager.java | 15 ++++++++ .../TestApplicationMasterLauncher.java | 35 +++++++++++++++++++ 3 files changed, 62 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index debd4d9d34527..440b0ea6717ad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -26,6 +26,8 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.NodeId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -190,7 +192,16 @@ public static SummaryBuilder createAppSummary(RMApp app) { RMAppAttempt attempt = app.getCurrentAppAttempt(); if (attempt != null) { trackingUrl = attempt.getTrackingUrl(); - host = attempt.getHost(); + Container masterContainer = attempt.getMasterContainer(); + if (masterContainer != null) { + NodeId nodeId = masterContainer.getNodeId(); + if (nodeId != null) { + String amHost = nodeId.getHost(); + if (amHost != null) { + host = amHost; + } + } + } } RMAppMetrics metrics = app.getRMAppMetrics(); SummaryBuilder summary 
= new SummaryBuilder() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index 5f5c3f2b0b36f..15a31bc00775d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -22,7 +22,10 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.QueueACL; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -972,6 +975,17 @@ public void testEscapeApplicationSummary() { when(app.getSubmitTime()).thenReturn(1000L); when(app.getLaunchTime()).thenReturn(2000L); when(app.getApplicationTags()).thenReturn(Sets.newHashSet("tag2", "tag1")); + + RMAppAttempt mockRMAppAttempt = mock(RMAppAttempt.class); + Container mockContainer = mock(Container.class); + NodeId mockNodeId = mock(NodeId.class); + String host = "127.0.0.1"; + + when(mockNodeId.getHost()).thenReturn(host); + when(mockContainer.getNodeId()).thenReturn(mockNodeId); + when(mockRMAppAttempt.getMasterContainer()).thenReturn(mockContainer); + when(app.getCurrentAppAttempt()).thenReturn(mockRMAppAttempt); + Map resourceSecondsMap = new HashMap<>(); resourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), 16384L); resourceSecondsMap.put(ResourceInformation.VCORES.getName(), 64L); @@ -993,6 +1007,7 @@ public void testEscapeApplicationSummary() { assertTrue(msg.contains("Multiline" + escaped +"AppName")); assertTrue(msg.contains("Multiline" + escaped +"UserName")); assertTrue(msg.contains("Multiline" + escaped +"QueueName")); + assertTrue(msg.contains("appMasterHost=" + host)); assertTrue(msg.contains("submitTime=1000")); assertTrue(msg.contains("launchTime=2000")); assertTrue(msg.contains("memorySeconds=16384")); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java index 3cf809d7f3ea9..7b3c2f02e56fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java @@ -453,6 +453,41 @@ public void testSetupTokensWithHTTPS() throws Exception { testSetupTokens(true, conf); } + @Test + public void testAMMasterContainerHost() throws Exception { + //Test that masterContainer and its associated host are + 
//set before the AM is even launched. + MockRM rm = new MockRM(); + rm.start(); + String host = "127.0.0.1"; + String port = "1234"; + MockNM nm1 = rm.registerNode(host + ":" + port, 5120); + RMApp app = MockRMAppSubmitter.submitWithMemory(2000, rm); + // kick the scheduling + nm1.nodeHeartbeat(true); + RMAppAttempt attempt = app.getCurrentAppAttempt(); + + try { + GenericTestUtils.waitFor(new Supplier() { + @Override public Boolean get() { + return attempt.getMasterContainer() != null; + } + }, 10, 200 * 100); + } catch (TimeoutException e) { + fail("timed out while waiting for AM Launch to happen."); + } + + Assert.assertEquals( + app.getCurrentAppAttempt().getMasterContainer().getNodeId().getHost(), + host); + + //send kill before launch + rm.killApp(app.getApplicationId()); + rm.waitForState(app.getApplicationId(), RMAppState.KILLED); + + rm.stop(); + } + private void testSetupTokens(boolean https, YarnConfiguration conf) throws Exception { MockRM rm = new MockRM(conf); From 635e6a16d0f407eeec470f2d4d3303092961a177 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 10 Jun 2020 11:50:37 +0530 Subject: [PATCH 017/131] HDFS-15376. Update the error about command line POST in httpfs documentation. Contributed by bianqi. --- .../hadoop-hdfs-httpfs/src/site/markdown/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md index 6eef9e7d30e99..665aad52c7b5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md @@ -42,7 +42,7 @@ HttpFS HTTP web-service API calls are HTTP REST calls that map to a HDFS file sy * `$ curl 'http://httpfs-host:14000/webhdfs/v1/user/foo?op=GETTRASHROOT&user.name=foo'` returns the path `/user/foo/.Trash`, if `/` is an encrypted zone, returns the path `/.Trash/foo`. See [more details](../hadoop-project-dist/hadoop-hdfs/TransparentEncryption.html#Rename_and_Trash_considerations) about trash path in an encrypted zone. -* `$ curl -X POST 'http://httpfs-host:14000/webhdfs/v1/user/foo/bar?op=MKDIRS&user.name=foo'` creates the HDFS `/user/foo/bar` directory. +* `$ curl -X PUT 'http://httpfs-host:14000/webhdfs/v1/user/foo/bar?op=MKDIRS&user.name=foo'` creates the HDFS `/user/foo/bar` directory. User and Developer Documentation -------------------------------- From b735a777178a3be7924b0ea7c0f61003dc60f16e Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 10 Jun 2020 12:06:16 +0530 Subject: [PATCH 018/131] HDFS-15398. EC: hdfs client hangs due to exception during addBlock. Contributed by Hongbing Wang. --- .../hadoop/hdfs/DFSStripedOutputStream.java | 10 ++++-- ...tDFSStripedOutputStreamUpdatePipeline.java | 36 +++++++++++++++++++ 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java index aad4a00bdeb35..4222478f976de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java @@ -503,8 +503,14 @@ private void allocateNewBlock() throws IOException { LOG.debug("Allocating new block group. 
The previous block group: " + prevBlockGroup); - final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src, - prevBlockGroup, fileId, favoredNodes, getAddBlockFlags()); + final LocatedBlock lb; + try { + lb = addBlock(excludedNodes, dfsClient, src, + prevBlockGroup, fileId, favoredNodes, getAddBlockFlags()); + } catch (IOException ioe) { + closeAllStreamers(); + throw ioe; + } assert lb.isStriped(); // assign the new block to the current block group currentBlockGroup = lb.getBlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java index 8e50b797d591e..ae29da08d41d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java @@ -61,4 +61,40 @@ public void testDFSStripedOutputStreamUpdatePipeline() throws Exception { } } } + + /** + * Test writing ec file hang when applying the second block group occurs + * an addBlock exception (e.g. quota exception). + */ + @Test(timeout = 90000) + public void testECWriteHangWhenAddBlockWithException() throws Exception { + Configuration conf = new HdfsConfiguration(); + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1 * 1024 * 1024); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(3).build()) { + cluster.waitActive(); + final DistributedFileSystem dfs = cluster.getFileSystem(); + // Create a file with EC policy + Path dir = new Path("/test"); + dfs.mkdirs(dir); + dfs.enableErasureCodingPolicy("XOR-2-1-1024k"); + dfs.setErasureCodingPolicy(dir, "XOR-2-1-1024k"); + Path filePath = new Path("/test/file"); + FSDataOutputStream out = dfs.create(filePath); + for (int i = 0; i < 1024 * 1024 * 2; i++) { + out.write(i); + } + dfs.setQuota(dir, 5, 0); + try { + for (int i = 0; i < 1024 * 1024 * 2; i++) { + out.write(i); + } + } catch (Exception e) { + dfs.delete(filePath, true); + } finally { + // The close should be success, shouldn't get stuck. + IOUtils.closeStream(out); + } + } + } } From 93b121a9717bb4ef5240fda877ebb5275f6446b4 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Wed, 10 Jun 2020 15:00:02 -0700 Subject: [PATCH 019/131] HADOOP-17060. Clarify listStatus and getFileStatus behaviors inconsistent in the case of ViewFs implementation for isDirectory. Contributed by Uma Maheswara Rao G. 
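For illustration only (not part of this change), a minimal sketch of the behaviour documented below, assuming a viewfs mount table for a cluster named clusterX with a mount link /data pointing at an HDFS directory: getFileStatus on the link resolves to the target (so isDirectory() reflects the target), while listStatus on the parent reports the link itself as a symlink whose isDirectory() is false and whose target is only reachable via getSymlink().

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ViewFsStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes a mount table for clusterX with a link /data -> hdfs://ns1/data.
    FileSystem viewFs = FileSystem.get(URI.create("viewfs://clusterX/"), conf);

    // getFileStatus resolves the mount link to its target path.
    FileStatus resolved = viewFs.getFileStatus(new Path("/data"));
    System.out.println("/data isDirectory: " + resolved.isDirectory());

    // listStatus on the parent returns the link as a symlink entry:
    // isDirectory() is false; resolve the child path explicitly if needed.
    for (FileStatus child : viewFs.listStatus(new Path("/"))) {
      if (child.isSymlink()) {
        System.out.println(child.getPath() + " -> " + child.getSymlink());
      }
    }
  }
}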
--- .../hadoop/fs/viewfs/ViewFileSystem.java | 36 ++++++++++++++----- .../org/apache/hadoop/fs/viewfs/ViewFs.java | 24 +++++++++++++ .../main/java/org/apache/hadoop/fs/Hdfs.java | 22 ++++++++++++ .../hadoop/hdfs/DistributedFileSystem.java | 25 ++++++++++--- 4 files changed, 94 insertions(+), 13 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 56d0fc59e90f1..895edc01397dc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -488,6 +488,14 @@ private static FileStatus wrapLocalFileStatus(FileStatus orig, : new ViewFsFileStatus(orig, qualified); } + /** + * {@inheritDoc} + * + * If the given path is a symlink(mount link), the path will be resolved to a + * target path and it will get the resolved path's FileStatus object. It will + * not be represented as a symlink and isDirectory API returns true if the + * resolved path is a directory, false otherwise. + */ @Override public FileStatus getFileStatus(final Path f) throws AccessControlException, FileNotFoundException, IOException { @@ -505,6 +513,25 @@ public void access(Path path, FsAction mode) throws AccessControlException, res.targetFileSystem.access(res.remainingPath, mode); } + /** + * {@inheritDoc} + * + * Note: listStatus on root("/") considers listing from fallbackLink if + * available. If the same directory name is present in configured mount path + * as well as in fallback link, then only the configured mount path will be + * listed in the returned result. + * + * If any of the the immediate children of the given path f is a symlink(mount + * link), the returned FileStatus object of that children would be represented + * as a symlink. It will not be resolved to the target path and will not get + * the target path FileStatus object. The target path will be available via + * getSymlink on that children's FileStatus object. Since it represents as + * symlink, isDirectory on that children's FileStatus will return false. + * + * If you want to get the FileStatus of target path for that children, you may + * want to use GetFileStatus API with that children's symlink path. Please see + * {@link ViewFileSystem#getFileStatus(Path f)} + */ @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, FileNotFoundException, IOException { @@ -1174,20 +1201,11 @@ public FileStatus getFileStatus(Path f) throws IOException { checkPathIsSlash(f); return new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(), - new Path(theInternalDir.fullPath).makeQualified( myUri, ROOT_PATH)); } - /** - * {@inheritDoc} - * - * Note: listStatus on root("/") considers listing from fallbackLink if - * available. If the same directory name is present in configured mount - * path as well as in fallback link, then only the configured mount path - * will be listed in the returned result. 
- */ @Override public FileStatus[] listStatus(Path f) throws AccessControlException, FileNotFoundException, IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index df10dce50b78f..4578a4c353e40 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -351,6 +351,14 @@ public FileChecksum getFileChecksum(final Path f) return res.targetFileSystem.getFileChecksum(res.remainingPath); } + /** + * {@inheritDoc} + * + * If the given path is a symlink(mount link), the path will be resolved to a + * target path and it will get the resolved path's FileStatus object. It will + * not be represented as a symlink and isDirectory API returns true if the + * resolved path is a directory, false otherwise. + */ @Override public FileStatus getFileStatus(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { @@ -436,6 +444,22 @@ public LocatedFileStatus getViewFsFileStatus(LocatedFileStatus stat, }; } + /** + * {@inheritDoc} + * + * If any of the the immediate children of the given path f is a symlink(mount + * link), the returned FileStatus object of that children would be represented + * as a symlink. It will not be resolved to the target path and will not get + * the target path FileStatus object. The target path will be available via + * getSymlink on that children's FileStatus object. Since it represents as + * symlink, isDirectory on that children's FileStatus will return false. + * + * If you want to get the FileStatus of target path for that children, you may + * want to use GetFileStatus API with that children's symlink path. Please see + * {@link ViewFs#getFileStatus(Path f)} + * + * Note: In ViewFs, the mount links are represented as symlinks. + */ @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java index 290f2c0e6766f..4162b198fb124 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -135,6 +135,14 @@ public FileChecksum getFileChecksum(Path f) return dfs.getFileChecksumWithCombineMode(getUriPath(f), Long.MAX_VALUE); } + /** + * {@inheritDoc} + * + * If the given path is a symlink, the path will be resolved to a target path + * and it will get the resolved path's FileStatus object. It will not be + * represented as a symlink and isDirectory API returns true if the resolved + * path is a directory, false otherwise. + */ @Override public FileStatus getFileStatus(Path f) throws IOException, UnresolvedLinkException { @@ -269,6 +277,20 @@ public HdfsFileStatus getNext() throws IOException { } } + /** + * {@inheritDoc} + * + * If any of the the immediate children of the given path f is a symlink, the + * returned FileStatus object of that children would be represented as a + * symlink. It will not be resolved to the target path and will not get the + * target path FileStatus object. 
The target path will be available via + * getSymlink on that children's FileStatus object. Since it represents as + * symlink, isDirectory on that children's FileStatus will return false. + * + * If you want to get the FileStatus of target path for that children, you may + * want to use GetFileStatus API with that children's symlink path. Please see + * {@link Hdfs#getFileStatus(Path f)} + */ @Override public FileStatus[] listStatus(Path f) throws IOException, UnresolvedLinkException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index b4a932ef142f8..55e228d34ebb8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -1143,10 +1143,21 @@ private FileStatus[] listStatusInternal(Path p) throws IOException { /** * List all the entries of a directory * - * Note that this operation is not atomic for a large directory. - * The entries of a directory may be fetched from NameNode multiple times. - * It only guarantees that each name occurs once if a directory - * undergoes changes between the calls. + * Note that this operation is not atomic for a large directory. The entries + * of a directory may be fetched from NameNode multiple times. It only + * guarantees that each name occurs once if a directory undergoes changes + * between the calls. + * + * If any of the the immediate children of the given path f is a symlink, the + * returned FileStatus object of that children would be represented as a + * symlink. It will not be resolved to the target path and will not get the + * target path FileStatus object. The target path will be available via + * getSymlink on that children's FileStatus object. Since it represents as + * symlink, isDirectory on that children's FileStatus will return false. + * + * If you want to get the FileStatus of target path for that children, you may + * want to use GetFileStatus API with that children's symlink path. Please see + * {@link DistributedFileSystem#getFileStatus(Path f)} */ @Override public FileStatus[] listStatus(Path p) throws IOException { @@ -1712,6 +1723,12 @@ public FsServerDefaults getServerDefaults() throws IOException { /** * Returns the stat information about the file. + * + * If the given path is a symlink, the path will be resolved to a target path + * and it will get the resolved path's FileStatus object. It will not be + * represented as a symlink and isDirectory API returns true if the resolved + * path is a directory, false otherwise. + * * @throws FileNotFoundException if the file does not exist. */ @Override From fed6fecd3a9e24efc20f9221505da35a7e1949c7 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Thu, 11 Jun 2020 21:02:41 +0000 Subject: [PATCH 020/131] YARN-10312. Add support for yarn logs -logFile to retain backward compatibility. Contributed by Jim Brennan. 
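For illustration only (not part of this change), a minimal sketch mirroring the new test: the deprecated -logFiles spelling is still accepted, and -log_files takes precedence when both are supplied. The application id below is a placeholder and a reachable YARN cluster configuration is assumed.

import org.apache.hadoop.yarn.client.cli.LogsCLI;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LogsCliCompatSketch {
  public static void main(String[] args) throws Exception {
    LogsCLI cli = new LogsCLI();
    cli.setConf(new YarnConfiguration());
    String appId = "application_1591900000000_0001";  // placeholder id

    // Old spelling alone still works and fetches stdout.
    int rcOld = cli.run(new String[] {
        "-applicationId", appId, "-logFiles", "stdout"});

    // When both are given, the new -log_files value wins over -logFiles.
    int rcBoth = cli.run(new String[] {"-applicationId", appId,
        "-log_files", "stdout", "-logFiles", "syslog"});

    System.out.println("exit codes: " + rcOld + ", " + rcBoth);
  }
}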
--- .../hadoop/yarn/client/cli/LogsCLI.java | 13 ++++++ .../hadoop/yarn/client/cli/TestLogsCLI.java | 42 +++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java index 833e779be4a20..4d67ce8178d6e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java @@ -100,6 +100,7 @@ public class LogsCLI extends Configured implements Tool { private static final String APP_OWNER_OPTION = "appOwner"; private static final String AM_CONTAINER_OPTION = "am"; private static final String PER_CONTAINER_LOG_FILES_OPTION = "log_files"; + private static final String PER_CONTAINER_LOG_FILES_OLD_OPTION = "logFiles"; private static final String PER_CONTAINER_LOG_FILES_REGEX_OPTION = "log_files_pattern"; private static final String LIST_NODES_OPTION = "list_nodes"; @@ -202,6 +203,12 @@ private int runCommand(String[] args) throws Exception { } if (commandLine.hasOption(PER_CONTAINER_LOG_FILES_OPTION)) { logFiles = commandLine.getOptionValues(PER_CONTAINER_LOG_FILES_OPTION); + } else { + // For backward compatibility, we need to check for the old form of this + // command line option as well. New form takes precedent. + if (commandLine.hasOption(PER_CONTAINER_LOG_FILES_OLD_OPTION)) { + logFiles = commandLine.getOptionValues(PER_CONTAINER_LOG_FILES_OLD_OPTION); + } } if (commandLine.hasOption(PER_CONTAINER_LOG_FILES_REGEX_OPTION)) { logFilesRegex = commandLine.getOptionValues( @@ -937,6 +944,12 @@ private Options createCommandOpts() { logFileOpt.setArgs(Option.UNLIMITED_VALUES); logFileOpt.setArgName("Log File Name"); opts.addOption(logFileOpt); + Option oldLogFileOpt = new Option(PER_CONTAINER_LOG_FILES_OLD_OPTION, true, + "Deprecated name for log_files, please use log_files option instead"); + oldLogFileOpt.setValueSeparator(','); + oldLogFileOpt.setArgs(Option.UNLIMITED_VALUES); + oldLogFileOpt.setArgName("Log File Name"); + opts.addOption(oldLogFileOpt); Option logFileRegexOpt = new Option(PER_CONTAINER_LOG_FILES_REGEX_OPTION, true, "Specify comma-separated value " + "to get matched log files by using java regex. 
Use \".*\" to " diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java index d422a2a486404..80f39b8f90302 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java @@ -529,6 +529,48 @@ public ContainerReport getContainerReport(String containerIdStr) createEmptyLog("empty"))); sysOutStream.reset(); + // Check backward compatibility for -logFiles + exitCode = cli.run(new String[] {"-applicationId", appId.toString(), + "-logFiles", "stdout"}); + assertTrue("Failed with -logFiles", exitCode == 0); + assertFalse("Failed with -logFiles", sysOutStream.toString().contains( + logMessage(containerId1, "syslog"))); + assertFalse("Failed with -logFiles", sysOutStream.toString().contains( + logMessage(containerId2, "syslog"))); + assertFalse("Failed with -logFiles", sysOutStream.toString().contains( + logMessage(containerId3, "syslog"))); + assertTrue("Failed with -logFiles", sysOutStream.toString().contains( + logMessage(containerId3, "stdout"))); + assertFalse("Failed with -logFiles", sysOutStream.toString().contains( + logMessage(containerId3, "stdout1234"))); + assertFalse("Failed with -logFiles", sysOutStream.toString().contains( + createEmptyLog("empty"))); + sysOutStream.reset(); + + // Check -log_files supercedes -logFiles + exitCode = cli.run(new String[] {"-applicationId", appId.toString(), + "-log_files", "stdout", "-logFiles", "syslog"}); + assertTrue("Failed with -logFiles and -log_files", exitCode == 0); + assertFalse("Failed with -logFiles and -log_files", + sysOutStream.toString().contains( + logMessage(containerId1, "syslog"))); + assertFalse("Failed with -logFiles and -log_files", + sysOutStream.toString().contains( + logMessage(containerId2, "syslog"))); + assertFalse("Failed with -logFiles and -log_files", + sysOutStream.toString().contains( + logMessage(containerId3, "syslog"))); + assertTrue("Failed with -logFiles and -log_files", + sysOutStream.toString().contains( + logMessage(containerId3, "stdout"))); + assertFalse("Failed with -logFiles and -log_files", + sysOutStream.toString().contains( + logMessage(containerId3, "stdout1234"))); + assertFalse("Failed with -logFiles and -log_files", + sysOutStream.toString().contains( + createEmptyLog("empty"))); + sysOutStream.reset(); + exitCode = cli.run(new String[] {"-applicationId", appId.toString(), "-log_files_pattern", "std*"}); assertTrue(exitCode == 0); From 7c4de59fc10953170bbef9a320ce70bcddae8bba Mon Sep 17 00:00:00 2001 From: Tao Yang Date: Fri, 12 Jun 2020 22:10:25 +0800 Subject: [PATCH 021/131] YARN-10293. Reserved Containers not allocated from available space of other nodes in CandidateNodeSet for MultiNodePlacement. Contributed by Prabhu Joseph. 
--- .../scheduler/capacity/CapacityScheduler.java | 28 +- .../TestCapacitySchedulerMultiNodes.java | 7 - ...citySchedulerMultiNodesWithPreemption.java | 271 ++++++++++++++++++ 3 files changed, 276 insertions(+), 30 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 5cef57adac63f..a6aa82443cc11 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -1721,31 +1721,13 @@ private CSAssignment allocateOrReserveNewContainers( */ private CSAssignment allocateContainersOnMultiNodes( CandidateNodeSet candidates) { - // When this time look at multiple nodes, try schedule if the - // partition has any available resource or killable resource - if (getRootQueue().getQueueCapacities().getUsedCapacity( - candidates.getPartition()) >= 1.0f - && preemptionManager.getKillableResource( - CapacitySchedulerConfiguration.ROOT, candidates.getPartition()) - == Resources.none()) { - // Try to allocate from reserved containers - for (FiCaSchedulerNode node : candidates.getAllNodes().values()) { - RMContainer reservedContainer = node.getReservedContainer(); - if (reservedContainer != null) { - allocateFromReservedContainer(node, false, reservedContainer); - } + // Try to allocate from reserved containers + for (FiCaSchedulerNode node : candidates.getAllNodes().values()) { + RMContainer reservedContainer = node.getReservedContainer(); + if (reservedContainer != null) { + allocateFromReservedContainer(node, false, reservedContainer); } - LOG.debug("This partition '{}' doesn't have available or " - + "killable resource", candidates.getPartition()); - ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, null, - "", getRootQueue().getQueuePath(), ActivityState.REJECTED, - ActivityDiagnosticConstant. 
- INIT_CHECK_PARTITION_RESOURCE_INSUFFICIENT); - ActivitiesLogger.NODE - .finishSkippedNodeAllocation(activitiesManager, null); - return null; } - return allocateOrReserveNewContainers(candidates, false); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java index bb2cbfdba148a..29de815040e27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java @@ -258,13 +258,6 @@ public void testExcessReservationWillBeUnreserved() throws Exception { Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); Assert.assertEquals(1, schedulerApp1.getLiveContainers().size()); Assert.assertEquals(0, schedulerApp1.getReservedContainers().size()); - Assert.assertEquals(1, schedulerApp2.getLiveContainers().size()); - - // Trigger scheduling to allocate a container on nm1 for app2. - cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); - Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); - Assert.assertEquals(1, schedulerApp1.getLiveContainers().size()); - Assert.assertEquals(0, schedulerApp1.getReservedContainers().size()); Assert.assertEquals(2, schedulerApp2.getLiveContainers().size()); Assert.assertEquals(7 * GB, cs.getNode(nm1.getNodeId()).getAllocatedResource().getMemorySize()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java new file mode 100644 index 0000000000000..65e0a1743e683 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java @@ -0,0 +1,271 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockAM; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmissionData; +import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmitter; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.MultiNodeSorter; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.MultiNodeSortingManager; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy; +import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +public class TestCapacitySchedulerMultiNodesWithPreemption + extends CapacitySchedulerTestBase { + + private static final Log LOG = LogFactory + .getLog(TestCapacitySchedulerMultiNodesWithPreemption.class); + private CapacitySchedulerConfiguration conf; + private static final String POLICY_CLASS_NAME = + "org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement." 
+ + "ResourceUsageMultiNodeLookupPolicy"; + + @Before + public void setUp() { + CapacitySchedulerConfiguration config = + new CapacitySchedulerConfiguration(); + config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, + DominantResourceCalculator.class.getName()); + conf = new CapacitySchedulerConfiguration(config); + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + conf.set(CapacitySchedulerConfiguration.MULTI_NODE_SORTING_POLICIES, + "resource-based"); + conf.set(CapacitySchedulerConfiguration.MULTI_NODE_SORTING_POLICY_NAME, + "resource-based"); + String policyName = + CapacitySchedulerConfiguration.MULTI_NODE_SORTING_POLICY_NAME + + ".resource-based" + ".class"; + conf.set(policyName, POLICY_CLASS_NAME); + conf.setBoolean(CapacitySchedulerConfiguration.MULTI_NODE_PLACEMENT_ENABLED, + true); + // Set this to avoid the AM pending issue + conf.set(CapacitySchedulerConfiguration + .MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, "1"); + conf.setInt("yarn.scheduler.minimum-allocation-mb", 512); + conf.setInt("yarn.scheduler.minimum-allocation-vcores", 1); + conf.setInt("yarn.scheduler.maximum-allocation-mb", 102400); + + // Configure two queues to test Preemption + conf.set("yarn.scheduler.capacity.root.queues", "A, default"); + conf.set("yarn.scheduler.capacity.root.A.capacity", "50"); + conf.set("yarn.scheduler.capacity.root.default.capacity", "50"); + conf.set("yarn.scheduler.capacity.root.A.maximum-capacity", "100"); + conf.set("yarn.scheduler.capacity.root.default.maximum-capacity", "100"); + conf.set("yarn.scheduler.capacity.root.A.user-limit-factor", "10"); + conf.set("yarn.scheduler.capacity.root.default.user-limit-factor", "10"); + + // Configure Preemption + conf.setLong( + CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, 10000); + conf.setLong(CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL, + 1500); + conf.setFloat(CapacitySchedulerConfiguration.TOTAL_PREEMPTION_PER_ROUND, + 1.0f); + conf.setFloat( + CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR, + 1.0f); + + conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, + ProportionalCapacityPreemptionPolicy.class.getCanonicalName()); + conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true); + } + + @Test(timeout=60000) + public void testAllocateReservationFromOtherNode() throws Exception { + MockRM rm = new MockRM(conf); + rm.start(); + MockNM[] nms = new MockNM[3]; + MockNM nm1 = rm.registerNode("127.0.0.1:1234", 1 * GB, 2); + nms[0] = nm1; + MockNM nm2 = rm.registerNode("127.0.0.2:1234", 2 * GB, 2); + nms[1] = nm2; + MockNM nm3 = rm.registerNode("127.0.0.3:1234", 3 * GB, 2); + nms[2] = nm3; + + MultiNodeSortingManager mns = rm.getRMContext() + .getMultiNodeSortingManager(); + MultiNodeSorter sorter = mns + .getMultiNodePolicy(POLICY_CLASS_NAME); + sorter.reSortClusterNodes(); + + // Step 1: Launch an App in Default Queue which utilizes the entire cluster + RMApp app1 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(3 * GB, rm) + .withAppName("app-1") + .withUser("user1") + .withAcls(null) + .withQueue("default") + .build()); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); + am1.allocateAndWaitForContainers("*", 1, 2 * GB, nm2); + am1.allocateAndWaitForContainers("*", 1, 1 * GB, nm3); + + // Step 2: Wait till the nodes utilization are full + GenericTestUtils.waitFor(() -> { + SchedulerNodeReport reportNM1 = + 
rm.getResourceScheduler().getNodeReport(nms[0].getNodeId()); + SchedulerNodeReport reportNM2 = + rm.getResourceScheduler().getNodeReport(nms[1].getNodeId()); + return (reportNM1.getAvailableResource().getMemorySize() == 0 * GB) + && (reportNM2.getAvailableResource().getMemorySize() == 0 * GB); + }, 10, 10000); + + + // Step 3: Launch another App in Queue A which will be Reserved + // after Preemption + final AtomicBoolean result = new AtomicBoolean(false); + RMApp app2 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm) + .withAppName("app-2") + .withUser("user2") + .withAcls(null) + .withQueue("A") + .build()); + + // Launch AM in a thread and in parallel free the preempted node's + // unallocated resources in main thread + Thread t1 = new Thread() { + public void run() { + try { + MockAM am2 = MockRM.launchAM(app2, rm, nm1); + result.set(true); + } catch (Exception e) { + Assert.fail("Failed to launch app-2"); + } + } + }; + t1.start(); + + // Step 4: Wait for Preemption to happen. It will preempt Node1 (1GB) + // Get the node where preemption happened which has the available space + final AtomicReference preemptedNode = new AtomicReference<>(); + GenericTestUtils.waitFor(() -> { + for (int i = 0; i < nms.length; i++) { + SchedulerNodeReport reportNM = + rm.getResourceScheduler().getNodeReport(nms[i].getNodeId()); + if (reportNM.getAvailableResource().getMemorySize() == 1 * GB) { + preemptedNode.set(nms[i]); + return true; + } + } + return false; + }, 10, 30000); + LOG.info("Preempted node is: " + preemptedNode.get().getNodeId()); + + + // Step 5: Don't release the container from NodeManager so that Reservation + // happens. Used Capacity will be < 1.0f but nodes won't have available + // containers so Reservation will happen. + FiCaSchedulerNode schedulerNode = + ((CapacityScheduler) rm.getResourceScheduler()) + .getNodeTracker().getNode(preemptedNode.get().getNodeId()); + Resource curResource = schedulerNode.getUnallocatedResource(); + schedulerNode.deductUnallocatedResource(Resource.newInstance(curResource)); + + ((CapacityScheduler) rm.getResourceScheduler()).getNodeTracker() + .removeNode(preemptedNode.get().getNodeId()); + ((CapacityScheduler) rm.getResourceScheduler()).getNodeTracker() + .addNode(schedulerNode); + + // Send a heartbeat to kick the tires on the Scheduler + // The container will be reserved for app-2 + RMNode preemptedRMNode = rm.getRMContext().getRMNodes().get( + preemptedNode.get().getNodeId()); + NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent( + preemptedRMNode); + rm.getResourceScheduler().handle(nodeUpdate); + + // Validate if Reservation happened + // Reservation will happen on last node in the iterator - Node3 + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + ApplicationAttemptId app2AttemptId = app2.getCurrentAppAttempt() + .getAppAttemptId(); + FiCaSchedulerApp schedulerApp = cs.getApplicationAttempt(app2AttemptId); + + assertEquals("App2 failed to get reserved container", 1, + schedulerApp.getReservedContainers().size()); + LOG.info("Reserved node is: " + + schedulerApp.getReservedContainers().get(0).getReservedNode()); + assertNotEquals("Failed to reserve as per the Multi Node Itearor", + schedulerApp.getReservedContainers().get(0).getReservedNode(), + preemptedNode.get().getNodeId()); + + + // Step 6: Okay, now preempted node is Node1 and reserved node is Node3 + // Validate if the Reserved Container gets allocated + // after updating release container. 
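+    // (Step 5 above deducted the node's unallocated space to force the
+    // reservation; the lines below add the allocated amount back to the
+    // node's total and re-register the node with the tracker, so the next
+    // node heartbeat lets the scheduler turn the reservation into a real
+    // allocation.)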
+ schedulerNode = ((CapacityScheduler) rm.getResourceScheduler()) + .getNodeTracker().getNode(preemptedNode.get().getNodeId()); + curResource = schedulerNode.getAllocatedResource(); + schedulerNode.updateTotalResource( + Resources.add(schedulerNode.getTotalResource(), curResource)); + + ((CapacityScheduler) rm.getResourceScheduler()).getNodeTracker() + .removeNode(preemptedNode.get().getNodeId()); + ((CapacityScheduler) rm.getResourceScheduler()).getNodeTracker() + .addNode(schedulerNode); + + preemptedRMNode = rm.getRMContext().getRMNodes().get( + preemptedNode.get().getNodeId()); + nodeUpdate = new NodeUpdateSchedulerEvent(preemptedRMNode); + rm.getResourceScheduler().handle(nodeUpdate); + + // Step 7: Wait for app-2 to get ALLOCATED + GenericTestUtils.waitFor(() -> { + return result.get(); + }, 10, 20000); + + // Step 8: Validate if app-2 has got 1 live container and + // released the reserved container + schedulerApp = cs.getApplicationAttempt(app2AttemptId); + assertEquals("App2 failed to get Allocated", 1, + schedulerApp.getLiveContainers().size()); + assertEquals("App2 failed to Unreserve", 0, + schedulerApp.getReservedContainers().size()); + + rm.stop(); + } +} \ No newline at end of file From e15408477017753ea1a0896c8f54daeadee40d10 Mon Sep 17 00:00:00 2001 From: Vinayakumar B Date: Fri, 12 Jun 2020 23:16:33 +0530 Subject: [PATCH 022/131] HADOOP-17046. Support downstreams' existing Hadoop-rpc implementations using non-shaded protobuf classes (#2026) --- .../dev-support/findbugsExcludeFile.xml | 4 + hadoop-common-project/hadoop-common/pom.xml | 92 +- .../ipc/protobuf/ProtobufRpcEngineProtos.java | 1163 +++++++++++++++++ .../org/apache/hadoop/ha/ZKFCRpcServer.java | 4 +- ...ServiceProtocolClientSideTranslatorPB.java | 6 +- .../ZKFCProtocolClientSideTranslatorPB.java | 4 +- .../org/apache/hadoop/ipc/ProtobufHelper.java | 17 + .../apache/hadoop/ipc/ProtobufRpcEngine.java | 22 +- .../apache/hadoop/ipc/ProtobufRpcEngine2.java | 598 +++++++++ .../hadoop/ipc/ProtobufRpcEngineCallback.java | 11 +- .../ipc/ProtobufRpcEngineCallback2.java | 29 + .../main/java/org/apache/hadoop/ipc/RPC.java | 4 +- .../org/apache/hadoop/ipc/RpcClientUtil.java | 2 +- .../org/apache/hadoop/ipc/RpcWritable.java | 45 + .../java/org/apache/hadoop/ipc/Server.java | 12 +- .../org/apache/hadoop/tracing/TraceAdmin.java | 4 +- .../src/main/proto/ProtobufRpcEngine2.proto | 67 + .../org/apache/hadoop/ha/DummyHAService.java | 4 +- .../apache/hadoop/ipc/RPCCallBenchmark.java | 8 +- .../ipc/TestMultipleProtocolServer.java | 2 +- .../ipc/TestProtoBufRPCCompatibility.java | 9 +- .../apache/hadoop/ipc/TestProtoBufRpc.java | 5 +- .../ipc/TestProtoBufRpcServerHandoff.java | 6 +- .../hadoop/ipc/TestRPCCompatibility.java | 10 +- .../hadoop/ipc/TestRPCWaitForProxy.java | 2 +- .../hadoop/ipc/TestReuseRpcConnections.java | 3 +- .../org/apache/hadoop/ipc/TestRpcBase.java | 2 +- .../org/apache/hadoop/ipc/TestSaslRPC.java | 4 +- .../security/TestDoAsEffectiveUser.java | 18 +- .../hadoop/hdfs/NameNodeProxiesClient.java | 4 +- .../ClientDatanodeProtocolTranslatorPB.java | 4 +- .../ClientNamenodeProtocolTranslatorPB.java | 6 +- .../ReconfigurationProtocolTranslatorPB.java | 4 +- .../federation/router/ConnectionPool.java | 4 +- .../federation/router/RouterAdminServer.java | 4 +- .../federation/router/RouterClient.java | 4 +- .../federation/router/RouterRpcServer.java | 4 +- .../hdfs/tools/federation/RouterAdmin.java | 4 +- .../hdfs/server/federation/MockNamenode.java | 4 +- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 3 + 
.../java/org/apache/hadoop/hdfs/DFSUtil.java | 24 +- .../apache/hadoop/hdfs/NameNodeProxies.java | 4 +- ...ifelineProtocolClientSideTranslatorPB.java | 4 +- ...atanodeProtocolClientSideTranslatorPB.java | 4 +- .../InterDatanodeProtocolTranslatorPB.java | 4 +- .../qjournal/client/IPCLoggerChannel.java | 6 +- .../qjournal/server/JournalNodeRpcServer.java | 4 +- .../qjournal/server/JournalNodeSyncer.java | 4 +- .../InMemoryLevelDBAliasMapServer.java | 4 +- .../hadoop/hdfs/server/datanode/DataNode.java | 4 +- .../server/namenode/NameNodeRpcServer.java | 6 +- .../apache/hadoop/hdfs/tools/DFSAdmin.java | 4 +- .../qjournal/client/TestQJMWithFaults.java | 4 +- .../client/TestQuorumJournalManager.java | 4 +- .../security/token/block/TestBlockToken.java | 4 +- .../namenode/snapshot/SnapshotTestHelper.java | 2 +- .../client/HSClientProtocolPBClientImpl.java | 4 +- .../client/MRClientProtocolPBClientImpl.java | 8 +- .../hadoop/mapreduce/v2/hs/HSProxies.java | 4 +- .../mapreduce/v2/hs/server/HSAdminServer.java | 6 +- .../client/ClientAMProtocolPBClientImpl.java | 4 +- ...ApplicationClientProtocolPBClientImpl.java | 4 +- ...pplicationHistoryProtocolPBClientImpl.java | 5 +- ...ApplicationMasterProtocolPBClientImpl.java | 5 +- .../client/ClientSCMProtocolPBClientImpl.java | 4 +- ...ntainerManagementProtocolPBClientImpl.java | 4 +- .../CsiAdaptorProtocolPBClientImpl.java | 4 +- .../impl/pb/RpcServerFactoryPBImpl.java | 4 +- ...gerAdministrationProtocolPBClientImpl.java | 4 +- .../client/SCMAdminProtocolPBClientImpl.java | 4 +- ...lectorNodemanagerProtocolPBClientImpl.java | 4 +- ...butedSchedulingAMProtocolPBClientImpl.java | 4 +- .../client/ResourceTrackerPBClientImpl.java | 5 +- .../SCMUploaderProtocolPBClientImpl.java | 4 +- .../java/org/apache/hadoop/yarn/TestRPC.java | 4 +- .../LocalizationProtocolPBClientImpl.java | 5 +- .../server/nodemanager/TestNMAuditLogger.java | 4 +- .../server/resourcemanager/AdminService.java | 4 +- ...ortunisticContainerAllocatorAMService.java | 6 +- .../resourcemanager/TestRMAuditLogger.java | 4 +- .../security/TestClientToAMTokens.java | 6 +- 81 files changed, 2233 insertions(+), 169 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/arm-java/org/apache/hadoop/ipc/protobuf/ProtobufRpcEngineProtos.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java create mode 100644 hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml index cf5c3874d1063..23e39d055ffc5 100644 --- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml +++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml @@ -283,6 +283,10 @@ + + + + diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index dd058812fc774..9bb70ac76a06a 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -395,7 +395,12 @@ src-compile-protoc - false + + false + + ProtobufRpcEngine.proto + + src-test-compile-protoc @@ -411,6 +416,9 @@ replace-generated-sources false + + **/ProtobufRpcEngineProtos.java + @@ -423,6 +431,14 @@ replace-sources false + + + **/ProtobufHelper.java + **/RpcWritable.java + 
**/ProtobufRpcEngineCallback.java + **/ProtobufRpcEngine.java + **/ProtobufRpcEngineProtos.java + @@ -1015,7 +1031,79 @@ - + + + aarch64 + + false + + aarch64 + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + add-source-legacy-protobuf + generate-sources + + add-source + + + + ${basedir}/src/main/arm-java + + + + + + + + + + + x86_64 + + false + + !aarch64 + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + src-compile-protoc-legacy + generate-sources + + compile + + + false + + + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + + false + ${basedir}/src/main/proto + ${project.build.directory}/generated-sources/java + false + + ProtobufRpcEngine.proto + + + + + + + + diff --git a/hadoop-common-project/hadoop-common/src/main/arm-java/org/apache/hadoop/ipc/protobuf/ProtobufRpcEngineProtos.java b/hadoop-common-project/hadoop-common/src/main/arm-java/org/apache/hadoop/ipc/protobuf/ProtobufRpcEngineProtos.java new file mode 100644 index 0000000000000..28e28bf633784 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/arm-java/org/apache/hadoop/ipc/protobuf/ProtobufRpcEngineProtos.java @@ -0,0 +1,1163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// This is class is added to source because for arm protoc 2.5.0 executable +// is not available to generate the same code. +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: ProtobufRpcEngine.proto +package org.apache.hadoop.ipc.protobuf; + +public final class ProtobufRpcEngineProtos { + private ProtobufRpcEngineProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface RequestHeaderProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string methodName = 1; + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + boolean hasMethodName(); + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + java.lang.String getMethodName(); + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + com.google.protobuf.ByteString + getMethodNameBytes(); + + // required string declaringClassProtocolName = 2; + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each Rpc call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+     * about the protocol reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to the ProtocolInfoProto
+     * 
+ */ + boolean hasDeclaringClassProtocolName(); + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each Rpc call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+     * about the protocol reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to the ProtocolInfoProto
+     * 
+ */ + java.lang.String getDeclaringClassProtocolName(); + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each Rpc call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+     * about the protocol reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to the ProtocolInfoProto
+     * 
+ */ + com.google.protobuf.ByteString + getDeclaringClassProtocolNameBytes(); + + // required uint64 clientProtocolVersion = 3; + /** + * required uint64 clientProtocolVersion = 3; + * + *
+     ** protocol version of class declaring the called method
+     * 
+ */ + boolean hasClientProtocolVersion(); + /** + * required uint64 clientProtocolVersion = 3; + * + *
+     ** protocol version of class declaring the called method
+     * 
+ */ + long getClientProtocolVersion(); + } + /** + * Protobuf type {@code hadoop.common.RequestHeaderProto} + * + *
+   **
+   * This message is the header for the Protobuf Rpc Engine
+   * when sending an RPC request from the RPC client to the RPC server.
+   * The actual request (serialized as protobuf) follows this request.
+   *
+   * No special header is needed for the Rpc Response for Protobuf Rpc Engine.
+   * The normal RPC response header (see RpcHeader.proto) is sufficient.
+   * 
+ */ + public static final class RequestHeaderProto extends + com.google.protobuf.GeneratedMessage + implements RequestHeaderProtoOrBuilder { + // Use RequestHeaderProto.newBuilder() to construct. + private RequestHeaderProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RequestHeaderProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RequestHeaderProto defaultInstance; + public static RequestHeaderProto getDefaultInstance() { + return defaultInstance; + } + + public RequestHeaderProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RequestHeaderProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + methodName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + clientProtocolVersion_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.class, org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RequestHeaderProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RequestHeaderProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string methodName = 1; + public static final int METHODNAME_FIELD_NUMBER = 1; + private java.lang.Object methodName_; + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + public boolean hasMethodName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + public java.lang.String getMethodName() { + java.lang.Object ref = methodName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + methodName_ = s; + } + return s; + } + } + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + public com.google.protobuf.ByteString + getMethodNameBytes() { + java.lang.Object ref = methodName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + methodName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string declaringClassProtocolName = 2; + public static final int DECLARINGCLASSPROTOCOLNAME_FIELD_NUMBER = 2; + private java.lang.Object declaringClassProtocolName_; + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each Rpc call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+     * about the protocol reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to the ProtocolInfoProto
+     * 
+ */ + public boolean hasDeclaringClassProtocolName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each Rpc call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+     * about the protocol reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to the ProtocolInfoProto
+     * 
+ */ + public java.lang.String getDeclaringClassProtocolName() { + java.lang.Object ref = declaringClassProtocolName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + declaringClassProtocolName_ = s; + } + return s; + } + } + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each Rpc call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+     * about the protocol reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to the ProtocolInfoProto
+     * 
+ */ + public com.google.protobuf.ByteString + getDeclaringClassProtocolNameBytes() { + java.lang.Object ref = declaringClassProtocolName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + declaringClassProtocolName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint64 clientProtocolVersion = 3; + public static final int CLIENTPROTOCOLVERSION_FIELD_NUMBER = 3; + private long clientProtocolVersion_; + /** + * required uint64 clientProtocolVersion = 3; + * + *
+     ** protocol version of class declaring the called method
+     * 
+ */ + public boolean hasClientProtocolVersion() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
+     ** protocol version of class declaring the called method
+     * 
+ */ + public long getClientProtocolVersion() { + return clientProtocolVersion_; + } + + private void initFields() { + methodName_ = ""; + declaringClassProtocolName_ = ""; + clientProtocolVersion_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasMethodName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasDeclaringClassProtocolName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasClientProtocolVersion()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getMethodNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getDeclaringClassProtocolNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, clientProtocolVersion_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getMethodNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getDeclaringClassProtocolNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, clientProtocolVersion_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto other = (org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) obj; + + boolean result = true; + result = result && (hasMethodName() == other.hasMethodName()); + if (hasMethodName()) { + result = result && getMethodName() + .equals(other.getMethodName()); + } + result = result && (hasDeclaringClassProtocolName() == other.hasDeclaringClassProtocolName()); + if (hasDeclaringClassProtocolName()) { + result = result && getDeclaringClassProtocolName() + .equals(other.getDeclaringClassProtocolName()); + } + result = result && (hasClientProtocolVersion() == other.hasClientProtocolVersion()); + if (hasClientProtocolVersion()) { + result = result && (getClientProtocolVersion() + == other.getClientProtocolVersion()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasMethodName()) { + hash = (37 * hash) + 
METHODNAME_FIELD_NUMBER; + hash = (53 * hash) + getMethodName().hashCode(); + } + if (hasDeclaringClassProtocolName()) { + hash = (37 * hash) + DECLARINGCLASSPROTOCOLNAME_FIELD_NUMBER; + hash = (53 * hash) + getDeclaringClassProtocolName().hashCode(); + } + if (hasClientProtocolVersion()) { + hash = (37 * hash) + CLIENTPROTOCOLVERSION_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getClientProtocolVersion()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.RequestHeaderProto} + * + *
+     **
+     * This message is the header for the Protobuf Rpc Engine
+     * when sending an RPC request from the RPC client to the RPC server.
+     * The actual request (serialized as protobuf) follows this request.
+     *
+     * No special header is needed for the Rpc Response for Protobuf Rpc Engine.
+     * The normal RPC response header (see RpcHeader.proto) is sufficient.
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.class, org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + methodName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + declaringClassProtocolName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + clientProtocolVersion_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto build() { + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto buildPartial() { + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto result = new org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.methodName_ = methodName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.declaringClassProtocolName_ = declaringClassProtocolName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.clientProtocolVersion_ = clientProtocolVersion_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) { + return 
mergeFrom((org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto other) { + if (other == org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.getDefaultInstance()) return this; + if (other.hasMethodName()) { + bitField0_ |= 0x00000001; + methodName_ = other.methodName_; + onChanged(); + } + if (other.hasDeclaringClassProtocolName()) { + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = other.declaringClassProtocolName_; + onChanged(); + } + if (other.hasClientProtocolVersion()) { + setClientProtocolVersion(other.getClientProtocolVersion()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasMethodName()) { + + return false; + } + if (!hasDeclaringClassProtocolName()) { + + return false; + } + if (!hasClientProtocolVersion()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string methodName = 1; + private java.lang.Object methodName_ = ""; + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public boolean hasMethodName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public java.lang.String getMethodName() { + java.lang.Object ref = methodName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + methodName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public com.google.protobuf.ByteString + getMethodNameBytes() { + java.lang.Object ref = methodName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + methodName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public Builder setMethodName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + methodName_ = value; + onChanged(); + return this; + } + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public Builder clearMethodName() { + bitField0_ = (bitField0_ & ~0x00000001); + methodName_ = getDefaultInstance().getMethodName(); + onChanged(); + return this; + } + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public Builder setMethodNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + methodName_ = value; + onChanged(); + return this; + } + + // required string declaringClassProtocolName = 2; + private java.lang.Object declaringClassProtocolName_ = ""; + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each Rpc call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+       * about the protocol reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to the ProtocolInfoProto
+       * 
+ */ + public boolean hasDeclaringClassProtocolName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each Rpc call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+       * about the protocol reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to the ProtocolInfoProto
+       * 
+ */ + public java.lang.String getDeclaringClassProtocolName() { + java.lang.Object ref = declaringClassProtocolName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + declaringClassProtocolName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each Rpc call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+       * about the protocol reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to the ProtocolInfoProto
+       * 
+ */ + public com.google.protobuf.ByteString + getDeclaringClassProtocolNameBytes() { + java.lang.Object ref = declaringClassProtocolName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + declaringClassProtocolName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each Rpc call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+       * about the protocol reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to the ProtocolInfoProto
+       * 
+ */ + public Builder setDeclaringClassProtocolName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = value; + onChanged(); + return this; + } + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each Rpc call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+       * about the protocol reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to the ProtocolInfoProto
+       * 
+ */ + public Builder clearDeclaringClassProtocolName() { + bitField0_ = (bitField0_ & ~0x00000002); + declaringClassProtocolName_ = getDefaultInstance().getDeclaringClassProtocolName(); + onChanged(); + return this; + } + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each Rpc call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
+       * about the protocol reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to the ProtocolInfoProto
+       * 
+ */ + public Builder setDeclaringClassProtocolNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = value; + onChanged(); + return this; + } + + // required uint64 clientProtocolVersion = 3; + private long clientProtocolVersion_ ; + /** + * required uint64 clientProtocolVersion = 3; + * + *
+       ** protocol version of class declaring the called method
+       * 
+ */ + public boolean hasClientProtocolVersion() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
+       ** protocol version of class declaring the called method
+       * 
+ */ + public long getClientProtocolVersion() { + return clientProtocolVersion_; + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
+       ** protocol version of class declaring the called method
+       * 
+ */ + public Builder setClientProtocolVersion(long value) { + bitField0_ |= 0x00000004; + clientProtocolVersion_ = value; + onChanged(); + return this; + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
+       ** protocol version of class declaring the called method
+       * 
+ */ + public Builder clearClientProtocolVersion() { + bitField0_ = (bitField0_ & ~0x00000004); + clientProtocolVersion_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.RequestHeaderProto) + } + + static { + defaultInstance = new RequestHeaderProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.RequestHeaderProto) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_RequestHeaderProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\027ProtobufRpcEngine.proto\022\rhadoop.common" + + "\"k\n\022RequestHeaderProto\022\022\n\nmethodName\030\001 \002" + + "(\t\022\"\n\032declaringClassProtocolName\030\002 \002(\t\022\035" + + "\n\025clientProtocolVersion\030\003 \002(\004B<\n\036org.apa" + + "che.hadoop.ipc.protobufB\027ProtobufRpcEngi" + + "neProtos\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hadoop_common_RequestHeaderProto_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_RequestHeaderProto_descriptor, + new java.lang.String[] { "MethodName", "DeclaringClassProtocolName", "ClientProtocolVersion", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java index 61ea53c420ab1..09161c745dc06 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService; import org.apache.hadoop.ha.protocolPB.ZKFCProtocolPB; import org.apache.hadoop.ha.protocolPB.ZKFCProtocolServerSideTranslatorPB; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.security.AccessControlException; @@ -51,7 +51,7 @@ public class ZKFCRpcServer implements ZKFCProtocol { this.zkfc = zkfc; RPC.setProtocolEngine(conf, ZKFCProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ZKFCProtocolServerSideTranslatorPB translator = new ZKFCProtocolServerSideTranslatorPB(this); BlockingService service = ZKFCProtocolService diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java index e53820cd13107..2cbfd0d0ec030 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java @@ -38,7 +38,7 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto; import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.UserGroupInformation; @@ -67,7 +67,7 @@ public class HAServiceProtocolClientSideTranslatorPB implements public HAServiceProtocolClientSideTranslatorPB(InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); rpcProxy = RPC.getProxy(HAServiceProtocolPB.class, RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, conf); } @@ -76,7 +76,7 @@ public HAServiceProtocolClientSideTranslatorPB( InetSocketAddress addr, Configuration conf, SocketFactory socketFactory, int timeout) throws IOException { RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); rpcProxy = RPC.getProxy(HAServiceProtocolPB.class, RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java index 7001d93995f0f..3777207c7e45c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto; import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.AccessControlException; @@ -48,7 +48,7 @@ public ZKFCProtocolClientSideTranslatorPB( InetSocketAddress addr, Configuration conf, SocketFactory socketFactory, int timeout) throws IOException { RPC.setProtocolEngine(conf, ZKFCProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); rpcProxy = RPC.getProxy(ZKFCProtocolPB.class, RPC.getProtocolVersion(ZKFCProtocolPB.class), addr, UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java index bb86cfc35bf4e..1e110b9011313 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java @@ -53,6 +53,23 @@ public static IOException getRemoteException(ServiceException se) { return e instanceof IOException ? (IOException) e : new IOException(se); } + /** + * Kept for backward compatible. + * Return the IOException thrown by the remote server wrapped in + * ServiceException as cause. + * @param se ServiceException that wraps IO exception thrown by the server + * @return Exception wrapped in ServiceException or + * a new IOException that wraps the unexpected ServiceException. + */ + @Deprecated + public static IOException getRemoteException( + com.google.protobuf.ServiceException se) { + Throwable e = se.getCause(); + if (e == null) { + return new IOException(se); + } + return e instanceof IOException ? (IOException) e : new IOException(se); + } /** * Map used to cache fixed strings to ByteStrings. Since there is no diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java index 14b356f847acf..220ad1ded9fec 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java @@ -19,8 +19,11 @@ package org.apache.hadoop.ipc; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.protobuf.*; -import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.BlockingService; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.ServiceException; +import com.google.protobuf.TextFormat; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -29,6 +32,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.Client.ConnectionId; import org.apache.hadoop.ipc.RPC.RpcInvoker; +import org.apache.hadoop.ipc.RPC.RpcKind; import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; @@ -52,7 +56,10 @@ /** * RPC Engine for for protobuf based RPCs. + * This engine uses Protobuf 2.5.0. Recommended to upgrade to Protobuf 3.x + * from hadoop-thirdparty and use ProtobufRpcEngine2. 
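As an illustration of the migration pattern this commit applies everywhere (the deprecation note above points existing ProtobufRpcEngine users at ProtobufRpcEngine2, and most hunks are that one-line swap at RPC.setProtocolEngine call sites), a minimal sketch follows; HypotheticalProtocolPB is a placeholder, not a class from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtobufRpcEngine2;
    import org.apache.hadoop.ipc.RPC;

    public class Engine2SelectionSketch {
      /** Placeholder protocol interface; stands in for e.g. ZKFCProtocolPB. */
      interface HypotheticalProtocolPB { }

      static Configuration confWithNewEngine() {
        Configuration conf = new Configuration();
        // Bind the protocol to the shaded-protobuf engine, mirroring the
        // substitutions made throughout this commit.
        RPC.setProtocolEngine(conf, HypotheticalProtocolPB.class,
            ProtobufRpcEngine2.class);
        return conf;
      }
    }

Since RequestHeaderProto in ProtobufRpcEngine2.proto carries the same three required fields as the original header, the wire format is unchanged; only the in-process protobuf runtime (com.google.protobuf vs. org.apache.hadoop.thirdparty.protobuf) differs.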
*/ +@Deprecated @InterfaceStability.Evolving public class ProtobufRpcEngine implements RpcEngine { public static final Logger LOG = @@ -355,6 +362,7 @@ public static class Server extends RPC.Server { new ThreadLocal<>(); static final ThreadLocal currentCallInfo = new ThreadLocal<>(); + private static final RpcInvoker RPC_INVOKER = new ProtoBufRpcInvoker(); static class CallInfo { private final RPC.Server server; @@ -433,7 +441,15 @@ public Server(Class protocolClass, Object protocolImpl, registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass, protocolImpl); } - + + @Override + protected RpcInvoker getServerRpcInvoker(RpcKind rpcKind) { + if (rpcKind == RpcKind.RPC_PROTOCOL_BUFFER) { + return RPC_INVOKER; + } + return super.getServerRpcInvoker(rpcKind); + } + /** * Protobuf invoker for {@link RpcInvoker} */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java new file mode 100644 index 0000000000000..30315343962c8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java @@ -0,0 +1,598 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ipc; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.protobuf.*; +import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.ipc.Client.ConnectionId; +import org.apache.hadoop.ipc.RPC.RpcInvoker; +import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngine2Protos.RequestHeaderProto; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.AsyncGet; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.SocketFactory; +import java.io.IOException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.net.InetSocketAddress; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * RPC Engine for for protobuf based RPCs. 
+ */ +@InterfaceStability.Evolving +public class ProtobufRpcEngine2 implements RpcEngine { + public static final Logger LOG = + LoggerFactory.getLogger(ProtobufRpcEngine2.class); + private static final ThreadLocal> + ASYNC_RETURN_MESSAGE = new ThreadLocal<>(); + + static { // Register the rpcRequest deserializer for ProtobufRpcEngine + org.apache.hadoop.ipc.Server.registerProtocolEngine( + RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class, + new Server.ProtoBufRpcInvoker()); + } + + private static final ClientCache CLIENTS = new ClientCache(); + + @Unstable + public static AsyncGet getAsyncReturnMessage() { + return ASYNC_RETURN_MESSAGE.get(); + } + + public ProtocolProxy getProxy(Class protocol, long clientVersion, + InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, + SocketFactory factory, int rpcTimeout) throws IOException { + return getProxy(protocol, clientVersion, addr, ticket, conf, factory, + rpcTimeout, null); + } + + @Override + public ProtocolProxy getProxy( + Class protocol, long clientVersion, + InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, + SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy) + throws IOException { + return getProxy(protocol, clientVersion, addr, ticket, conf, factory, + rpcTimeout, connectionRetryPolicy, null, null); + } + + @Override + @SuppressWarnings("unchecked") + public ProtocolProxy getProxy(Class protocol, long clientVersion, + InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, + SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy, + AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext) + throws IOException { + + final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory, + rpcTimeout, connectionRetryPolicy, fallbackToSimpleAuth, + alignmentContext); + return new ProtocolProxy(protocol, (T) Proxy.newProxyInstance( + protocol.getClassLoader(), new Class[]{protocol}, invoker), false); + } + + @Override + public ProtocolProxy getProtocolMetaInfoProxy( + ConnectionId connId, Configuration conf, SocketFactory factory) + throws IOException { + Class protocol = ProtocolMetaInfoPB.class; + return new ProtocolProxy(protocol, + (ProtocolMetaInfoPB) Proxy.newProxyInstance(protocol.getClassLoader(), + new Class[]{protocol}, new Invoker(protocol, connId, conf, + factory)), false); + } + + private static final class Invoker implements RpcInvocationHandler { + private final Map returnTypes = + new ConcurrentHashMap(); + private boolean isClosed = false; + private final Client.ConnectionId remoteId; + private final Client client; + private final long clientProtocolVersion; + private final String protocolName; + private AtomicBoolean fallbackToSimpleAuth; + private AlignmentContext alignmentContext; + + private Invoker(Class protocol, InetSocketAddress addr, + UserGroupInformation ticket, Configuration conf, SocketFactory factory, + int rpcTimeout, RetryPolicy connectionRetryPolicy, + AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext) + throws IOException { + this(protocol, Client.ConnectionId.getConnectionId( + addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf), + conf, factory); + this.fallbackToSimpleAuth = fallbackToSimpleAuth; + this.alignmentContext = alignmentContext; + } + + /** + * This constructor takes a connectionId, instead of creating a new one. 
+ */ + private Invoker(Class protocol, Client.ConnectionId connId, + Configuration conf, SocketFactory factory) { + this.remoteId = connId; + this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class); + this.protocolName = RPC.getProtocolName(protocol); + this.clientProtocolVersion = RPC + .getProtocolVersion(protocol); + } + + private RequestHeaderProto constructRpcRequestHeader(Method method) { + RequestHeaderProto.Builder builder = RequestHeaderProto + .newBuilder(); + builder.setMethodName(method.getName()); + + + // For protobuf, {@code protocol} used when creating client side proxy is + // the interface extending BlockingInterface, which has the annotations + // such as ProtocolName etc. + // + // Using Method.getDeclaringClass(), as in WritableEngine to get at + // the protocol interface will return BlockingInterface, from where + // the annotation ProtocolName and Version cannot be + // obtained. + // + // Hence we simply use the protocol class used to create the proxy. + // For PB this may limit the use of mixins on client side. + builder.setDeclaringClassProtocolName(protocolName); + builder.setClientProtocolVersion(clientProtocolVersion); + return builder.build(); + } + + /** + * This is the client side invoker of RPC method. It only throws + * ServiceException, since the invocation proxy expects only + * ServiceException to be thrown by the method in case protobuf service. + * + * ServiceException has the following causes: + *
+ * <ol>
+ * <li>Exceptions encountered on the client side in this method are
+ * set as cause in ServiceException as is.</li>
+ * <li>Exceptions from the server are wrapped in RemoteException and are
+ * set as cause in ServiceException</li>
+ * </ol>
+ * + * Note that the client calling protobuf RPC methods, must handle + * ServiceException by getting the cause from the ServiceException. If the + * cause is RemoteException, then unwrap it to get the exception thrown by + * the server. + */ + @Override + public Message invoke(Object proxy, final Method method, Object[] args) + throws ServiceException { + long startTime = 0; + if (LOG.isDebugEnabled()) { + startTime = Time.now(); + } + + if (args.length != 2) { // RpcController + Message + throw new ServiceException( + "Too many or few parameters for request. Method: [" + + method.getName() + "]" + ", Expected: 2, Actual: " + + args.length); + } + if (args[1] == null) { + throw new ServiceException("null param while calling Method: [" + + method.getName() + "]"); + } + + // if Tracing is on then start a new span for this rpc. + // guard it in the if statement to make sure there isn't + // any extra string manipulation. + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = null; + if (tracer != null) { + traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method)); + } + + RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method); + + if (LOG.isTraceEnabled()) { + LOG.trace(Thread.currentThread().getId() + ": Call -> " + + remoteId + ": " + method.getName() + + " {" + TextFormat.shortDebugString((Message) args[1]) + "}"); + } + + + final Message theRequest = (Message) args[1]; + final RpcWritable.Buffer val; + try { + val = (RpcWritable.Buffer) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER, + new RpcProtobufRequest(rpcRequestHeader, theRequest), remoteId, + fallbackToSimpleAuth, alignmentContext); + + } catch (Throwable e) { + if (LOG.isTraceEnabled()) { + LOG.trace(Thread.currentThread().getId() + ": Exception <- " + + remoteId + ": " + method.getName() + + " {" + e + "}"); + } + if (traceScope != null) { + traceScope.addTimelineAnnotation("Call got exception: " + + e.toString()); + } + throw new ServiceException(e); + } finally { + if (traceScope != null) { + traceScope.close(); + } + } + + if (LOG.isDebugEnabled()) { + long callTime = Time.now() - startTime; + LOG.debug("Call: " + method.getName() + " took " + callTime + "ms"); + } + + if (Client.isAsynchronousMode()) { + final AsyncGet arr + = Client.getAsyncRpcResponse(); + final AsyncGet asyncGet = + new AsyncGet() { + @Override + public Message get(long timeout, TimeUnit unit) throws Exception { + return getReturnMessage(method, arr.get(timeout, unit)); + } + + @Override + public boolean isDone() { + return arr.isDone(); + } + }; + ASYNC_RETURN_MESSAGE.set(asyncGet); + return null; + } else { + return getReturnMessage(method, val); + } + } + + private Message getReturnMessage(final Method method, + final RpcWritable.Buffer buf) throws ServiceException { + Message prototype = null; + try { + prototype = getReturnProtoType(method); + } catch (Exception e) { + throw new ServiceException(e); + } + Message returnMessage; + try { + returnMessage = buf.getValue(prototype.getDefaultInstanceForType()); + + if (LOG.isTraceEnabled()) { + LOG.trace(Thread.currentThread().getId() + ": Response <- " + + remoteId + ": " + method.getName() + + " {" + TextFormat.shortDebugString(returnMessage) + "}"); + } + + } catch (Throwable e) { + throw new ServiceException(e); + } + return returnMessage; + } + + @Override + public void close() throws IOException { + if (!isClosed) { + isClosed = true; + CLIENTS.stopClient(client); + } + } + + private Message getReturnProtoType(Method method) throws Exception { + if 
(returnTypes.containsKey(method.getName())) { + return returnTypes.get(method.getName()); + } + + Class returnType = method.getReturnType(); + Method newInstMethod = returnType.getMethod("getDefaultInstance"); + newInstMethod.setAccessible(true); + Message prototype = (Message) newInstMethod.invoke(null, (Object[]) null); + returnTypes.put(method.getName(), prototype); + return prototype; + } + + @Override //RpcInvocationHandler + public ConnectionId getConnectionId() { + return remoteId; + } + } + + @VisibleForTesting + @InterfaceAudience.Private + @InterfaceStability.Unstable + static Client getClient(Configuration conf) { + return CLIENTS.getClient(conf, SocketFactory.getDefault(), + RpcWritable.Buffer.class); + } + + + + @Override + public RPC.Server getServer(Class protocol, Object protocolImpl, + String bindAddress, int port, int numHandlers, int numReaders, + int queueSizePerHandler, boolean verbose, Configuration conf, + SecretManager secretManager, + String portRangeConfig, AlignmentContext alignmentContext) + throws IOException { + return new Server(protocol, protocolImpl, conf, bindAddress, port, + numHandlers, numReaders, queueSizePerHandler, verbose, secretManager, + portRangeConfig, alignmentContext); + } + + public static class Server extends RPC.Server { + + static final ThreadLocal CURRENT_CALLBACK = + new ThreadLocal<>(); + + static final ThreadLocal CURRENT_CALL_INFO = new ThreadLocal<>(); + + static class CallInfo { + private final RPC.Server server; + private final String methodName; + + CallInfo(RPC.Server server, String methodName) { + this.server = server; + this.methodName = methodName; + } + } + + static class ProtobufRpcEngineCallbackImpl + implements ProtobufRpcEngineCallback2 { + + private final RPC.Server server; + private final Call call; + private final String methodName; + private final long setupTime; + + ProtobufRpcEngineCallbackImpl() { + this.server = CURRENT_CALL_INFO.get().server; + this.call = Server.getCurCall().get(); + this.methodName = CURRENT_CALL_INFO.get().methodName; + this.setupTime = Time.now(); + } + + @Override + public void setResponse(Message message) { + long processingTime = Time.now() - setupTime; + call.setDeferredResponse(RpcWritable.wrap(message)); + server.updateDeferredMetrics(methodName, processingTime); + } + + @Override + public void error(Throwable t) { + long processingTime = Time.now() - setupTime; + String detailedMetricsName = t.getClass().getSimpleName(); + server.updateDeferredMetrics(detailedMetricsName, processingTime); + call.setDeferredError(t); + } + } + + @InterfaceStability.Unstable + public static ProtobufRpcEngineCallback2 registerForDeferredResponse() { + ProtobufRpcEngineCallback2 callback = new ProtobufRpcEngineCallbackImpl(); + CURRENT_CALLBACK.set(callback); + return callback; + } + + /** + * Construct an RPC server. 
+ * + * @param protocolClass the class of protocol + * @param protocolImpl the protocolImpl whose methods will be called + * @param conf the configuration to use + * @param bindAddress the address to bind on to listen for connection + * @param port the port to listen for connections on + * @param numHandlers the number of method handler threads to run + * @param verbose whether each call should be logged + * @param portRangeConfig A config parameter that can be used to restrict + * the range of ports used when port is 0 (an ephemeral port) + * @param alignmentContext provides server state info on client responses + */ + public Server(Class protocolClass, Object protocolImpl, + Configuration conf, String bindAddress, int port, int numHandlers, + int numReaders, int queueSizePerHandler, boolean verbose, + SecretManager secretManager, + String portRangeConfig, AlignmentContext alignmentContext) + throws IOException { + super(bindAddress, port, null, numHandlers, + numReaders, queueSizePerHandler, conf, + serverNameFromClass(protocolImpl.getClass()), secretManager, + portRangeConfig); + setAlignmentContext(alignmentContext); + this.verbose = verbose; + registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass, + protocolImpl); + } + + /** + * Protobuf invoker for {@link RpcInvoker}. + */ + static class ProtoBufRpcInvoker implements RpcInvoker { + private static ProtoClassProtoImpl getProtocolImpl(RPC.Server server, + String protoName, long clientVersion) throws RpcServerException { + ProtoNameVer pv = new ProtoNameVer(protoName, clientVersion); + ProtoClassProtoImpl impl = + server.getProtocolImplMap(RPC.RpcKind.RPC_PROTOCOL_BUFFER).get(pv); + if (impl == null) { // no match for Protocol AND Version + VerProtocolImpl highest = server.getHighestSupportedProtocol( + RPC.RpcKind.RPC_PROTOCOL_BUFFER, protoName); + if (highest == null) { + throw new RpcNoSuchProtocolException( + "Unknown protocol: " + protoName); + } + // protocol supported but not the version that client wants + throw new RPC.VersionMismatch(protoName, clientVersion, + highest.version); + } + return impl; + } + + @Override + /** + * This is a server side method, which is invoked over RPC. On success + * the return response has protobuf response payload. On failure, the + * exception name and the stack trace are returned in the response. + * See {@link HadoopRpcResponseProto} + * + * In this method there three types of exceptions possible and they are + * returned in response as follows. + *
+ * <ol>
+ * <li>Exceptions encountered in this method that are returned
+ * as {@link RpcServerException}</li>
+ * <li>Exceptions thrown by the service is wrapped in ServiceException.
+ * In that this method returns in response the exception thrown by the
+ * service.</li>
+ * <li>Other exceptions thrown by the service. They are returned as
+ * it is.</li>
+ * </ol>
+ */ + public Writable call(RPC.Server server, String connectionProtocolName, + Writable writableRequest, long receiveTime) throws Exception { + RpcProtobufRequest request = (RpcProtobufRequest) writableRequest; + RequestHeaderProto rpcRequest = request.getRequestHeader(); + String methodName = rpcRequest.getMethodName(); + + /** + * RPCs for a particular interface (ie protocol) are done using a + * IPC connection that is setup using rpcProxy. + * The rpcProxy's has a declared protocol name that is + * sent form client to server at connection time. + * + * Each Rpc call also sends a protocol name + * (called declaringClassprotocolName). This name is usually the same + * as the connection protocol name except in some cases. + * For example metaProtocols such ProtocolInfoProto which get info + * about the protocol reuse the connection but need to indicate that + * the actual protocol is different (i.e. the protocol is + * ProtocolInfoProto) since they reuse the connection; in this case + * the declaringClassProtocolName field is set to the ProtocolInfoProto. + */ + + String declaringClassProtoName = + rpcRequest.getDeclaringClassProtocolName(); + long clientVersion = rpcRequest.getClientProtocolVersion(); + if (server.verbose) { + LOG.info("Call: connectionProtocolName=" + connectionProtocolName + + ", method=" + methodName); + } + + ProtoClassProtoImpl protocolImpl = getProtocolImpl(server, + declaringClassProtoName, clientVersion); + BlockingService service = (BlockingService) protocolImpl.protocolImpl; + MethodDescriptor methodDescriptor = service.getDescriptorForType() + .findMethodByName(methodName); + if (methodDescriptor == null) { + String msg = "Unknown method " + methodName + " called on " + + connectionProtocolName + " protocol."; + LOG.warn(msg); + throw new RpcNoSuchMethodException(msg); + } + Message prototype = service.getRequestPrototype(methodDescriptor); + Message param = request.getValue(prototype); + + Message result; + Call currentCall = Server.getCurCall().get(); + try { + server.rpcDetailedMetrics.init(protocolImpl.protocolClass); + CURRENT_CALL_INFO.set(new CallInfo(server, methodName)); + currentCall.setDetailedMetricsName(methodName); + result = service.callBlockingMethod(methodDescriptor, null, param); + // Check if this needs to be a deferred response, + // by checking the ThreadLocal callback being set + if (CURRENT_CALLBACK.get() != null) { + currentCall.deferResponse(); + CURRENT_CALLBACK.set(null); + return null; + } + } catch (ServiceException e) { + Exception exception = (Exception) e.getCause(); + currentCall.setDetailedMetricsName( + exception.getClass().getSimpleName()); + throw (Exception) e.getCause(); + } catch (Exception e) { + currentCall.setDetailedMetricsName(e.getClass().getSimpleName()); + throw e; + } finally { + CURRENT_CALL_INFO.set(null); + } + return RpcWritable.wrap(result); + } + } + } + + // htrace in the ipc layer creates the span name based on toString() + // which uses the rpc header. in the normal case we want to defer decoding + // the rpc header until needed by the rpc engine. 
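The invoke() contract documented above leaves ServiceException handling to the caller: the real failure travels as the cause, often a RemoteException raised on the server. A small hedged sketch of that unwrapping on the client side, using the ProtobufHelper.getRemoteException overload for the shaded exception type; the blocking stub interface is a placeholder, not a generated class:

    import java.io.IOException;
    import org.apache.hadoop.ipc.ProtobufHelper;
    import org.apache.hadoop.thirdparty.protobuf.ServiceException;

    public class UnwrapSketch {
      /** Placeholder for a generated protobuf BlockingInterface stub. */
      interface HypotheticalBlockingStub {
        String echo(String msg) throws ServiceException;
      }

      static String echoOrThrowIoe(HypotheticalBlockingStub stub) throws IOException {
        try {
          return stub.echo("ping");
        } catch (ServiceException se) {
          // Returns the wrapped IOException (commonly a RemoteException from
          // the server) or wraps the ServiceException in a new IOException.
          throw ProtobufHelper.getRemoteException(se);
        }
      }
    }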
+ static class RpcProtobufRequest extends RpcWritable.Buffer { + private volatile RequestHeaderProto requestHeader; + private Message payload; + + RpcProtobufRequest() { + } + + RpcProtobufRequest(RequestHeaderProto header, Message payload) { + this.requestHeader = header; + this.payload = payload; + } + + RequestHeaderProto getRequestHeader() throws IOException { + if (getByteBuffer() != null && requestHeader == null) { + requestHeader = getValue(RequestHeaderProto.getDefaultInstance()); + } + return requestHeader; + } + + @Override + public void writeTo(ResponseBuffer out) throws IOException { + requestHeader.writeDelimitedTo(out); + if (payload != null) { + payload.writeDelimitedTo(out); + } + } + + // this is used by htrace to name the span. + @Override + public String toString() { + try { + RequestHeaderProto header = getRequestHeader(); + return header.getDeclaringClassProtocolName() + "." + + header.getMethodName(); + } catch (IOException e) { + throw new IllegalArgumentException(e); + } + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java index 50b70ca4bec1a..f85adb17d3f8e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java @@ -18,12 +18,17 @@ package org.apache.hadoop.ipc; -import org.apache.hadoop.thirdparty.protobuf.Message; +import com.google.protobuf.Message; +/** + * This engine uses Protobuf 2.5.0. Recommended to upgrade to Protobuf 3.x + * from hadoop-thirdparty and use ProtobufRpcEngineCallback2. + */ +@Deprecated public interface ProtobufRpcEngineCallback { - public void setResponse(Message message); + void setResponse(Message message); - public void error(Throwable t); + void error(Throwable t); } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java new file mode 100644 index 0000000000000..e8c09f56282e6 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ipc; + +import org.apache.hadoop.thirdparty.protobuf.Message; + +public interface ProtobufRpcEngineCallback2 { + + public void setResponse(Message message); + + public void error(Throwable t); + +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java index 4f95863b03db6..e794cb913c232 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java @@ -1043,7 +1043,7 @@ protected Server(String bindAddress, int port, private void initProtocolMetaInfo(Configuration conf) { RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ProtocolMetaInfoServerSideTranslatorPB xlator = new ProtocolMetaInfoServerSideTranslatorPB(this); BlockingService protocolInfoBlockingService = ProtocolInfoService @@ -1067,7 +1067,7 @@ public Server addProtocol(RpcKind rpcKind, Class protocolClass, @Override public Writable call(RPC.RpcKind rpcKind, String protocol, Writable rpcRequest, long receiveTime) throws Exception { - return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest, + return getServerRpcInvoker(rpcKind).call(this, protocol, rpcRequest, receiveTime); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java index 84ecba1d34e9c..0ce78e54a43a0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java @@ -114,7 +114,7 @@ public static boolean isMethodSupported(Object rpcProxy, Class protocol, if (versionMap == null) { Configuration conf = new Configuration(); RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ProtocolMetaInfoPB protocolInfoProxy = getProtocolMetaInfoProxy(rpcProxy, conf); GetProtocolSignatureRequestProto.Builder builder = diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java index 6604bd0cc1c68..f5f0d071f39ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java @@ -42,6 +42,8 @@ static RpcWritable wrap(Object o) { return (RpcWritable)o; } else if (o instanceof Message) { return new ProtobufWrapper((Message)o); + } else if (o instanceof com.google.protobuf.Message) { + return new ProtobufWrapperLegacy((com.google.protobuf.Message) o); } else if (o instanceof Writable) { return new WritableWrapper((Writable)o); } @@ -132,6 +134,49 @@ T readFrom(ByteBuffer bb) throws IOException { } } + // adapter for Protobufs. 
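ProtobufRpcEngineCallback2 above is the shaded-protobuf counterpart of the deprecated callback, and ProtoBufRpcInvoker.call() defers the RPC response whenever a handler has registered one. A rough sketch of a handler using it, patterned on TestProtoBufRpcServerHandoff later in this commit; the response message and threading here are illustrative assumptions:

    import org.apache.hadoop.ipc.ProtobufRpcEngine2;
    import org.apache.hadoop.ipc.ProtobufRpcEngineCallback2;
    import org.apache.hadoop.thirdparty.protobuf.Message;

    public class DeferredResponseSketch {
      /**
       * Must run on the RPC handler thread (inside a blocking service method),
       * so the engine's thread-local call state is populated. The handler
       * returns immediately; the response is completed later.
       */
      static void answerLater(final Message responseWhenReady) {
        final ProtobufRpcEngineCallback2 callback =
            ProtobufRpcEngine2.Server.registerForDeferredResponse();
        new Thread(() -> {
          // ... slow work happens here ...
          callback.setResponse(responseWhenReady);  // or callback.error(t) on failure
        }).start();
      }
    }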
+ static class ProtobufWrapperLegacy extends RpcWritable { + private com.google.protobuf.Message message; + + ProtobufWrapperLegacy(com.google.protobuf.Message message) { + this.message = message; + } + + com.google.protobuf.Message getMessage() { + return message; + } + + @Override + void writeTo(ResponseBuffer out) throws IOException { + int length = message.getSerializedSize(); + length += com.google.protobuf.CodedOutputStream. + computeUInt32SizeNoTag(length); + out.ensureCapacity(length); + message.writeDelimitedTo(out); + } + + @SuppressWarnings("unchecked") + @Override + T readFrom(ByteBuffer bb) throws IOException { + // using the parser with a byte[]-backed coded input stream is the + // most efficient way to deserialize a protobuf. it has a direct + // path to the PB ctor that doesn't create multi-layered streams + // that internally buffer. + com.google.protobuf.CodedInputStream cis = + com.google.protobuf.CodedInputStream.newInstance( + bb.array(), bb.position() + bb.arrayOffset(), bb.remaining()); + try { + cis.pushLimit(cis.readRawVarint32()); + message = message.getParserForType().parseFrom(cis); + cis.checkLastTagWas(0); + } finally { + // advance over the bytes read. + bb.position(bb.position() + cis.getTotalBytesRead()); + } + return (T)message; + } + } + /** * adapter to allow decoding of writables and protobufs from a byte buffer. */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 4448164f4b137..907d55f9be347 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -304,7 +304,11 @@ public Class getRpcRequestWrapper( RpcKindMapValue val = rpcKindMap.get(ProtoUtil.convert(rpcKind)); return (val == null) ? null : val.rpcRequestWrapperClass; } - + + protected RpcInvoker getServerRpcInvoker(RPC.RpcKind rpcKind) { + return getRpcInvoker(rpcKind); + } + public static RpcInvoker getRpcInvoker(RPC.RpcKind rpcKind) { RpcKindMapValue val = rpcKindMap.get(rpcKind); return (val == null) ? null : val.rpcInvoker; @@ -2688,15 +2692,15 @@ private void processRpcRequest(RpcRequestHeaderProto header, call.setPriorityLevel(callQueue.getPriorityLevel(call)); call.markCallCoordinated(false); if(alignmentContext != null && call.rpcRequest != null && - (call.rpcRequest instanceof ProtobufRpcEngine.RpcProtobufRequest)) { + (call.rpcRequest instanceof ProtobufRpcEngine2.RpcProtobufRequest)) { // if call.rpcRequest is not RpcProtobufRequest, will skip the following // step and treat the call as uncoordinated. As currently only certain // ClientProtocol methods request made through RPC protobuf needs to be // coordinated. 
String methodName; String protoName; - ProtobufRpcEngine.RpcProtobufRequest req = - (ProtobufRpcEngine.RpcProtobufRequest) call.rpcRequest; + ProtobufRpcEngine2.RpcProtobufRequest req = + (ProtobufRpcEngine2.RpcProtobufRequest) call.rpcRequest; try { methodName = req.getRequestHeader().getMethodName(); protoName = req.getRequestHeader().getDeclaringClassProtocolName(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java index 766fb0a6557eb..130414c2895b5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java @@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -179,7 +179,7 @@ public int run(String argv[]) throws Exception { servicePrincipal); } RPC.setProtocolEngine(getConf(), TraceAdminProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); InetSocketAddress address = NetUtils.createSocketAddr(hostPort); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); Class xface = TraceAdminProtocolPB.class; diff --git a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto new file mode 100644 index 0000000000000..16ee880e7b720 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * These .proto interfaces are private and stable. + * Please see http://wiki.apache.org/hadoop/Compatibility + * for what changes are allowed for a *stable* .proto interface. + */ +syntax = "proto2"; +/** + * These are the messages used by Hadoop RPC for the Rpc Engine Protocol Buffer + * to marshal the request and response in the RPC layer. + * The messages are sent in addition to the normal RPC header as + * defined in RpcHeader.proto + */ +option java_package = "org.apache.hadoop.ipc.protobuf"; +option java_outer_classname = "ProtobufRpcEngine2Protos"; +option java_generate_equals_and_hash = true; +package hadoop.common; + +/** + * This message is the header for the Protobuf Rpc Engine + * when sending a RPC request from RPC client to the RPC server. 
+ * The actual request (serialized as protobuf) follows this request. + * + * No special header is needed for the Rpc Response for Protobuf Rpc Engine. + * The normal RPC response header (see RpcHeader.proto) are sufficient. + */ +message RequestHeaderProto { + /** Name of the RPC method */ + required string methodName = 1; + + /** + * RPCs for a particular interface (ie protocol) are done using a + * IPC connection that is setup using rpcProxy. + * The rpcProxy's has a declared protocol name that is + * sent form client to server at connection time. + * + * Each Rpc call also sends a protocol name + * (called declaringClassprotocolName). This name is usually the same + * as the connection protocol name except in some cases. + * For example metaProtocols such ProtocolInfoProto which get metainfo + * about the protocol reuse the connection but need to indicate that + * the actual protocol is different (i.e. the protocol is + * ProtocolInfoProto) since they reuse the connection; in this case + * the declaringClassProtocolName field is set to the ProtocolInfoProto + */ + required string declaringClassProtocolName = 2; + + /** protocol version of class declaring the called method */ + required uint64 clientProtocolVersion = 3; +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java index 6505fbb8224f8..0c2530739fa49 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; @@ -119,7 +119,7 @@ private InetSocketAddress startAndGetRPCServerAddress(InetSocketAddress serverAd try { RPC.setProtocolEngine(conf, - HAServiceProtocolPB.class, ProtobufRpcEngine.class); + HAServiceProtocolPB.class, ProtobufRpcEngine2.class); HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator = new HAServiceProtocolServerSideTranslatorPB(new MockHAProtocolImpl()); BlockingService haPbService = HAServiceProtocolService diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java index bbb4ec21812e3..e7130d4da8cb3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java @@ -66,7 +66,7 @@ private static class MyOptions { public int secondsToRun = 15; private int msgSize = 1024; public Class rpcEngine = - ProtobufRpcEngine.class; + ProtobufRpcEngine2.class; private MyOptions(String args[]) { try { @@ -181,7 +181,7 @@ private void processOptions(CommandLine line, Options opts) if (line.hasOption('e')) { String eng = line.getOptionValue('e'); if ("protobuf".equals(eng)) { - rpcEngine = ProtobufRpcEngine.class; + rpcEngine = ProtobufRpcEngine2.class; } else { throw new ParseException("invalid engine: " + 
eng); } @@ -224,7 +224,7 @@ private Server startServer(MyOptions opts) throws IOException { RPC.Server server; // Get RPC server for server side implementation - if (opts.rpcEngine == ProtobufRpcEngine.class) { + if (opts.rpcEngine == ProtobufRpcEngine2.class) { // Create server side implementation PBServerImpl serverImpl = new PBServerImpl(); BlockingService service = TestProtobufRpcProto @@ -378,7 +378,7 @@ private interface RpcServiceWrapper { private RpcServiceWrapper createRpcClient(MyOptions opts) throws IOException { InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.getPort()); - if (opts.rpcEngine == ProtobufRpcEngine.class) { + if (opts.rpcEngine == ProtobufRpcEngine2.class) { final TestRpcService proxy = RPC.getProxy(TestRpcService.class, 0, addr, conf); return new RpcServiceWrapper() { @Override diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java index 10e23baefef9b..c1b0858697682 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java @@ -45,7 +45,7 @@ public void testPBService() throws Exception { // Set RPC engine to protobuf RPC engine Configuration conf2 = new Configuration(); RPC.setProtocolEngine(conf2, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); TestRpcService client = RPC.getProxy(TestRpcService.class, 0, addr, conf2); TestProtoBufRpc.testProtoBufRpc(client); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java index dfb9e934f6055..d813c6b784f5d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java @@ -25,8 +25,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto; -import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto; import org.apache.hadoop.ipc.protobuf.TestProtos.OptRequestProto; @@ -138,7 +136,7 @@ public void testProtocolVersionMismatch() throws IOException, ServiceException { conf = new Configuration(); conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024); // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, NewRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, NewRpcService.class, ProtobufRpcEngine2.class); // Create server side implementation NewServerImpl serverImpl = new NewServerImpl(); @@ -151,7 +149,7 @@ public void testProtocolVersionMismatch() throws IOException, ServiceException { server.start(); - RPC.setProtocolEngine(conf, OldRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, OldRpcService.class, ProtobufRpcEngine2.class); OldRpcService proxy = RPC.getProxy(OldRpcService.class, 0, addr, conf); // Verify that exception is thrown if protocolVersion is mismatch between @@ -168,7 
+166,8 @@ public void testProtocolVersionMismatch() throws IOException, ServiceException { } // Verify that missing of optional field is still compatible in RPC call. - RPC.setProtocolEngine(conf, NewerRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, NewerRpcService.class, + ProtobufRpcEngine2.class); NewerRpcService newProxy = RPC.getProxy(NewerRpcService.class, 0, addr, conf); newProxy.echo(null, emptyRequest); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java index facb8fdd8b191..06c3646310412 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java @@ -94,8 +94,9 @@ public void setUp() throws IOException { // Setup server for both protocols conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024); conf.setBoolean(CommonConfigurationKeys.IPC_SERVER_LOG_SLOW_RPC, true); // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class); - RPC.setProtocolEngine(conf, TestRpcService2.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); + RPC.setProtocolEngine(conf, TestRpcService2.class, + ProtobufRpcEngine2.class); // Create server side implementation PBServerImpl serverImpl = new PBServerImpl(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java index 32300d4f876e1..922e9192c41c6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java @@ -52,7 +52,7 @@ public void test() throws Exception { TestProtobufRpcHandoffProto.newReflectiveBlockingService(serverImpl); RPC.setProtocolEngine(conf, TestProtoBufRpcServerHandoffProtocol.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); RPC.Server server = new RPC.Builder(conf) .setProtocol(TestProtoBufRpcServerHandoffProtocol.class) .setInstance(blockingService) @@ -144,8 +144,8 @@ public static class TestProtoBufRpcServerHandoffServer TestProtos.SleepRequestProto2 request) throws ServiceException { final long startTime = System.currentTimeMillis(); - final ProtobufRpcEngineCallback callback = - ProtobufRpcEngine.Server.registerForDeferredResponse(); + final ProtobufRpcEngineCallback2 callback = + ProtobufRpcEngine2.Server.registerForDeferredResponse(); final long sleepTime = request.getSleepTime(); new Thread() { @Override diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java index ffee086fa9801..22fdcbbe14e65 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java @@ -114,19 +114,19 @@ public void setUp() { ProtocolSignature.resetCache(); RPC.setProtocolEngine(conf, - TestProtocol0.class, ProtobufRpcEngine.class); + TestProtocol0.class, 
ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol1.class, ProtobufRpcEngine.class); + TestProtocol1.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol2.class, ProtobufRpcEngine.class); + TestProtocol2.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol3.class, ProtobufRpcEngine.class); + TestProtocol3.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol4.class, ProtobufRpcEngine.class); + TestProtocol4.class, ProtobufRpcEngine2.class); } @After diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java index d810fe3c5a1e0..90973d2674c01 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java @@ -44,7 +44,7 @@ public class TestRPCWaitForProxy extends TestRpcBase { @Before public void setupProtocolEngine() { RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java index 2729dc3cd9daa..65558a7980a2d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java @@ -26,7 +26,6 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.TestConnectionRetryPolicy; import org.apache.hadoop.ipc.Client.ConnectionId; -import org.apache.hadoop.ipc.TestRpcBase.TestRpcService; import org.junit.Before; import org.junit.Test; @@ -129,7 +128,7 @@ private void verifyRetryPolicyReuseConnections( try { proxy1 = getClient(addr, newConf, retryPolicy1); proxy1.ping(null, newEmptyRequest()); - client = ProtobufRpcEngine.getClient(newConf); + client = ProtobufRpcEngine2.getClient(newConf); final Set conns = client.getConnectionIds(); assertEquals("number of connections in cache is wrong", 1, conns.size()); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java index bf24d680dde2e..010935b60960c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java @@ -70,7 +70,7 @@ public class TestRpcBase { protected void setupConf() { conf = new Configuration(); // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index 72f73822b6fd0..5f944574656ac 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -169,7 +169,7 @@ public void setup() { clientFallBackToSimpleAllowed = true; // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); } static String getQOPNames (QualityOfProtection[] qops){ @@ -356,7 +356,7 @@ public void testPerConnectionConf() throws Exception { newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]); proxy1 = getClient(addr, newConf); proxy1.getAuthMethod(null, newEmptyRequest()); - client = ProtobufRpcEngine.getClient(newConf); + client = ProtobufRpcEngine2.getClient(newConf); Set conns = client.getConnectionIds(); assertEquals("number of connections in cache is wrong", 1, conns.size()); // same conf, connection should be re-used diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java index c86b9ae344195..edd537011c4a8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java @@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.TestRpcBase; @@ -151,7 +151,7 @@ public void testRealUserSetup() throws IOException { configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME); // Set RPC engine to protobuf RPC engine RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5); @@ -181,7 +181,7 @@ public void testRealUserAuthorizationSuccess() throws IOException { getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5); @@ -215,7 +215,7 @@ public void testRealUserIPAuthorizationFailure() throws IOException { getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5); @@ -251,7 +251,7 @@ public void testRealUserIPNotSpecified() throws IOException { conf.setStrings(DefaultImpersonationProvider.getTestProvider(). 
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 2); @@ -286,7 +286,7 @@ public void testRealUserGroupNotSpecified() throws IOException { final Configuration conf = new Configuration(); configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 2); @@ -322,7 +322,7 @@ public void testRealUserGroupAuthorizationFailure() throws IOException { getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group3"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 2); @@ -363,7 +363,7 @@ public void testProxyWithToken() throws Exception { TestTokenSecretManager sm = new TestTokenSecretManager(); SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5, sm); @@ -411,7 +411,7 @@ public void testTokenBySuperUser() throws Exception { SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf); // Set RPC engine to protobuf RPC engine RPC.setProtocolEngine(newConf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(newConf); final Server server = setupTestServer(newConf, 5, sm); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java index c640b39b6f488..68577aad82501 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java @@ -56,7 +56,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryProxy; import org.apache.hadoop.io.retry.RetryUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; @@ -355,7 +355,7 @@ public static ClientProtocol createProxyWithAlignmentContext( AlignmentContext alignmentContext) throws IOException { RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy( diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java index 4028b0e8fb245..47234e8b65d78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus; import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; @@ -181,7 +181,7 @@ static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy( InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int socketTimeout) throws IOException { RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); return RPC.getProxy(ClientDatanodeProtocolPB.class, RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), addr, ticket, conf, factory, socketTimeout); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index d44469211bf91..7e41460ca4c63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -237,7 +237,7 @@ import org.apache.hadoop.io.retry.AsyncCallHandler; import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; @@ -456,7 +456,7 @@ public void setPermission(String src, FsPermission permission) private void setAsyncReturnValue() { final AsyncGet asyncReturnMessage - = ProtobufRpcEngine.getAsyncReturnMessage(); + = ProtobufRpcEngine2.getAsyncReturnMessage(); final AsyncGet asyncGet = new AsyncGet() { @Override @@ -1570,7 +1570,7 @@ public AclStatus getAclStatus(String src) throws IOException { if (Client.isAsynchronousMode()) { rpcProxy.getAclStatus(null, req); final AsyncGet asyncReturnMessage - = ProtobufRpcEngine.getAsyncReturnMessage(); + = ProtobufRpcEngine2.getAsyncReturnMessage(); final AsyncGet asyncGet = new AsyncGet() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java index 5165887ece5f3..ce8a89b84acce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto; import org.apache.hadoop.ipc.ProtobufHelper; -import 
org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; @@ -84,7 +84,7 @@ static ReconfigurationProtocolPB createReconfigurationProtocolProxy( InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int socketTimeout) throws IOException { RPC.setProtocolEngine(conf, ReconfigurationProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); return RPC.getProxy(ReconfigurationProtocolPB.class, RPC.getProtocolVersion(ReconfigurationProtocolPB.class), addr, ticket, conf, factory, socketTimeout); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java index b84848089a319..60924990cfd30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java @@ -47,7 +47,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.RefreshUserMappingsProtocol; @@ -379,7 +379,7 @@ protected static ConnectionContext newConnection(Configuration conf, throw new IllegalStateException(msg); } ProtoImpl classes = PROTO_MAP.get(proto); - RPC.setProtocolEngine(conf, classes.protoPb, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, classes.protoPb, ProtobufRpcEngine2.class); final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(conf, HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index f987e35cbd36a..50094b02d47be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -77,7 +77,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.ipc.RefreshRegistry; @@ -139,7 +139,7 @@ public RouterAdminServer(Configuration conf, Router router) RBFConfigKeys.DFS_ROUTER_ADMIN_HANDLER_COUNT_DEFAULT); RPC.setProtocolEngine(this.conf, RouterAdminProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); RouterAdminProtocolServerSideTranslatorPB routerAdminProtocolTranslator = new RouterAdminProtocolServerSideTranslatorPB(this); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java index 0641c0b82afb6..ee29b7dd2b513 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolTranslatorPB; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; import org.apache.hadoop.hdfs.server.federation.resolver.RouterGenericManager; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -47,7 +47,7 @@ private static RouterAdminProtocolTranslatorPB createRouterProxy( throws IOException { RPC.setProtocolEngine( - conf, RouterAdminProtocolPB.class, ProtobufRpcEngine.class); + conf, RouterAdminProtocolPB.class, ProtobufRpcEngine2.class); AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false); final long version = RPC.getProtocolVersion(RouterAdminProtocolPB.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 345ec705f2cd8..4f1310bb25911 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -133,7 +133,7 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.ipc.RemoteException; @@ -256,7 +256,7 @@ public RouterRpcServer(Configuration configuration, Router router, readerQueueSize); RPC.setProtocolEngine(this.conf, ClientNamenodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ClientNamenodeProtocolServerSideTranslatorPB clientProtocolServerTranslator = diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 5ea33237b672c..7422989d6aad2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -71,7 +71,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import 
org.apache.hadoop.ipc.RefreshResponse; import org.apache.hadoop.ipc.RemoteException; @@ -1222,7 +1222,7 @@ public int genericRefresh(String[] argv, int i) throws IOException { InetSocketAddress address = NetUtils.createSocketAddr(hostport); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine2.class); GenericRefreshProtocolPB proxy = (GenericRefreshProtocolPB)RPC.getProxy( xface, RPC.getProtocolVersion(xface), address, ugi, conf, NetUtils.getDefaultSocketFactory(conf), 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java index 7c80ad64bc6a2..f908065384193 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java @@ -90,7 +90,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.ipc.RemoteException; @@ -174,7 +174,7 @@ public HAServiceStatus answer(InvocationOnMock invocation) */ private void setupRPCServer(final Configuration conf) throws IOException { RPC.setProtocolEngine( - conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class); + conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine2.class); ClientNamenodeProtocolServerSideTranslatorPB clientNNProtoXlator = new ClientNamenodeProtocolServerSideTranslatorPB(mockNn); diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index ca9096646c7b2..e1ea5840deda5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -349,6 +349,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> replace-sources false + + **/DFSUtil.java +
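The series applies one pattern throughout: every RPC.setProtocolEngine registration is switched from ProtobufRpcEngine (protobuf 2.5.0) to ProtobufRpcEngine2 (the shaded protobuf 3.x classes from hadoop-thirdparty), and the DFSUtil hunk that follows keeps a deprecated com.google.protobuf.BlockingService overload for downstreams that still ship non-shaded services. A minimal sketch of that registration pattern, with FooProtocolPB standing in for any protobuf-generated protocol interface (a hypothetical name, not part of the patch):

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.thirdparty.protobuf.BlockingService;

public class ProtobufRpcEngine2Sketch {

  // Server side: mirrors the non-deprecated DFSUtil#addPBProtocol in the hunk
  // below -- the protocol is bound to ProtobufRpcEngine2 and its shaded
  // BlockingService is attached to an existing RPC.Server.
  static void addPBProtocol(Configuration conf, Class<?> protocol,
      BlockingService service, RPC.Server server) throws IOException {
    RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine2.class);
    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service);
  }

  // Client side: the same engine must be registered for the PB interface
  // before the proxy is built, as in the *PBClientImpl constructors below.
  static <T> T getPbProxy(Configuration conf, Class<T> protocol,
      InetSocketAddress addr) throws IOException {
    RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine2.class);
    return RPC.getProxy(protocol, RPC.getProtocolVersion(protocol), addr, conf);
  }
}

The deprecated overload retained in DFSUtil differs only in accepting com.google.protobuf.BlockingService and registering ProtobufRpcEngine, so services generated against non-shaded protobuf 2.5.0 keep working while new code moves to the shaded classes.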
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 48827175c351c..00f14cd82e42f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -72,6 +72,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.INodesInPath; +import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.security.AccessControlException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -92,7 +93,7 @@ import org.apache.hadoop.hdfs.web.AuthFilterInitializer; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpServer2; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AuthenticationFilterInitializer; @@ -1295,6 +1296,27 @@ static URI trimUri(URI uri) { */ public static void addPBProtocol(Configuration conf, Class protocol, BlockingService service, RPC.Server server) throws IOException { + RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine2.class); + server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service); + } + + /** + * Add protobuf based protocol to the {@link RPC.Server}. + * This engine uses Protobuf 2.5.0. Recommended to upgrade to + * Protobuf 3.x from hadoop-thirdparty and use + * {@link DFSUtil#addPBProtocol(Configuration, Class, BlockingService, + * RPC.Server)}. + * @param conf configuration + * @param protocol Protocol interface + * @param service service that implements the protocol + * @param server RPC server to which the protocol & implementation is + * added to + * @throws IOException + */ + @Deprecated + public static void addPBProtocol(Configuration conf, Class protocol, + com.google.protobuf.BlockingService service, RPC.Server server) + throws IOException { RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class); server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java index 3063083db8840..2a56ef3e1868b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java @@ -48,7 +48,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryProxy; import org.apache.hadoop.ipc.AlignmentContext; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProxyCombiner; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RefreshCallQueueProtocol; @@ -305,7 +305,7 @@ private static BalancerProtocols createNNProxyWithBalancerProtocol( private static T createNameNodeProxy(InetSocketAddress address, Configuration conf, UserGroupInformation ugi, Class xface, int rpcTimeout, AlignmentContext alignmentContext) throws IOException { - RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine2.class); return 
RPC.getProtocolProxy(xface, RPC.getProtocolVersion(xface), address, ugi, conf, NetUtils.getDefaultSocketFactory(conf), rpcTimeout, null, null, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeLifelineProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeLifelineProtocolClientSideTranslatorPB.java index 050073fb952ed..220e9e2835625 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeLifelineProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeLifelineProtocolClientSideTranslatorPB.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RpcClientUtil; @@ -57,7 +57,7 @@ public class DatanodeLifelineProtocolClientSideTranslatorPB implements public DatanodeLifelineProtocolClientSideTranslatorPB( InetSocketAddress nameNodeAddr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, DatanodeLifelineProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); rpcProxy = createNamenode(nameNodeAddr, conf, ugi); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java index 6ab98e5880c31..b512d7e21d57d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java @@ -62,7 +62,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RpcClientUtil; @@ -99,7 +99,7 @@ public DatanodeProtocolClientSideTranslatorPB(DatanodeProtocolPB rpcProxy) { public DatanodeProtocolClientSideTranslatorPB(InetSocketAddress nameNodeAddr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, DatanodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); rpcProxy = createNamenode(nameNodeAddr, conf, ugi); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java index 64d57562a1811..031b0e4512ad3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RpcClientUtil; @@ -62,7 +62,7 @@ public InterDatanodeProtocolTranslatorPB(InetSocketAddress addr, int socketTimeout) throws IOException { RPC.setProtocolEngine(conf, InterDatanodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); rpcProxy = RPC.getProxy(InterDatanodeProtocolPB.class, RPC.getProtocolVersion(InterDatanodeProtocolPB.class), addr, ugi, conf, factory, socketTimeout); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java index 07c89552a5f43..ad947f18f13e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java @@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.StopWatch; @@ -235,13 +235,13 @@ protected QJournalProtocol createProxy() throws IOException { true); RPC.setProtocolEngine(confCopy, - QJournalProtocolPB.class, ProtobufRpcEngine.class); + QJournalProtocolPB.class, ProtobufRpcEngine2.class); return SecurityUtil.doAsLoginUser( new PrivilegedExceptionAction() { @Override public QJournalProtocol run() throws IOException { RPC.setProtocolEngine(confCopy, - QJournalProtocolPB.class, ProtobufRpcEngine.class); + QJournalProtocolPB.class, ProtobufRpcEngine2.class); QJournalProtocolPB pbproxy = RPC.getProxy( QJournalProtocolPB.class, RPC.getProtocolVersion(QJournalProtocolPB.class), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java index 36f7faaedb01e..ef44f21d2cfcb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java @@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.net.NetUtils; @@ -85,7 +85,7 @@ public class JournalNodeRpcServer implements QJournalProtocol, LOG.info("RPC server is binding to " + bindHost + ":" + 
addr.getPort()); RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); QJournalProtocolServerSideTranslatorPB translator = new QJournalProtocolServerSideTranslatorPB(this); BlockingService service = QJournalProtocolService diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java index dc352c5d367c2..bb141d8437496 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; @@ -505,7 +505,7 @@ private class JournalNodeProxy { @Override public InterQJournalProtocol run() throws IOException { RPC.setProtocolEngine(confCopy, InterQJournalProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); InterQJournalProtocolPB interQJournalProtocolPB = RPC.getProxy( InterQJournalProtocolPB.class, RPC.getProtocolVersion(InterQJournalProtocolPB.class), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java index f6ba4239d7110..2ba22b1a90ba7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java @@ -19,7 +19,7 @@ import org.apache.hadoop.thirdparty.protobuf.BlockingService; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configurable; @@ -71,7 +71,7 @@ public InMemoryLevelDBAliasMapServer( public void start() throws IOException { RPC.setProtocolEngine(getConf(), AliasMapProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); AliasMapProtocolServerSideTranslatorPB aliasMapProtocolXlator = new AliasMapProtocolServerSideTranslatorPB(this); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index f96682fa2d7b2..2e498e47504e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -188,7 +188,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.ReadaheadPool; import org.apache.hadoop.io.nativeio.NativeIO; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import 
org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.util.MBeans; @@ -1015,7 +1015,7 @@ private void initIpcServer() throws IOException { // Add all the RPC protocols that the Datanode implements RPC.setProtocolEngine(getConf(), ClientDatanodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ClientDatanodeProtocolServerSideTranslatorPB clientDatanodeProtocolXlator = new ClientDatanodeProtocolServerSideTranslatorPB(this); BlockingService service = ClientDatanodeProtocolService diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index fcbd457d7a5e8..230e4020117f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -184,7 +184,7 @@ import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.RetryCache; @@ -281,7 +281,7 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) DFS_NAMENODE_HANDLER_COUNT_DEFAULT); RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ClientNamenodeProtocolServerSideTranslatorPB clientProtocolServerTranslator = @@ -405,7 +405,7 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) InetSocketAddress lifelineRpcAddr = nn.getLifelineRpcServerAddress(conf); if (lifelineRpcAddr != null) { RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); String bindHost = nn.getLifelineRpcServerBindHost(conf); if (bindHost == null) { bindHost = lifelineRpcAddr.getHostName(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index 74257cf697d9f..ec5fa0afddc60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -87,7 +87,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; import org.apache.hadoop.io.MultipleIOException; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RefreshCallQueueProtocol; import org.apache.hadoop.ipc.RefreshResponse; @@ -2029,7 +2029,7 @@ public int genericRefresh(String[] argv, int i) throws IOException { InetSocketAddress address = NetUtils.createSocketAddr(hostport); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine2.class); GenericRefreshProtocolPB proxy = (GenericRefreshProtocolPB) RPC.getProxy(xface, RPC.getProtocolVersion(xface), 
address, ugi, conf, NetUtils.getDefaultSocketFactory(conf), 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java index 946358c7a61c4..f64e7006ea957 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.util.Holder; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.Rule; @@ -225,7 +225,7 @@ public void testRandomized() throws Exception { // If the user specifies a seed, then we should gather all the // IPC trace information so that debugging is easier. This makes // the test run about 25% slower otherwise. - GenericTestUtils.setLogLevel(ProtobufRpcEngine.LOG, Level.ALL); + GenericTestUtils.setLogLevel(ProtobufRpcEngine2.LOG, Level.ALL); } else { seed = new Random().nextLong(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java index cd0216e2f26d0..b88c6433a2bac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java @@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.After; @@ -87,7 +87,7 @@ public class TestQuorumJournalManager { private final List toClose = Lists.newLinkedList(); static { - GenericTestUtils.setLogLevel(ProtobufRpcEngine.LOG, Level.ALL); + GenericTestUtils.setLogLevel(ProtobufRpcEngine2.LOG, Level.ALL); } @Rule diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java index 4c4c171d72a4e..c548b716f044d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java @@ -74,7 +74,7 @@ import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.TestWritable; import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; @@ -314,7 +314,7 @@ private static Server createMockDatanode(BlockTokenSecretManager sm, .getReplicaVisibleLength(any(), any()); RPC.setProtocolEngine(conf, 
ClientDatanodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); BlockingService service = ClientDatanodeProtocolService .newReflectiveBlockingService(mockDN); return new RPC.Builder(conf).setProtocol(ClientDatanodeProtocolPB.class) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java index d13cc38cb8a62..d57a7344fe024 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics; import org.apache.hadoop.http.HttpRequestLog; import org.apache.hadoop.http.HttpServer2; -import org.apache.hadoop.ipc.ProtobufRpcEngine.Server; +import org.apache.hadoop.ipc.ProtobufRpcEngine2.Server; import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java index 156930325bcef..5ff80809450b2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java @@ -22,7 +22,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol; import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB; @@ -34,7 +34,7 @@ public HSClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { super(); RPC.setProtocolEngine(conf, HSClientProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = (HSClientProtocolPB)RPC.getProxy( HSClientProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java index efd48715283d4..7d8344841b886 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java @@ -24,7 +24,7 @@ import 
java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; @@ -108,8 +108,10 @@ public class MRClientProtocolPBClientImpl implements MRClientProtocol, public MRClientProtocolPBClientImpl() {}; - public MRClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, MRClientProtocolPB.class, ProtobufRpcEngine.class); + public MRClientProtocolPBClientImpl(long clientVersion, + InetSocketAddress addr, Configuration conf) throws IOException { + RPC.setProtocolEngine(conf, MRClientProtocolPB.class, + ProtobufRpcEngine2.class); proxy = RPC.getProxy(MRClientProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSProxies.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSProxies.java index 3e238cbc0f6a7..4908523275473 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSProxies.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSProxies.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocol; import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocolPB; @@ -93,7 +93,7 @@ private static HSAdminRefreshProtocol createHSProxyWithHSAdminRefreshProtocol( private static Object createHSProxy(InetSocketAddress address, Configuration conf, UserGroupInformation ugi, Class xface, int rpcTimeout) throws IOException { - RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine2.class); Object proxy = RPC.getProxy(xface, RPC.getProtocolVersion(xface), address, ugi, conf, NetUtils.getDefaultSocketFactory(conf), rpcTimeout); return proxy; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java index aa03bb6940cba..85bd563c83f30 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; 
import org.apache.hadoop.security.AccessControlException; @@ -81,7 +81,7 @@ public HSAdminServer(AggregatedLogDeletionService aggLogDelService, @Override public void serviceInit(Configuration conf) throws Exception { RPC.setProtocolEngine(conf, RefreshUserMappingsProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator = new RefreshUserMappingsProtocolServerSideTranslatorPB( this); @@ -154,7 +154,7 @@ protected void serviceStop() throws Exception { private void addProtocol(Configuration conf, Class protocol, BlockingService blockingService) throws IOException { - RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine2.class); clientRpcServer.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, blockingService); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java index 0c1de58902e94..79d6773d0eca7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java @@ -20,7 +20,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -58,7 +58,7 @@ public class ClientAMProtocolPBClientImpl public ClientAMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ClientAMProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(ClientAMProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java index 8c8d7f17feac7..70f0ed77041ad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import 
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; @@ -207,7 +207,7 @@ public class ApplicationClientProtocolPBClientImpl implements ApplicationClientP public ApplicationClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ApplicationClientProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(ApplicationClientProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java index ceace111ac1d6..c488164ceb4e6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java @@ -23,12 +23,11 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; -import org.apache.hadoop.yarn.api.ApplicationClientProtocolPB; import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; import org.apache.hadoop.yarn.api.ApplicationHistoryProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; @@ -86,7 +85,7 @@ public class ApplicationHistoryProtocolPBClientImpl implements public ApplicationHistoryProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ApplicationHistoryProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(ApplicationHistoryProtocolPB.class, clientVersion, addr, conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationMasterProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationMasterProtocolPBClientImpl.java index b4a20af6b605c..4525a001024d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationMasterProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationMasterProtocolPBClientImpl.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB; @@ -55,7 +55,8 @@ public class ApplicationMasterProtocolPBClientImpl implements ApplicationMasterP public 
ApplicationMasterProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, ApplicationMasterProtocolPB.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, ApplicationMasterProtocolPB.class, + ProtobufRpcEngine2.class); proxy = (ApplicationMasterProtocolPB) RPC.getProxy(ApplicationMasterProtocolPB.class, clientVersion, addr, conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientSCMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientSCMProtocolPBClientImpl.java index a1c2d5b86ef8a..7ee70e51f48e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientSCMProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientSCMProtocolPBClientImpl.java @@ -23,7 +23,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.api.ClientSCMProtocol; import org.apache.hadoop.yarn.api.ClientSCMProtocolPB; @@ -50,7 +50,7 @@ public class ClientSCMProtocolPBClientImpl implements ClientSCMProtocol, public ClientSCMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ClientSCMProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(ClientSCMProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java index d5c191103c282..86fc398f2520e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java @@ -21,7 +21,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -106,7 +106,7 @@ public class ContainerManagementProtocolPBClientImpl implements ContainerManagem public ContainerManagementProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ContainerManagementProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); int expireIntvl = conf.getInt(NM_COMMAND_TIMEOUT, DEFAULT_COMMAND_TIMEOUT); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/CsiAdaptorProtocolPBClientImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/CsiAdaptorProtocolPBClientImpl.java index 2ab36558f44a1..9aff674ef32c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/CsiAdaptorProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/CsiAdaptorProtocolPBClientImpl.java @@ -19,7 +19,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.api.CsiAdaptorPB; import org.apache.hadoop.yarn.api.CsiAdaptorProtocol; @@ -57,7 +57,7 @@ public class CsiAdaptorProtocolPBClientImpl public CsiAdaptorProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, CsiAdaptorPB.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, CsiAdaptorPB.class, ProtobufRpcEngine2.class); this.proxy = RPC.getProxy(CsiAdaptorPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java index 7b48d5f8a721f..17571ed03d297 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java @@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.token.SecretManager; @@ -165,7 +165,7 @@ private String getPackageName(Class clazz) { private Server createServer(Class pbProtocol, InetSocketAddress addr, Configuration conf, SecretManager secretManager, int numHandlers, BlockingService blockingService, String portRangeConfig) throws IOException { - RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine2.class); RPC.Server server = new RPC.Builder(conf).setProtocol(pbProtocol) .setInstance(blockingService).setBindAddress(addr.getHostName()) .setPort(addr.getPort()).setNumHandlers(numHandlers).setVerbose(false) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java index 639017aa622a7..20729a3cc8a73 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -114,7 +114,7 @@ public class ResourceManagerAdministrationProtocolPBClientImpl implements Resour public ResourceManagerAdministrationProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ResourceManagerAdministrationProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = (ResourceManagerAdministrationProtocolPB)RPC.getProxy( ResourceManagerAdministrationProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMAdminProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMAdminProtocolPBClientImpl.java index a1ead5b4176cd..fb7a750852a52 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMAdminProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMAdminProtocolPBClientImpl.java @@ -23,7 +23,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.server.api.SCMAdminProtocol; import org.apache.hadoop.yarn.server.api.SCMAdminProtocolPB; @@ -45,7 +45,7 @@ public class SCMAdminProtocolPBClientImpl implements SCMAdminProtocol, public SCMAdminProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, SCMAdminProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(SCMAdminProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/CollectorNodemanagerProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/CollectorNodemanagerProtocolPBClientImpl.java index af75038096c77..6d2bb5ddaf1f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/CollectorNodemanagerProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/CollectorNodemanagerProtocolPBClientImpl.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import 
org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -63,7 +63,7 @@ public class CollectorNodemanagerProtocolPBClientImpl implements public CollectorNodemanagerProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, CollectorNodemanagerProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); int expireIntvl = conf.getInt(NM_COMMAND_TIMEOUT, DEFAULT_COMMAND_TIMEOUT); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/DistributedSchedulingAMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/DistributedSchedulingAMProtocolPBClientImpl.java index 4bd803f755565..f2527fc13a21c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/DistributedSchedulingAMProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/DistributedSchedulingAMProtocolPBClientImpl.java @@ -20,7 +20,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos; import org.apache.hadoop.yarn.server.api.DistributedSchedulingAMProtocol; @@ -63,7 +63,7 @@ public class DistributedSchedulingAMProtocolPBClientImpl implements public DistributedSchedulingAMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, DistributedSchedulingAMProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(DistributedSchedulingAMProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java index 650df85a01e92..76622e3a1440e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java @@ -23,7 +23,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -52,7 +52,8 @@ public class ResourceTrackerPBClientImpl implements ResourceTracker, Closeable { private ResourceTrackerPB proxy; public ResourceTrackerPBClientImpl(long clientVersion, 
InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, ResourceTrackerPB.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, ResourceTrackerPB.class, + ProtobufRpcEngine2.class); proxy = (ResourceTrackerPB)RPC.getProxy( ResourceTrackerPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMUploaderProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMUploaderProtocolPBClientImpl.java index 32f0bce4eb40f..d484ac1ab3d11 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMUploaderProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMUploaderProtocolPBClientImpl.java @@ -23,7 +23,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -50,7 +50,7 @@ public class SCMUploaderProtocolPBClientImpl implements public SCMUploaderProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, SCMUploaderProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(SCMUploaderProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java index dedabc07d6c96..def59584d0ad0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java @@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; @@ -262,7 +262,7 @@ private void test(String rpcClass) throws Exception { new DummyContainerManager(), addr, conf, null, 1); server.start(); RPC.setProtocolEngine(conf, ContainerManagementProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ContainerManagementProtocol proxy = (ContainerManagementProtocol) rpc.getProxy(ContainerManagementProtocol.class, NetUtils.getConnectAddress(server), conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java index 
154052208c18d..124211cc7a486 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java @@ -22,7 +22,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -42,7 +42,8 @@ public class LocalizationProtocolPBClientImpl implements LocalizationProtocol, private LocalizationProtocolPB proxy; public LocalizationProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, LocalizationProtocolPB.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, LocalizationProtocolPB.class, + ProtobufRpcEngine2.class); proxy = (LocalizationProtocolPB)RPC.getProxy( LocalizationProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java index e810046361de4..3ddd05a5ee828 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java @@ -29,7 +29,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.ClientId; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.TestRPC.TestImpl; @@ -220,7 +220,7 @@ public TestProtos.EmptyResponseProto ping( @Test public void testNMAuditLoggerWithIP() throws Exception { Configuration conf = new Configuration(); - RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); // Create server side implementation MyTestRPCServer serverImpl = new MyTestRPCServer(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index 6ad4e3ab8dee5..44ffd1778ebe6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -41,7 
+41,7 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.ipc.StandbyException; @@ -201,7 +201,7 @@ protected void startServer() throws Exception { if (rm.getRMContext().isHAEnabled()) { RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator = new HAServiceProtocolServerSideTranslatorPB(this); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java index 5c5cb25fd6225..901dc8a143020 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; @@ -1045,7 +1045,7 @@ public ResourceScheduler getScheduler() { // Verify that the OpportunisticContainerAllocatorAMSercvice can handle // vanilla ApplicationMasterProtocol clients RPC.setProtocolEngine(conf, ApplicationMasterProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ApplicationMasterProtocolPB ampProxy = RPC.getProxy(ApplicationMasterProtocolPB .class, 1, NetUtils.getConnectAddress(server), conf); @@ -1080,7 +1080,7 @@ public ResourceScheduler getScheduler() { // Verify that the DistrubutedSchedulingService can handle the // DistributedSchedulingAMProtocol clients as well RPC.setProtocolEngine(conf, DistributedSchedulingAMProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); DistributedSchedulingAMProtocolPB dsProxy = RPC.getProxy(DistributedSchedulingAMProtocolPB .class, 1, NetUtils.getConnectAddress(server), conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java index 282ff11152265..e8a532d1baf29 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java @@ -31,7 +31,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.CallerContext; import org.apache.hadoop.ipc.ClientId; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.TestRPC.TestImpl; @@ -420,7 +420,7 @@ public TestProtos.EmptyResponseProto ping( public void testRMAuditLoggerWithIP() throws Exception { Configuration conf = new Configuration(); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); // Create server side implementation MyTestRPCServer serverImpl = new MyTestRPCServer(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java index 8734d9f36f9ec..03b68944b481f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java @@ -23,7 +23,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolInfo; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; @@ -160,7 +160,7 @@ protected void serviceStart() throws Exception { Configuration conf = getConfig(); // Set RPC engine to protobuf RPC engine RPC.setProtocolEngine(conf, CustomProtocol.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); BlockingService service = TestRpcServiceProtos.CustomProto @@ -194,7 +194,7 @@ public void testClientToAMTokens() throws Exception { conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, CustomProtocol.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, CustomProtocol.class, ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); ContainerManagementProtocol containerManager = From 785b1def959fab6b8b7ffff66410bcd240feee13 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Fri, 12 Jun 2020 14:32:19 -0700 Subject: [PATCH 023/131] HDFS-15387. FSUsage#DF should consider ViewFSOverloadScheme in processPath. Contributed by Uma Maheswara Rao G. 
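What changed, in brief: hadoop fs -df only produced the per-mount-point listing when the file system's scheme was literally viewfs. ViewFileSystemOverloadScheme keeps the scheme of the file system it wraps (hdfs in the new test), so that branch was never taken. The detection therefore becomes an instanceof check; a rough sketch of the pattern, with an illustrative helper name, not the verbatim code in the hunks below:

    // The overload-scheme wrapper reports the wrapped scheme (e.g. "hdfs"),
    // so it cannot be recognised by scheme name alone.
    static boolean isMountTableBacked(FileSystem fs) {
      return fs.getScheme().equals(FsConstants.VIEWFS_SCHEME)
          || fs instanceof ViewFileSystemOverloadScheme;
    }

Because ViewFileSystemOverloadScheme extends ViewFileSystem, the existing cast in FsUsage#DF keeps working once the extra condition lets it through.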
--- .../org/apache/hadoop/fs/shell/FsUsage.java | 3 +- .../hadoop/fs/viewfs/ViewFileSystemUtil.java | 14 +- ...ileSystemOverloadSchemeWithFSCommands.java | 173 ++++++++++++++++++ 3 files changed, 188 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java index 6596527738058..64aade3df9539 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java @@ -128,7 +128,8 @@ private void addToUsagesTable(URI uri, FsStatus fsStatus, @Override protected void processPath(PathData item) throws IOException { - if (ViewFileSystemUtil.isViewFileSystem(item.fs)) { + if (ViewFileSystemUtil.isViewFileSystem(item.fs) + || ViewFileSystemUtil.isViewFileSystemOverloadScheme(item.fs)) { ViewFileSystem viewFileSystem = (ViewFileSystem) item.fs; Map fsStatusMap = ViewFileSystemUtil.getStatus(viewFileSystem, item.path); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java index c8a1d78cffd46..f486a10b4c8f9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java @@ -51,6 +51,17 @@ public static boolean isViewFileSystem(final FileSystem fileSystem) { return fileSystem.getScheme().equals(FsConstants.VIEWFS_SCHEME); } + /** + * Check if the FileSystem is a ViewFileSystemOverloadScheme. + * + * @param fileSystem + * @return true if the fileSystem is ViewFileSystemOverloadScheme + */ + public static boolean isViewFileSystemOverloadScheme( + final FileSystem fileSystem) { + return fileSystem instanceof ViewFileSystemOverloadScheme; + } + /** * Get FsStatus for all ViewFsMountPoints matching path for the given * ViewFileSystem. @@ -93,7 +104,8 @@ public static boolean isViewFileSystem(final FileSystem fileSystem) { */ public static Map getStatus( FileSystem fileSystem, Path path) throws IOException { - if (!isViewFileSystem(fileSystem)) { + if (!(isViewFileSystem(fileSystem) + || isViewFileSystemOverloadScheme(fileSystem))) { throw new UnsupportedFileSystemException("FileSystem '" + fileSystem.getUri() + "'is not a ViewFileSystem."); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java new file mode 100644 index 0000000000000..a974377fac01c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java @@ -0,0 +1,173 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools; + +import static org.junit.Assert.assertEquals; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.PrintStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.List; +import java.util.Scanner; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme; +import org.apache.hadoop.fs.viewfs.ViewFsTestSetup; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.util.ToolRunner; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.Lists; + +/** + * Tests HDFS commands with ViewFileSystemOverloadScheme with configured mount + * links. + */ +public class TestViewFileSystemOverloadSchemeWithFSCommands { + private static final String FS_IMPL_PATTERN_KEY = "fs.%s.impl"; + private static final String HDFS_SCHEME = "hdfs"; + private Configuration conf = null; + private MiniDFSCluster cluster = null; + private URI defaultFSURI; + private File localTargetDir; + private static final String TEST_ROOT_DIR = PathUtils + .getTestDirName(TestViewFileSystemOverloadSchemeWithFSCommands.class); + private static final String HDFS_USER_FOLDER = "/HDFSUser"; + private static final String LOCAL_FOLDER = "/local"; + private final ByteArrayOutputStream out = new ByteArrayOutputStream(); + private final ByteArrayOutputStream err = new ByteArrayOutputStream(); + private static final PrintStream OLD_OUT = System.out; + private static final PrintStream OLD_ERR = System.err; + + /** + * Sets up the configurations and starts the MiniDFSCluster. + */ + @Before + public void startCluster() throws IOException { + conf = new Configuration(); + conf.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1); + conf.set(String.format(FS_IMPL_PATTERN_KEY, HDFS_SCHEME), + ViewFileSystemOverloadScheme.class.getName()); + conf.set(String.format( + FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + HDFS_SCHEME), DistributedFileSystem.class.getName()); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); + cluster.waitClusterUp(); + defaultFSURI = + URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY)); + localTargetDir = new File(TEST_ROOT_DIR, "/root/"); + Assert.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme. 
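    // The two properties set above are what route a plain hdfs:// URI through
    // the mount table: fs.<scheme>.impl selects the overload-scheme wrapper,
    // and fs.viewfs.overload.scheme.target.<scheme>.impl names the real file
    // system the mount targets resolve to. Expanded for this test, shown here
    // only for readability and assuming the standard key patterns behind the
    // constants used in the String.format calls above:
    //
    //   fs.hdfs.impl = org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme
    //   fs.viewfs.overload.scheme.target.hdfs.impl =
    //       org.apache.hadoop.hdfs.DistributedFileSystem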
+ } + + @After + public void tearDown() throws IOException { + try { + System.out.flush(); + System.err.flush(); + } finally { + System.setOut(OLD_OUT); + System.setErr(OLD_ERR); + } + if (cluster != null) { + FileSystem.closeAll(); + cluster.shutdown(); + } + resetStream(); + } + + private void redirectStream() { + System.setOut(new PrintStream(out)); + System.setErr(new PrintStream(err)); + } + + private void resetStream() { + out.reset(); + err.reset(); + } + + private static void scanIntoList(final ByteArrayOutputStream baos, + final List list) { + final Scanner scanner = new Scanner(baos.toString()); + while (scanner.hasNextLine()) { + list.add(scanner.nextLine()); + } + scanner.close(); + } + + /** + * Adds the given mount links to config. sources contains mount link src and + * the respective index location in targets contains the target uri. + */ + void addMountLinks(String mountTable, String[] sources, String[] targets, + Configuration config) throws IOException, URISyntaxException { + ViewFsTestSetup.addMountLinksToConf(mountTable, sources, targets, config); + } + + /** + * Tests DF with ViewFSOverloadScheme. + */ + @Test + public void testDFWithViewFsOverloadScheme() throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + List mounts = Lists.newArrayList(); + mounts.add(HDFS_USER_FOLDER); + mounts.add(LOCAL_FOLDER); + addMountLinks(defaultFSURI.getAuthority(), + mounts.toArray(new String[mounts.size()]), + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString() }, + conf); + FsShell fsShell = new FsShell(conf); + try { + redirectStream(); + int ret = + ToolRunner.run(fsShell, new String[] {"-fs", defaultFSURI.toString(), + "-df", "-h", defaultFSURI.toString() + "/" }); + assertEquals(0, ret); + final List errList = Lists.newArrayList(); + scanIntoList(out, errList); + assertEquals(3, errList.size()); + for (int i = 1; i < errList.size(); i++) { + String[] lineSplits = errList.get(i).split("\\s+"); + String mount = lineSplits[lineSplits.length - 1]; + mounts.remove(mount); + } + String msg = + "DF was not calculated on all mounts. The left out mounts are: " + + mounts; + assertEquals(msg, 0, mounts.size()); + } finally { + fsShell.close(); + } + } +} \ No newline at end of file From 719b53a79dc169a8c52229831dcb011935a8a151 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Sat, 13 Jun 2020 09:35:05 -0700 Subject: [PATCH 024/131] HDFS-15351. Blocks scheduled count was wrong on truncate. Contributed by hemanthboyina. 
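The root cause: truncate converts the file's last block back to under construction, and convertLastBlockToUnderConstruction removed the block from pendingReconstruction without giving back the per-datanode "blocks scheduled" credit taken when the reconstruction work was queued, so the chosen targets reported a scheduled-block count that was too high. The fix keeps the removed PendingBlockInfo and decrements the counter for each recorded target; condensed, the new code amounts to:

    PendingBlockInfo removed = pendingReconstruction.remove(lastBlock);
    if (removed != null) {
      List<DatanodeStorageInfo> targets = removed.getTargets();
      DatanodeStorageInfo.decrementBlocksScheduled(
          targets.toArray(new DatanodeStorageInfo[0]));
    }

(The hunk below spells out the toArray step with an explicit array; the behaviour is the same.)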
--- .../server/blockmanagement/BlockManager.java | 9 ++- .../hdfs/TestBlocksScheduledCounter.java | 56 +++++++++++++++++++ 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 0b1da8bccc029..7f0f17e7b42fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -1287,7 +1287,14 @@ public LocatedBlock convertLastBlockToUnderConstruction( neededReconstruction.remove(lastBlock, replicas.liveReplicas(), replicas.readOnlyReplicas(), replicas.outOfServiceReplicas(), getExpectedRedundancyNum(lastBlock)); - pendingReconstruction.remove(lastBlock); + PendingBlockInfo remove = pendingReconstruction.remove(lastBlock); + if (remove != null) { + List locations = remove.getTargets(); + DatanodeStorageInfo[] removedBlockTargets = + new DatanodeStorageInfo[locations.size()]; + locations.toArray(removedBlockTargets); + DatanodeStorageInfo.decrementBlocksScheduled(removedBlockTargets); + } // remove this block from the list of pending blocks to be deleted. for (DatanodeStorageInfo storage : targets) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java index 95f4e2c59734a..95d6825d29740 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java @@ -202,4 +202,60 @@ public void testScheduledBlocksCounterDecrementOnDeletedBlock() } } + /** + * Test Block Scheduled counter on truncating a file. + * @throws Exception + */ + @Test + public void testBlocksScheduledCounterOnTruncate() throws Exception { + final Configuration conf = new HdfsConfiguration(); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster.waitActive(); + BlockManager bm = cluster.getNamesystem().getBlockManager(); + try { + DistributedFileSystem dfs = cluster.getFileSystem(); + // 1. stop a datanode + cluster.stopDataNode(0); + + // 2. create a file + Path filePath = new Path("/tmp"); + DFSTestUtil.createFile(dfs, filePath, 1024, (short) 3, 0L); + + DatanodeManager datanodeManager = + cluster.getNamesystem().getBlockManager().getDatanodeManager(); + ArrayList dnList = + new ArrayList(); + datanodeManager.fetchDatanodes(dnList, dnList, false); + + // 3. restart the stopped datanode + cluster.restartDataNode(0); + + // 4. 
disable the heartbeats + for (DataNode dn : cluster.getDataNodes()) { + DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true); + } + + cluster.getNamesystem().writeLock(); + try { + BlockManagerTestUtil.computeAllPendingWork(bm); + BlockManagerTestUtil.updateState(bm); + assertEquals(1L, bm.getPendingReconstructionBlocksCount()); + } finally { + cluster.getNamesystem().writeUnlock(); + } + + // 5.truncate the file whose block exists in pending reconstruction + dfs.truncate(filePath, 10); + int blocksScheduled = 0; + for (DatanodeDescriptor descriptor : dnList) { + if (descriptor.getBlocksScheduled() != 0) { + blocksScheduled += descriptor.getBlocksScheduled(); + } + } + assertEquals(0, blocksScheduled); + } finally { + cluster.shutdown(); + } + } } \ No newline at end of file From f41a144077fc0e2d32072e0d088c1abd1897cee5 Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Mon, 15 Jun 2020 09:15:53 +0900 Subject: [PATCH 025/131] HDFS-15403. NPE in FileIoProvider#transferToSocketFully. Contributed by hemanthboyina. --- .../apache/hadoop/hdfs/server/datanode/FileIoProvider.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java index fc98d3a6b7a8e..cf6902912f6ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java @@ -280,7 +280,12 @@ public void transferToSocketFully( profilingEventHook.afterFileIo(volume, TRANSFER, begin, count); } catch (Exception e) { String em = e.getMessage(); - if (!em.startsWith("Broken pipe") && !em.startsWith("Connection reset")) { + if (em != null) { + if (!em.startsWith("Broken pipe") + && !em.startsWith("Connection reset")) { + onFailure(volume, begin); + } + } else { onFailure(volume, begin); } throw e; From 81d8a887b0406380e469c76ed2e41022a6372dd7 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Mon, 15 Jun 2020 10:55:26 +0900 Subject: [PATCH 026/131] SPNEGO TLS verification Signed-off-by: Akira Ajisaka --- .../org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 4caa0e91fbb54..df3f7eaf7f055 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -152,6 +152,7 @@ public class WebHdfsFileSystem extends FileSystem + "/v" + VERSION; public static final String EZ_HEADER = "X-Hadoop-Accept-EZ"; public static final String FEFINFO_HEADER = "X-Hadoop-feInfo"; + public static final String DFS_HTTP_POLICY_KEY = "dfs.http.policy"; /** * Default connection factory may be overridden in tests to use smaller @@ -181,6 +182,7 @@ public class WebHdfsFileSystem extends FileSystem private DFSOpsCountStatistics storageStatistics; private KeyProvider testProvider; + private boolean isTLSKrb; /** * Return the protocol scheme for the FileSystem. 
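    // Condensed view of what this patch adds (a sketch, not the verbatim
    // hunks, which follow): isTLSKrb records whether dfs.http.policy is
    // HTTPS_ONLY, and connect() then rejects any plain-http request while
    // Kerberos security is enabled, instead of letting SPNEGO negotiate over
    // an unencrypted channel:
    //
    //   if (url.getProtocol().equals("http")
    //       && UserGroupInformation.isSecurityEnabled()
    //       && isTLSKrb) {
    //     throw new IOException("Access denied: dfs.http.policy is HTTPS_ONLY.");
    //   }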
@@ -242,6 +244,7 @@ public synchronized void initialize(URI uri, Configuration conf .newDefaultURLConnectionFactory(connectTimeout, readTimeout, conf); } + this.isTLSKrb = "HTTPS_ONLY".equals(conf.get(DFS_HTTP_POLICY_KEY)); ugi = UserGroupInformation.getCurrentUser(); this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority()); @@ -699,6 +702,11 @@ protected HttpURLConnection connect(URL url) throws IOException { //redirect hostname and port redirectHost = null; + if (url.getProtocol().equals("http") && + UserGroupInformation.isSecurityEnabled() && + isTLSKrb) { + throw new IOException("Access denied: dfs.http.policy is HTTPS_ONLY."); + } // resolve redirects for a DN operation unless already resolved if (op.getRedirect() && !redirected) { From 730a39d1388548f22f76132a6734d61c24c3eb72 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Tue, 16 Jun 2020 15:58:16 -0700 Subject: [PATCH 027/131] HDFS-15372. Files in snapshots no longer see attribute provider permissions. Contributed by Stephen O'Donnell. Signed-off-by: Wei-Chiu Chuang --- .../hdfs/server/namenode/FSDirectory.java | 16 ++- .../server/namenode/FSPermissionChecker.java | 46 ++++--- .../hdfs/server/namenode/INodesInPath.java | 42 +++++++ .../namenode/TestINodeAttributeProvider.java | 115 ++++++++++++++++++ 4 files changed, 199 insertions(+), 20 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 5895c6b08ec23..cd9eb0944f566 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -73,7 +73,6 @@ import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; -import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -2032,7 +2031,20 @@ INodeAttributes getAttributes(INodesInPath iip) // first empty component for the root. however file status // related calls are expected to strip out the root component according // to TestINodeAttributeProvider. - byte[][] components = iip.getPathComponents(); + // Due to HDFS-15372 the attribute provider should received the resolved + // snapshot path. Ie, rather than seeing /d/.snapshot/sn/data it should + // see /d/data. However, for the path /d/.snapshot/sn it should see this + // full path. Node.getPathComponents always resolves the path to the + // original location, so we need to check if ".snapshot/sn" is the last + // path to ensure the provider receives the correct components. 
+ byte[][] components; + if (iip.isSnapshot() && !iip.isDotSnapshotDirPrefix()) { + // For snapshot paths, node.getPathComponents unless the last component + // is like ".snapshot/sn" + components = node.getPathComponents(); + } else { + components = iip.getPathComponents(); + } components = Arrays.copyOfRange(components, 1, components.length); nodeAttrs = ap.getAttributes(components, nodeAttrs); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index c697ead7000d2..615b164c19814 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -19,11 +19,14 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Stack; import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.ipc.CallerContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -207,7 +210,7 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner, final INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.length]; final byte[][] components = inodesInPath.getPathComponents(); for (int i = 0; i < inodes.length && inodes[i] != null; i++) { - inodeAttrs[i] = getINodeAttrs(components, i, inodes[i], snapshotId); + inodeAttrs[i] = getINodeAttrs(inodes[i], snapshotId); } String path = inodesInPath.getPath(); @@ -257,8 +260,7 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner, void checkPermission(INode inode, int snapshotId, FsAction access) throws AccessControlException { byte[][] pathComponents = inode.getPathComponents(); - INodeAttributes nodeAttributes = getINodeAttrs(pathComponents, - pathComponents.length - 1, inode, snapshotId); + INodeAttributes nodeAttributes = getINodeAttrs(inode, snapshotId); try { INodeAttributes[] iNodeAttr = {nodeAttributes}; AccessControlEnforcer enforcer = getAccessControlEnforcer(); @@ -367,23 +369,31 @@ public void checkPermissionWithContext( authzContext.getSubAccess(), authzContext.isIgnoreEmptyDir()); } - private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx, - INode inode, int snapshotId) { + private INodeAttributes getINodeAttrs(INode inode, int snapshotId) { INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId); + /** + * This logic is similar to {@link FSDirectory#getAttributes()} and it + * ensures that the attribute provider sees snapshot paths resolved to their + * original location. This means the attributeProvider can apply permissions + * to the snapshot paths in the same was as the live paths. See HDFS-15372. + */ if (getAttributesProvider() != null) { - String[] elements = new String[pathIdx + 1]; /** - * {@link INode#getPathComponents(String)} returns a null component - * for the root only path "/". Assign an empty string if so. + * If we have an inode representing a path like /d/.snapshot/snap1 + * then calling inode.getPathComponents returns [null, d, snap1]. If we + * call inode.getFullPathName() it will return /d/.snapshot/snap1. 
For + * this special path (snapshot root) the attribute provider should see: + * + * [null, d, .snapshot/snap1] + * + * Using IIP.resolveFromRoot, it will take the inode fullPathName and + * construct an IIP object that give the correct components as above. */ - if (pathByNameArr.length == 1 && pathByNameArr[0] == null) { - elements[0] = ""; - } else { - for (int i = 0; i < elements.length; i++) { - elements[i] = DFSUtil.bytes2String(pathByNameArr[i]); - } - } - inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs); + INodesInPath iip = INodesInPath.resolveFromRoot(inode); + byte[][] components = iip.getPathComponents(); + components = Arrays.copyOfRange(components, 1, components.length); + inodeAttrs = getAttributesProvider() + .getAttributes(components, inodeAttrs); } return inodeAttrs; } @@ -439,7 +449,7 @@ private void checkSubAccess(byte[][] components, int pathIdx, if (!(cList.isEmpty() && ignoreEmptyDir)) { //TODO have to figure this out with inodeattribute provider INodeAttributes inodeAttr = - getINodeAttrs(components, pathIdx, d, snapshotId); + getINodeAttrs(d, snapshotId); if (!hasPermission(inodeAttr, access)) { throw new AccessControlException( toAccessControlString(inodeAttr, d.getFullPathName(), access)); @@ -457,7 +467,7 @@ private void checkSubAccess(byte[][] components, int pathIdx, if (inodeAttr.getFsPermission().getStickyBit()) { for (INode child : cList) { INodeAttributes childInodeAttr = - getINodeAttrs(components, pathIdx, child, snapshotId); + getINodeAttrs(child, snapshotId); if (isStickyBitViolated(inodeAttr, childInodeAttr)) { List allComponentList = new ArrayList<>(); for (int i = 0; i <= pathIdx; ++i) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java index f072220677733..179c1c01be4fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java @@ -46,6 +46,20 @@ private static boolean isDotSnapshotDir(byte[] pathComponent) { Arrays.equals(HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent); } + /** + * Returns true if the given path component starts with the same byte stream + * as {@link HdfsConstants#DOT_SNAPSHOT_DIR}, indicating the component + * starts with a DotSnapshot directory. + * @param pathComponent Bytes representing the pathComponent + * @return True is the component starts with + * {@link HdfsConstants#DOT_SNAPSHOT_DIR} and false otherwise. + */ + private static boolean isDotSnapshotDirPrefix(byte[] pathComponent) { + return pathComponent != null && + isDotSnapshotDir(Arrays.copyOf( + pathComponent, HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES.length)); + } + private static INode[] getINodes(final INode inode) { int depth = 0, index; INode tmp = inode; @@ -135,6 +149,27 @@ static INodesInPath resolve(final INodeDirectory startingDir, return resolve(startingDir, components, false); } + /** + * Retrieves the existing INodes from a path, starting at the root directory. + * The root directory is located by following the parent link in the inode + * recursively until the final root inode is found. + * The inodes returned will depend upon the output of inode.getFullPathName(). 
+ * For a snapshot path, like /data/.snapshot/snap1, it will be resolved to: + * [null, data, .snapshot/snap1] + * For a file in the snapshot, as inode.getFullPathName resolves the snapshot + * information, the returned inodes for a path like /data/.snapshot/snap1/d1 + * would be: + * [null, data, d1] + * @param inode the {@link INode} to be resolved + * @return INodesInPath + */ + static INodesInPath resolveFromRoot(INode inode) { + INode[] inodes = getINodes(inode); + byte[][] paths = INode.getPathComponents(inode.getFullPathName()); + INodeDirectory rootDir = inodes[0].asDirectory(); + return resolve(rootDir, paths); + } + static INodesInPath resolve(final INodeDirectory startingDir, byte[][] components, final boolean isRaw) { Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0); @@ -462,6 +497,13 @@ boolean isDotSnapshotDir() { return isDotSnapshotDir(getLastLocalName()); } + /** + * @return Return true if .snapshot is the prefix of the last path component. + */ + boolean isDotSnapshotDirPrefix() { + return isDotSnapshotDirPrefix(getLastLocalName()); + } + /** * @return if this is a /.reserved/raw path. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java index 433be79b87a28..e7e1f90b2c0f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.*; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; @@ -80,6 +81,7 @@ public void checkPermission(String fsOwner, String supergroup, ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir); } CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access); + CALLED.add("checkPermission|" + path); } @Override @@ -93,6 +95,7 @@ public void checkPermissionWithContext( CALLED.add("checkPermission|" + authzContext.getAncestorAccess() + "|" + authzContext.getParentAccess() + "|" + authzContext .getAccess()); + CALLED.add("checkPermission|" + authzContext.getPath()); } } @@ -109,7 +112,12 @@ public void stop() { @Override public INodeAttributes getAttributes(String[] pathElements, final INodeAttributes inode) { + String fullPath = String.join("/", pathElements); + if (!fullPath.startsWith("/")) { + fullPath = "/" + fullPath; + } CALLED.add("getAttributes"); + CALLED.add("getAttributes|"+fullPath); final boolean useDefault = useDefault(pathElements); final boolean useNullAcl = useNullAclFeature(pathElements); return new INodeAttributes() { @@ -485,4 +493,111 @@ public Void run() throws Exception { } }); } + + @Test + // HDFS-15372 - Attribute provider should not see the snapshot path as it + // should be resolved into the original path name before it hits the provider. 
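    // Summarising the assertions that follow (paths are the ones created in
    // this test; a summary for readability, not additional test code):
    //
    //   access:             /user/.snapshot/snapshot_1/authz/child2
    //   checkPermission ->  /user/.snapshot/snapshot_1/authz/child2  (literal)
    //   getAttributes   ->  /user/authz/child2                       (resolved)
    //
    // The only .snapshot path the provider itself sees is the snapshot root,
    // e.g. getAttributes|/user/.snapshot/snapshot_1 (but never /user/.snapshot).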
+ public void testAttrProviderSeesResolvedSnapahotPaths() throws Exception { + FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); + DistributedFileSystem hdfs = miniDFS.getFileSystem(); + final Path userPath = new Path("/user"); + final Path authz = new Path("/user/authz"); + final Path authzChild = new Path("/user/authz/child2"); + + fs.mkdirs(userPath); + fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION)); + fs.mkdirs(authz); + hdfs.allowSnapshot(userPath); + fs.setPermission(authz, new FsPermission(HDFS_PERMISSION)); + fs.mkdirs(authzChild); + fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION)); + fs.createSnapshot(userPath, "snapshot_1"); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1", + new String[]{"g1"}); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); + final Path snapChild = + new Path("/user/.snapshot/snapshot_1/authz/child2"); + // Run various methods on the path to access the attributes etc. + fs.getAclStatus(snapChild); + fs.getContentSummary(snapChild); + fs.getFileStatus(snapChild); + Assert.assertFalse(CALLED.contains("getAttributes|" + + snapChild.toString())); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz/child2")); + // The snapshot path should be seen by the permission checker, but when + // it checks access, the paths will be resolved so the attributeProvider + // only sees the resolved path. + Assert.assertTrue( + CALLED.contains("checkPermission|" + snapChild.toString())); + CALLED.clear(); + fs.getAclStatus(new Path("/")); + Assert.assertTrue(CALLED.contains("checkPermission|/")); + Assert.assertTrue(CALLED.contains("getAttributes|/")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user")); + Assert.assertTrue(CALLED.contains("checkPermission|/user")); + Assert.assertTrue(CALLED.contains("getAttributes|/user")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot")); + Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot")); + // attribute provider never sees the .snapshot path directly. + Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("checkPermission|/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("getAttributes|/user/.snapshot/snapshot_1")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED + .contains("checkPermission|/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/authz")); + Assert.assertTrue(CALLED.contains("checkPermission|/user/authz")); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); + return null; + } + }); + // Delete the files / folders covered by the snapshot, then re-check they + // are all readable correctly. + fs.delete(authz, true); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot")); + Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot")); + // attribute provider never sees the .snapshot path directly. 
+ Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("checkPermission|/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("getAttributes|/user/.snapshot/snapshot_1")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED + .contains("checkPermission|/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); + + return null; + } + }); + + } } From fc4ebb0499fe1095b87ff782c265e9afce154266 Mon Sep 17 00:00:00 2001 From: Vinayakumar B Date: Wed, 17 Jun 2020 09:26:41 +0530 Subject: [PATCH 028/131] YARN-10314. YarnClient throws NoClassDefFoundError for WebSocketException with only shaded client jars (#2075) --- .../hadoop-client-minicluster/pom.xml | 16 +++++++++++++--- .../hadoop-client-runtime/pom.xml | 11 +++++++---- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index b447eedf1349f..f66528dc7f23c 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -811,15 +811,25 @@ */** - + org.eclipse.jetty:jetty-client */** + + org.eclipse.jetty:jetty-xml + + */** + + + + org.eclipse.jetty:jetty-http + + */** + + diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml index fe95ed8688548..9a1efff6b1455 100644 --- a/hadoop-client-modules/hadoop-client-runtime/pom.xml +++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml @@ -158,12 +158,8 @@ com.google.code.findbugs:jsr305 io.dropwizard.metrics:metrics-core - org.eclipse.jetty.websocket:* org.eclipse.jetty:jetty-servlet org.eclipse.jetty:jetty-security - org.eclipse.jetty:jetty-client - org.eclipse.jetty:jetty-http - org.eclipse.jetty:jetty-xml org.ow2.asm:* org.bouncycastle:* @@ -213,6 +209,13 @@ about.html + + + org.eclipse.jetty.websocket:* + + about.html + + org.apache.kerby:kerb-util From c8ed33cd2a4b92618ba2bd7d2cd6cc7961690e44 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 17 Jun 2020 13:55:40 +0530 Subject: [PATCH 029/131] HADOOP-9851. dfs -chown does not like "+" plus sign in user name. Contributed by Andras Bokor. --- .../src/main/java/org/apache/hadoop/fs/FsShellPermissions.java | 2 +- .../src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java index 76e379c51f605..1a8a77723176e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java @@ -112,7 +112,7 @@ protected void processPath(PathData item) throws IOException { // used by chown/chgrp static private String allowedChars = Shell.WINDOWS ? 
"[-_./@a-zA-Z0-9 ]" : - "[-_./@a-zA-Z0-9]"; + "[-+_./@a-zA-Z0-9]"; /** * Used to change owner and/or group of files diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 65032514b4e5f..d3920fca569d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -1468,6 +1468,9 @@ public void testFilePermissions() throws IOException { runCmd(shell, "-chgrp", "hadoop-core@apache.org/100", file); confirmOwner(null, "hadoop-core@apache.org/100", fs, path); + + runCmd(shell, "-chown", "MYCOMPANY+user.name:hadoop", file); + confirmOwner("MYCOMPANY+user.name", "hadoop", fs, path); } /** From 5b1a56f9f1aec7d75b14a60d0c42192b04407356 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Wed, 17 Jun 2020 14:34:40 +0200 Subject: [PATCH 030/131] YARN-10281. Redundant QueuePath usage in UserGroupMappingPlacementRule and AppNameMappingPlacementRule. Contributed by Gergely Pollak --- .../AppNameMappingPlacementRule.java | 11 ++-- .../placement/QueueMapping.java | 24 +++++--- .../resourcemanager/placement/QueuePath.java | 61 ------------------- .../placement/QueuePlacementRuleUtils.java | 31 +++------- .../UserGroupMappingPlacementRule.java | 35 ++++++----- .../CapacitySchedulerConfiguration.java | 4 +- .../scheduler/capacity/TestQueueMappings.java | 27 ++++++++ 7 files changed, 73 insertions(+), 120 deletions(-) delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePath.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java index cf725b628625f..63d98ba6c4032 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java @@ -86,8 +86,6 @@ public boolean initialize(ResourceScheduler scheduler) // check if mappings refer to valid queues for (QueueMapping mapping : queueMappings) { - QueuePath queuePath = mapping.getQueuePath(); - if (isStaticQueueMapping(mapping)) { //at this point mapping.getQueueName() return only the queue name, since //the config parsing have been changed making QueueMapping more @@ -98,7 +96,7 @@ public boolean initialize(ResourceScheduler scheduler) //Try getting queue by its full path name, if it exists it is a static //leaf queue indeed, without any auto creation magic - if (queueManager.isAmbiguous(queuePath.getFullPath())) { + if (queueManager.isAmbiguous(mapping.getFullPath())) { throw new IOException( "mapping contains ambiguous leaf queue reference " + mapping .getFullPath()); @@ -110,8 +108,7 @@ public boolean initialize(ResourceScheduler scheduler) // then it should exist and // be an instance of 
AutoCreateEnabledParentQueue QueueMapping newMapping = - validateAndGetAutoCreatedQueueMapping(queueManager, mapping, - queuePath); + validateAndGetAutoCreatedQueueMapping(queueManager, mapping); if (newMapping == null) { throw new IOException( "mapping contains invalid or non-leaf queue " + mapping @@ -124,7 +121,7 @@ public boolean initialize(ResourceScheduler scheduler) // if its an instance of auto created leaf queue, // then extract parent queue name and update queue mapping QueueMapping newMapping = validateAndGetQueueMapping( - queueManager, queue, mapping, queuePath); + queueManager, queue, mapping); newMappings.add(newMapping); } } else { @@ -135,7 +132,7 @@ public boolean initialize(ResourceScheduler scheduler) // parent queue exists and an instance of AutoCreateEnabledParentQueue // QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( - queueManager, mapping, queuePath); + queueManager, mapping); if (newMapping != null) { newMappings.add(newMapping); } else{ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java index 3fcb5fe6b8368..b142dd6c1008a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java @@ -66,10 +66,20 @@ public QueueMappingBuilder parentQueue(String mappingParentQueue) { return this; } - public QueueMappingBuilder queuePath(QueuePath path) { - this.queue = path.getLeafQueue(); - this.parentQueue = path.getParentQueue(); - return this; + public QueueMappingBuilder parsePathString(String queuePath) { + int parentQueueNameEndIndex = queuePath.lastIndexOf(DOT); + + if (parentQueueNameEndIndex > -1) { + final String parentQueue = + queuePath.substring(0, parentQueueNameEndIndex).trim(); + final String leafQueue = + queuePath.substring(parentQueueNameEndIndex + 1).trim(); + return this + .parentQueue(parentQueue) + .queue(leafQueue); + } + + return this.queue(queuePath); } public QueueMapping build() { @@ -138,12 +148,6 @@ public String getFullPath() { return fullPath; } - public QueuePath getQueuePath() { - //This is to make sure the parsing is the same everywhere, but the - //whole parsing part should be moved to QueuePathConstructor - return QueuePlacementRuleUtils.extractQueuePath(getFullPath()); - } - @Override public int hashCode() { final int prime = 31; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePath.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePath.java deleted file mode 100644 index e02cf58145b89..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePath.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or 
more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.yarn.server.resourcemanager.placement; - -import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT; - -public class QueuePath { - - private String parentQueue; - private String leafQueue; - private String fullPath; - - public QueuePath(final String leafQueue) { - //if the queue does not have a parent, the full path == leaf queue name - this.leafQueue = leafQueue; - this.fullPath = leafQueue; - } - - public QueuePath(final String parentQueue, final String leafQueue) { - this.parentQueue = parentQueue; - this.leafQueue = leafQueue; - this.fullPath = parentQueue + DOT + leafQueue; - } - - public String getParentQueue() { - return parentQueue; - } - - public String getLeafQueue() { - return leafQueue; - } - - public boolean hasParentQueue() { - return parentQueue != null; - } - - public String getFullPath() { - return fullPath; - } - - @Override - public String toString() { - return fullPath; - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java index 350f2b93d8ce5..15c8fd8b70811 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java @@ -66,18 +66,19 @@ public static void validateQueueMappingUnderParentQueue( } public static QueueMapping validateAndGetAutoCreatedQueueMapping( - CapacitySchedulerQueueManager queueManager, QueueMapping mapping, - QueuePath queuePath) throws IOException { - if (queuePath.hasParentQueue()) { + CapacitySchedulerQueueManager queueManager, QueueMapping mapping) + throws IOException { + if (mapping.hasParentQueue()) { //if parent queue is specified, // then it should exist and be an instance of ManagedParentQueue validateQueueMappingUnderParentQueue(queueManager.getQueue( - queuePath.getParentQueue()), queuePath.getParentQueue(), - queuePath.getFullPath()); + mapping.getParentQueue()), mapping.getParentQueue(), + mapping.getFullPath()); return QueueMapping.QueueMappingBuilder.create() .type(mapping.getType()) .source(mapping.getSource()) - .queuePath(queuePath) + .parentQueue(mapping.getParentQueue()) + .queue(mapping.getQueue()) .build(); } @@ -86,7 +87,7 @@ public static QueueMapping 
validateAndGetAutoCreatedQueueMapping( public static QueueMapping validateAndGetQueueMapping( CapacitySchedulerQueueManager queueManager, CSQueue queue, - QueueMapping mapping, QueuePath queuePath) throws IOException { + QueueMapping mapping) throws IOException { if (!(queue instanceof LeafQueue)) { throw new IOException( "mapping contains invalid or non-leaf queue : " + @@ -97,7 +98,7 @@ public static QueueMapping validateAndGetQueueMapping( .getParent() instanceof ManagedParentQueue) { QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( - queueManager, mapping, queuePath); + queueManager, mapping); if (newMapping == null) { throw new IOException( "mapping contains invalid or non-leaf queue " + @@ -114,20 +115,6 @@ public static boolean isStaticQueueMapping(QueueMapping mapping) { && !mapping.getQueue().contains(SECONDARY_GROUP_MAPPING); } - public static QueuePath extractQueuePath(String queuePath) { - int parentQueueNameEndIndex = queuePath.lastIndexOf(DOT); - - if (parentQueueNameEndIndex > -1) { - final String parentQueue = queuePath.substring(0, parentQueueNameEndIndex) - .trim(); - final String leafQueue = queuePath.substring(parentQueueNameEndIndex + 1) - .trim(); - return new QueuePath(parentQueue, leafQueue); - } - - return new QueuePath(queuePath); - } - public static ApplicationPlacementContext getPlacementContext( QueueMapping mapping, CapacitySchedulerQueueManager queueManager) throws IOException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java index 643fc5015db7e..0e8cb9cc047e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java @@ -386,7 +386,6 @@ public boolean initialize(ResourceScheduler scheduler) //at this point mapping.getQueueName() return only the queue name, since //the config parsing have been changed making QueueMapping more consistent - QueuePath queuePath = mapping.getQueuePath(); if (isStaticQueueMapping(mapping)) { //Try getting queue by its full path name, if it exists it is a static //leaf queue indeed, without any auto creation magic @@ -407,7 +406,7 @@ public boolean initialize(ResourceScheduler scheduler) // then it should exist and // be an instance of AutoCreateEnabledParentQueue QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( - queueManager, mapping, queuePath); + queueManager, mapping); if (newMapping == null) { throw new IOException( "mapping contains invalid or non-leaf queue " + mapping @@ -420,7 +419,7 @@ public boolean initialize(ResourceScheduler scheduler) // if its an instance of auto created leaf queue, // then extract parent queue name and update queue mapping QueueMapping newMapping = validateAndGetQueueMapping(queueManager, - queue, mapping, queuePath); + queue, mapping); newMappings.add(newMapping); } } else{ @@ -431,7 +430,7 @@ public boolean initialize(ResourceScheduler scheduler) // parent queue exists and an instance 
of AutoCreateEnabledParentQueue // QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( - queueManager, mapping, queuePath); + queueManager, mapping); if (newMapping != null) { newMappings.add(newMapping); } else{ @@ -453,7 +452,7 @@ public boolean initialize(ResourceScheduler scheduler) private static QueueMapping validateAndGetQueueMapping( CapacitySchedulerQueueManager queueManager, CSQueue queue, - QueueMapping mapping, QueuePath queuePath) throws IOException { + QueueMapping mapping) throws IOException { if (!(queue instanceof LeafQueue)) { throw new IOException( "mapping contains invalid or non-leaf queue : " + @@ -464,7 +463,7 @@ private static QueueMapping validateAndGetQueueMapping( .getParent() instanceof ManagedParentQueue) { QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( - queueManager, mapping, queuePath); + queueManager, mapping); if (newMapping == null) { throw new IOException( "mapping contains invalid or non-leaf queue " @@ -480,29 +479,29 @@ private static boolean ifQueueDoesNotExist(CSQueue queue) { } private static QueueMapping validateAndGetAutoCreatedQueueMapping( - CapacitySchedulerQueueManager queueManager, QueueMapping mapping, - QueuePath queuePath) throws IOException { - if (queuePath.hasParentQueue() - && (queuePath.getParentQueue().equals(PRIMARY_GROUP_MAPPING) - || queuePath.getParentQueue().equals(SECONDARY_GROUP_MAPPING))) { + CapacitySchedulerQueueManager queueManager, QueueMapping mapping) + throws IOException { + if (mapping.hasParentQueue() + && (mapping.getParentQueue().equals(PRIMARY_GROUP_MAPPING) + || mapping.getParentQueue().equals(SECONDARY_GROUP_MAPPING))) { // dynamic parent queue return QueueMappingBuilder.create() .type(mapping.getType()) .source(mapping.getSource()) - .queue(queuePath.getLeafQueue()) - .parentQueue(queuePath.getParentQueue()) + .queue(mapping.getQueue()) + .parentQueue(mapping.getParentQueue()) .build(); - } else if (queuePath.hasParentQueue()) { + } else if (mapping.hasParentQueue()) { //if parent queue is specified, // then it should exist and be an instance of ManagedParentQueue QueuePlacementRuleUtils.validateQueueMappingUnderParentQueue( - queueManager.getQueue(queuePath.getParentQueue()), - queuePath.getParentQueue(), queuePath.getLeafQueue()); + queueManager.getQueue(mapping.getParentQueue()), + mapping.getParentQueue(), mapping.getQueue()); return QueueMappingBuilder.create() .type(mapping.getType()) .source(mapping.getSource()) - .queue(queuePath.getLeafQueue()) - .parentQueue(queuePath.getParentQueue()) + .queue(mapping.getQueue()) + .parentQueue(mapping.getParentQueue()) .build(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 7f4150fab1656..496dd0b290d49 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -1060,7 +1060,7 @@ public List 
getQueueMappingEntity( QueueMapping m = QueueMapping.QueueMappingBuilder.create() .type(QueueMapping.MappingType.APPLICATION) .source(mapping[0]) - .queuePath(QueuePlacementRuleUtils.extractQueuePath(mapping[1])) + .parsePathString(mapping[1]) .build(); mappings.add(m); } @@ -1136,7 +1136,7 @@ public List getQueueMappings() { m = QueueMappingBuilder.create() .type(mappingType) .source(mapping[1]) - .queuePath(QueuePlacementRuleUtils.extractQueuePath(mapping[2])) + .parsePathString(mapping[2]) .build(); } catch (Throwable t) { throw new IllegalArgumentException( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java index 2e7009eae65c0..039b9da8aaa52 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java @@ -100,6 +100,33 @@ public void testQueueMappingTrimSpaces() throws IOException { .build()); } + @Test + public void testQueueMappingPathParsing() { + QueueMapping leafOnly = QueueMapping.QueueMappingBuilder.create() + .parsePathString("leaf") + .build(); + + Assert.assertEquals("leaf", leafOnly.getQueue()); + Assert.assertEquals(null, leafOnly.getParentQueue()); + Assert.assertEquals("leaf", leafOnly.getFullPath()); + + QueueMapping twoLevels = QueueMapping.QueueMappingBuilder.create() + .parsePathString("root.leaf") + .build(); + + Assert.assertEquals("leaf", twoLevels.getQueue()); + Assert.assertEquals("root", twoLevels.getParentQueue()); + Assert.assertEquals("root.leaf", twoLevels.getFullPath()); + + QueueMapping deep = QueueMapping.QueueMappingBuilder.create() + .parsePathString("root.a.b.c.d.e.leaf") + .build(); + + Assert.assertEquals("leaf", deep.getQueue()); + Assert.assertEquals("root.a.b.c.d.e", deep.getParentQueue()); + Assert.assertEquals("root.a.b.c.d.e.leaf", deep.getFullPath()); + } + @Test (timeout = 60000) public void testQueueMappingParsingInvalidCases() throws Exception { // configuration parsing tests - negative test cases From 2bfb22840acc9f96a8bdec1ef82da37d06937da8 Mon Sep 17 00:00:00 2001 From: Mehakmeet Singh Date: Wed, 17 Jun 2020 20:45:26 +0530 Subject: [PATCH 031/131] HADOOP-17020. 
Improve RawFileSystem Performance (#2063) Contributed by : Mehakmeet Singh Co-authored-by: Rajesh Balamohan Co-authored-by: Mehakmeet Singh --- .../org/apache/hadoop/fs/RawLocalFileSystem.java | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index cf2210575da15..72eeb99a4ea5d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -64,6 +64,7 @@ public class RawLocalFileSystem extends FileSystem { static final URI NAME = URI.create("file:///"); private Path workingDir; + private long defaultBlockSize; // Temporary workaround for HADOOP-9652. private static boolean useDeprecatedFileStatus = true; @@ -100,6 +101,7 @@ public File pathToFile(Path path) { public void initialize(URI uri, Configuration conf) throws IOException { super.initialize(uri, conf); setConf(conf); + defaultBlockSize = getDefaultBlockSize(new Path(uri)); } /******************************************************* @@ -518,7 +520,12 @@ public FileStatus[] listStatus(Path f) throws IOException { } return new FileStatus[] { new DeprecatedRawLocalFileStatus(localf, - getDefaultBlockSize(f), this) }; + defaultBlockSize, this) }; + } + + @Override + public boolean exists(Path f) throws IOException { + return pathToFile(f).exists(); } protected boolean mkOneDir(File p2f) throws IOException { @@ -663,7 +670,7 @@ private FileStatus deprecatedGetFileStatus(Path f) throws IOException { File path = pathToFile(f); if (path.exists()) { return new DeprecatedRawLocalFileStatus(pathToFile(f), - getDefaultBlockSize(f), this); + defaultBlockSize, this); } else { throw new FileNotFoundException("File " + f + " does not exist"); } @@ -1051,7 +1058,7 @@ private FileStatus deprecatedGetFileLinkStatusInternal(final Path f) private FileStatus getNativeFileLinkStatus(final Path f, boolean dereference) throws IOException { checkPath(f); - Stat stat = new Stat(f, getDefaultBlockSize(f), dereference, this); + Stat stat = new Stat(f, defaultBlockSize, dereference, this); FileStatus status = stat.getFileStatus(); return status; } From 89689c52c39cdcc498d04508dbd235c6036ec17c Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Wed, 17 Jun 2020 09:04:26 -0700 Subject: [PATCH 032/131] YARN-10308. Update javadoc and variable names for YARN service. 
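The RawLocalFileSystem hunks above gain their speed-up in two ways: the default block size is computed once in initialize() instead of on every getFileStatus/listStatus call, and exists() now probes the local file directly instead of materializing a FileStatus. A caller-side sketch under those assumptions, not part of the patch (the probe path is invented):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class LocalExistsProbe {
  public static void main(String[] args) throws Exception {
    RawLocalFileSystem raw = new RawLocalFileSystem();
    raw.initialize(URI.create("file:///"), new Configuration()); // block size cached once here
    Path probe = new Path("/tmp/probe-file");                    // invented path, any local file works
    for (int i = 0; i < 1000; i++) {
      // With the new override each call is a single java.io.File#exists() check,
      // with no FileStatus object built per iteration.
      raw.exists(probe);
    }
    raw.close();
  }
}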
Contributed by Bilwa S T via eyang --- .../api/records/KerberosPrincipal.java | 20 ++++++++++--------- .../yarn/service/client/ServiceClient.java | 10 +++++----- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/KerberosPrincipal.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/KerberosPrincipal.java index 0ff4daa6b87fd..27125fbedc385 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/KerberosPrincipal.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/KerberosPrincipal.java @@ -71,8 +71,9 @@ public KerberosPrincipal keytab(String keytab) { } /** - * The URI of the kerberos keytab. It supports two schemes \" - * hdfs\" and \"file\". If the URI starts with \" + * The URI of the kerberos keytab. It supports hadoop supported schemes + * like \"hdfs\" \"file\" \"s3\" + * \"viewfs\" etc.If the URI starts with \" * hdfs://\" scheme, it indicates the path on hdfs where the keytab is * stored. The keytab will be localized by YARN and made available to AM in * its local directory. If the URI starts with \"file://\" @@ -81,13 +82,14 @@ public KerberosPrincipal keytab(String keytab) { * * @return keytab **/ - @ApiModelProperty(value = "The URI of the kerberos keytab. It supports two " + - "schemes \"hdfs\" and \"file\". If the URI starts with \"hdfs://\" " + - "scheme, it indicates the path on hdfs where the keytab is stored. The " + - "keytab will be localized by YARN and made available to AM in its local" + - " directory. If the URI starts with \"file://\" scheme, it indicates a " + - "path on the local host where the keytab is presumbaly installed by " + - "admins upfront. ") + @ApiModelProperty(value = "The URI of the kerberos keytab. It supports" + + " Hadoop supported filesystem types like \"hdfs\", \"file\"," + + " \"viewfs\", \"s3\" etc.If the URI starts with \"hdfs://\" scheme, " + + "it indicates the path on hdfs where the keytab is stored. The " + + "keytab will be localized by YARN and made available to AM in its local" + + " directory. If the URI starts with \"file://\" scheme, it indicates a " + + "path on the local host where the keytab is presumbaly installed by " + + "admins upfront. 
") public String getKeytab() { return keytab; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index b3ac7bbe748eb..41d1e423946d8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -1478,18 +1478,18 @@ private void addKeytabResourceIfSecure(SliderFileSystem fileSystem, if ("file".equals(keytabURI.getScheme())) { LOG.info("Using a keytab from localhost: " + keytabURI); } else { - Path keytabOnhdfs = new Path(keytabURI); - if (!fileSystem.getFileSystem().exists(keytabOnhdfs)) { + Path keytabPath = new Path(keytabURI); + if (!fileSystem.getFileSystem().exists(keytabPath)) { LOG.warn(service.getName() + "'s keytab (principalName = " - + principalName + ") doesn't exist at: " + keytabOnhdfs); + + principalName + ") doesn't exist at: " + keytabPath); return; } - LocalResource keytabRes = fileSystem.createAmResource(keytabOnhdfs, + LocalResource keytabRes = fileSystem.createAmResource(keytabPath, LocalResourceType.FILE, LocalResourceVisibility.PRIVATE); localResource.put(String.format(YarnServiceConstants.KEYTAB_LOCATION, service.getName()), keytabRes); LOG.info("Adding " + service.getName() + "'s keytab for " - + "localization, uri = " + keytabOnhdfs); + + "localization, uri = " + keytabPath); } } From caf3995ac2bbc3241896babb9a607272462f70ca Mon Sep 17 00:00:00 2001 From: Thomas Marquardt Date: Wed, 17 Jun 2020 23:12:22 +0000 Subject: [PATCH 033/131] HADOOP-17076: ABFS: Delegation SAS Generator Updates Contributed by Thomas Marquardt. DETAILS: 1) The authentication version in the service has been updated from Dec19 to Feb20, so need to update the client. 2) Add support and test cases for getXattr and setXAttr. 3) Update DelegationSASGenerator and related to use Duration instead of int for time periods. 4) Cleanup DelegationSASGenerator switch/case statement that maps operations to permissions. 5) Cleanup SASGenerator classes to use String.equals instead of ==. TESTS: Added tests for getXAttr and setXAttr. 
All tests are passing against my account in eastus2euap: $mvn -T 1C -Dparallel-tests=abfs -Dscale -DtestsThreadCount=8 clean verify Tests run: 76, Failures: 0, Errors: 0, Skipped: 0 Tests run: 441, Failures: 0, Errors: 0, Skipped: 33 Tests run: 206, Failures: 0, Errors: 0, Skipped: 24 --- .../azurebfs/extensions/SASTokenProvider.java | 2 +- ...ITestAzureBlobFileSystemDelegationSAS.java | 16 ++++++++++ .../MockDelegationSASTokenProvider.java | 4 +-- .../utils/DelegationSASGenerator.java | 32 +++++++++---------- .../fs/azurebfs/utils/SASGenerator.java | 8 +++-- .../azurebfs/utils/ServiceSASGenerator.java | 6 ++-- 6 files changed, 42 insertions(+), 26 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/extensions/SASTokenProvider.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/extensions/SASTokenProvider.java index 2cd44f1b90715..a2cd292b0b230 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/extensions/SASTokenProvider.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/extensions/SASTokenProvider.java @@ -33,6 +33,7 @@ public interface SASTokenProvider { String CHECK_ACCESS_OPERATION = "check-access"; + String CREATE_DIRECTORY_OPERATION = "create-directory"; String CREATE_FILE_OPERATION = "create-file"; String DELETE_OPERATION = "delete"; String DELETE_RECURSIVE_OPERATION = "delete-recursive"; @@ -40,7 +41,6 @@ public interface SASTokenProvider { String GET_STATUS_OPERATION = "get-status"; String GET_PROPERTIES_OPERATION = "get-properties"; String LIST_OPERATION = "list"; - String CREATE_DIRECTORY_OPERATION = "create-directory"; String READ_OPERATION = "read"; String RENAME_SOURCE_OPERATION = "rename-source"; String RENAME_DESTINATION_OPERATION = "rename-destination"; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java index 07b5804d1113d..c2c691e2f6231 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java @@ -25,6 +25,7 @@ import java.util.List; import java.util.UUID; +import org.junit.Assert; import org.junit.Assume; import org.junit.Test; import org.slf4j.Logger; @@ -365,4 +366,19 @@ public void testRootPath() throws Exception { } assertEquals(0, count); } + + @Test + // Test filesystem operations getXAttr and setXAttr + public void testProperties() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + Path reqPath = new Path(UUID.randomUUID().toString()); + + fs.create(reqPath).close(); + + final String propertyName = "user.mime_type"; + final byte[] propertyValue = "text/plain".getBytes("utf-8"); + fs.setXAttr(reqPath, propertyName, propertyValue); + + assertArrayEquals(propertyValue, fs.getXAttr(reqPath, propertyName)); + } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockDelegationSASTokenProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockDelegationSASTokenProvider.java index fa50bef454b02..121256c4dbcf7 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockDelegationSASTokenProvider.java +++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockDelegationSASTokenProvider.java @@ -55,8 +55,8 @@ public void initialize(Configuration configuration, String accountName) throws I String appSecret = configuration.get(TestConfigurationKeys.FS_AZURE_TEST_APP_SECRET); String sktid = configuration.get(TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_TENANT_ID); String skoid = configuration.get(TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_OBJECT_ID); - String skt = SASGenerator.ISO_8601_FORMATTER.format(Instant.now().minusSeconds(SASGenerator.FIVE_MINUTES)); - String ske = SASGenerator.ISO_8601_FORMATTER.format(Instant.now().plusSeconds(SASGenerator.ONE_DAY)); + String skt = SASGenerator.ISO_8601_FORMATTER.format(Instant.now().minus(SASGenerator.FIVE_MINUTES)); + String ske = SASGenerator.ISO_8601_FORMATTER.format(Instant.now().plus(SASGenerator.ONE_DAY)); String skv = SASGenerator.AuthenticationVersion.Dec19.toString(); byte[] key = getUserDelegationKey(accountName, appID, appSecret, sktid, skt, ske, skv); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/DelegationSASGenerator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/DelegationSASGenerator.java index f84ed6ab4c777..6f2209a6e8ced 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/DelegationSASGenerator.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/DelegationSASGenerator.java @@ -29,12 +29,12 @@ * Test Delegation SAS generator. */ public class DelegationSASGenerator extends SASGenerator { - private String skoid; - private String sktid; - private String skt; - private String ske; + private final String skoid; + private final String sktid; + private final String skt; + private final String ske; private final String sks = "b"; - private String skv; + private final String skv; public DelegationSASGenerator(byte[] userDelegationKey, String skoid, String sktid, String skt, String ske, String skv) { super(userDelegationKey); @@ -48,20 +48,18 @@ public DelegationSASGenerator(byte[] userDelegationKey, String skoid, String skt public String getDelegationSAS(String accountName, String containerName, String path, String operation, String saoid, String suoid, String scid) { - final String sv = AuthenticationVersion.Dec19.toString(); - final String st = ISO_8601_FORMATTER.format(Instant.now().minusSeconds(FIVE_MINUTES)); - final String se = ISO_8601_FORMATTER.format(Instant.now().plusSeconds(ONE_DAY)); + final String sv = AuthenticationVersion.Feb20.toString(); + final String st = ISO_8601_FORMATTER.format(Instant.now().minus(FIVE_MINUTES)); + final String se = ISO_8601_FORMATTER.format(Instant.now().plus(ONE_DAY)); String sr = "b"; String sdd = null; - String sp = null; + String sp; switch (operation) { - case SASTokenProvider.CHECK_ACCESS_OPERATION: - sp = "e"; - break; - case SASTokenProvider.WRITE_OPERATION: case SASTokenProvider.CREATE_FILE_OPERATION: case SASTokenProvider.CREATE_DIRECTORY_OPERATION: + case SASTokenProvider.WRITE_OPERATION: + case SASTokenProvider.SET_PROPERTIES_OPERATION: sp = "w"; break; case SASTokenProvider.DELETE_OPERATION: @@ -72,6 +70,7 @@ public String getDelegationSAS(String accountName, String containerName, String sr = "d"; sdd = Integer.toString(StringUtils.countMatches(path, "/")); break; + case SASTokenProvider.CHECK_ACCESS_OPERATION: case SASTokenProvider.GET_ACL_OPERATION: case 
SASTokenProvider.GET_STATUS_OPERATION: sp = "e"; @@ -79,6 +78,7 @@ public String getDelegationSAS(String accountName, String containerName, String case SASTokenProvider.LIST_OPERATION: sp = "l"; break; + case SASTokenProvider.GET_PROPERTIES_OPERATION: case SASTokenProvider.READ_OPERATION: sp = "r"; break; @@ -87,14 +87,12 @@ public String getDelegationSAS(String accountName, String containerName, String sp = "m"; break; case SASTokenProvider.SET_ACL_OPERATION: + case SASTokenProvider.SET_PERMISSION_OPERATION: sp = "p"; break; case SASTokenProvider.SET_OWNER_OPERATION: sp = "o"; break; - case SASTokenProvider.SET_PERMISSION_OPERATION: - sp = "p"; - break; default: throw new IllegalArgumentException(operation); } @@ -146,7 +144,7 @@ private String computeSignatureForSAS(String sp, String st, String se, String sv sb.append(accountName); sb.append("/"); sb.append(containerName); - if (path != null && sr != "c") { + if (path != null && !sr.equals("c")) { sb.append(path); } sb.append("\n"); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/SASGenerator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/SASGenerator.java index 34d504a9a0732..2e9289d8d44c7 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/SASGenerator.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/SASGenerator.java @@ -20,6 +20,7 @@ import java.io.UnsupportedEncodingException; import java.nio.charset.StandardCharsets; +import java.time.Duration; import java.time.format.DateTimeFormatter; import java.time.ZoneId; import java.util.Locale; @@ -35,7 +36,8 @@ public abstract class SASGenerator { public enum AuthenticationVersion { Nov18("2018-11-09"), - Dec19("2019-12-12"); + Dec19("2019-12-12"), + Feb20("2020-02-10"); private final String ver; @@ -50,8 +52,8 @@ public String toString() { } protected static final Logger LOG = LoggerFactory.getLogger(SASGenerator.class); - public static final int FIVE_MINUTES = 5 * 60; - public static final int ONE_DAY = 24 * 60 * 60; + public static final Duration FIVE_MINUTES = Duration.ofMinutes(5); + public static final Duration ONE_DAY = Duration.ofDays(1); public static final DateTimeFormatter ISO_8601_FORMATTER = DateTimeFormatter .ofPattern("yyyy-MM-dd'T'HH:mm:ss'Z'", Locale.ROOT) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/ServiceSASGenerator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/ServiceSASGenerator.java index a76681b599be3..3d8496eff81e2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/ServiceSASGenerator.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/ServiceSASGenerator.java @@ -40,8 +40,8 @@ public String getContainerSASWithFullControl(String accountName, String containe String sp = "rcwdl"; String sv = AuthenticationVersion.Nov18.toString(); String sr = "c"; - String st = ISO_8601_FORMATTER.format(Instant.now().minusSeconds(FIVE_MINUTES)); - String se = ISO_8601_FORMATTER.format(Instant.now().plusSeconds(ONE_DAY)); + String st = ISO_8601_FORMATTER.format(Instant.now().minus(FIVE_MINUTES)); + String se = ISO_8601_FORMATTER.format(Instant.now().plus(ONE_DAY)); String signature = computeSignatureForSAS(sp, st, se, sv, "c", accountName, containerName, null); @@ -71,7 +71,7 @@ private String computeSignatureForSAS(String sp, String st, String se, String sv sb.append(accountName); 
sb.append("/"); sb.append(containerName); - if (path != null && sr != "c") { + if (path != null && !sr.equals("c")) { //sb.append("/"); sb.append(path); } From 9cbd76cc775b58dfedb943f971b3307ec5702f13 Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Thu, 18 Jun 2020 13:33:25 +0800 Subject: [PATCH 034/131] HDFS-15346. FedBalance tool implementation. Contributed by Jinglun. --- .../resources/assemblies/hadoop-tools.xml | 15 + .../hadoop/hdfs/protocol/HdfsConstants.java | 2 + hadoop-project/pom.xml | 17 + .../hadoop-federation-balance/pom.xml | 249 +++++++ .../fedbalance/DistCpBalanceOptions.java | 95 +++ .../tools/fedbalance/DistCpProcedure.java | 635 ++++++++++++++++++ .../hadoop/tools/fedbalance/FedBalance.java | 377 +++++++++++ .../tools/fedbalance/FedBalanceConfigs.java | 19 +- .../tools/fedbalance/FedBalanceContext.java | 286 ++++++++ .../tools/fedbalance/MountTableProcedure.java | 244 +++++++ .../tools/fedbalance/TrashProcedure.java | 112 +++ .../hadoop/tools/fedbalance/package-info.java | 25 + .../fedbalance}/procedure/BalanceJob.java | 2 +- .../fedbalance}/procedure/BalanceJournal.java | 2 +- .../procedure/BalanceJournalInfoHDFS.java | 8 +- .../procedure/BalanceProcedure.java | 4 +- .../procedure/BalanceProcedureScheduler.java | 8 +- .../fedbalance}/procedure/package-info.java | 2 +- .../hadoop-federation-balance.sh | 38 ++ .../tools/fedbalance/TestDistCpProcedure.java | 446 ++++++++++++ .../fedbalance/TestMountTableProcedure.java | 222 ++++++ .../tools/fedbalance/TestTrashProcedure.java | 102 +++ .../procedure/MultiPhaseProcedure.java | 2 +- .../procedure/RecordProcedure.java | 2 +- .../fedbalance}/procedure/RetryProcedure.java | 2 +- .../TestBalanceProcedureScheduler.java | 7 +- .../procedure/UnrecoverableProcedure.java | 2 +- .../fedbalance}/procedure/WaitProcedure.java | 2 +- hadoop-tools/hadoop-tools-dist/pom.xml | 5 + hadoop-tools/pom.xml | 1 + 30 files changed, 2907 insertions(+), 26 deletions(-) create mode 100644 hadoop-tools/hadoop-federation-balance/pom.xml create mode 100644 hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/DistCpBalanceOptions.java create mode 100644 hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/DistCpProcedure.java create mode 100644 hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalance.java rename hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedureConfigKeys.java => hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceConfigs.java (72%) create mode 100644 hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceContext.java create mode 100644 hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/MountTableProcedure.java create mode 100644 hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/TrashProcedure.java create mode 100644 hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/package-info.java rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance}/procedure/BalanceJob.java (99%) rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance}/procedure/BalanceJournal.java 
(96%) rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance}/procedure/BalanceJournalInfoHDFS.java (95%) rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance}/procedure/BalanceProcedure.java (97%) rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance}/procedure/BalanceProcedureScheduler.java (97%) rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance}/procedure/package-info.java (95%) create mode 100644 hadoop-tools/hadoop-federation-balance/src/main/shellprofile.d/hadoop-federation-balance.sh create mode 100644 hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java create mode 100644 hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestMountTableProcedure.java create mode 100644 hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestTrashProcedure.java rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance}/procedure/MultiPhaseProcedure.java (97%) rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance}/procedure/RecordProcedure.java (96%) rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance}/procedure/RetryProcedure.java (97%) rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance}/procedure/TestBalanceProcedureScheduler.java (98%) rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance}/procedure/UnrecoverableProcedure.java (96%) rename {hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs => hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance}/procedure/WaitProcedure.java (97%) diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml index 054d8c0ace2bd..db744f511dadb 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml @@ -47,6 +47,14 @@ /libexec/shellprofile.d 0755 + + ../hadoop-federation-balance/src/main/shellprofile.d + + * + + /libexec/shellprofile.d + 0755 + ../hadoop-extras/src/main/shellprofile.d @@ -111,6 +119,13 @@ *-sources.jar + + ../hadoop-federation-balance/target + /share/hadoop/${hadoop.component}/sources + + *-sources.jar + + ../hadoop-extras/target /share/hadoop/${hadoop.component}/sources diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index ab61e504502d1..a025b9bad2e69 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -105,6 +105,8 @@ public byte value() { public static final String DOT_SNAPSHOT_DIR = ".snapshot"; public static final String SEPARATOR_DOT_SNAPSHOT_DIR = Path.SEPARATOR + DOT_SNAPSHOT_DIR; + public static final String DOT_SNAPSHOT_DIR_SEPARATOR = + DOT_SNAPSHOT_DIR + Path.SEPARATOR; public static final String SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR = Path.SEPARATOR + DOT_SNAPSHOT_DIR + Path.SEPARATOR; public final static String DOT_RESERVED_STRING = ".reserved"; diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index f3a3d76a6420b..48928b508e318 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -327,6 +327,12 @@ ${hadoop.version} test-jar + + org.apache.hadoop + hadoop-hdfs-rbf + ${hadoop.version} + test-jar + org.apache.hadoop hadoop-mapreduce-client-app @@ -578,6 +584,17 @@ ${hadoop.version} test-jar + + org.apache.hadoop + hadoop-federation-balance + ${hadoop.version} + + + org.apache.hadoop + hadoop-federation-balance + ${hadoop.version} + test-jar + org.apache.hadoop hadoop-datajoin diff --git a/hadoop-tools/hadoop-federation-balance/pom.xml b/hadoop-tools/hadoop-federation-balance/pom.xml new file mode 100644 index 0000000000000..cf79e17c5ad5c --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/pom.xml @@ -0,0 +1,249 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-project + 3.4.0-SNAPSHOT + ../../hadoop-project + + hadoop-federation-balance + 3.4.0-SNAPSHOT + Apache Hadoop Federation Balance + Apache Hadoop Federation Balance + jar + + + UTF-8 + true + + + + + junit + junit + test + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-annotations + provided + + + org.apache.hadoop + hadoop-mapreduce-client-app + test + + + org.apache.hadoop + hadoop-mapreduce-client-hs + test + + + org.apache.hadoop + hadoop-mapreduce-client-core + provided + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + provided + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + test + test-jar + + + org.apache.hadoop + hadoop-hdfs-client + provided + + + org.apache.hadoop + hadoop-hdfs + provided + + + org.apache.hadoop + hadoop-hdfs + test + test-jar + + + org.apache.hadoop + hadoop-distcp + provided + + + org.apache.hadoop + hadoop-common + test + test-jar + + + org.apache.hadoop + hadoop-hdfs-rbf + provided + + + org.apache.hadoop + hadoop-hdfs-rbf + test + test-jar + + + org.mockito + mockito-core + test + + + org.assertj + assertj-core + test + + + org.apache.hadoop + hadoop-minicluster + provided + + + + + + + src/main/resources + true + + + + + src/test/resources + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + + ${ignoreTestFailure} + 1 + false + 600 + -Xmx1024m + + **/Test*.java + + true + + + test.build.data + ${basedir}/target/test/data + + + hadoop.log.dir + target/test/logs + + + org.apache.commons.logging.Log + org.apache.commons.logging.impl.SimpleLog + + + org.apache.commons.logging.simplelog.defaultlog + warn + + + + + + maven-dependency-plugin + + + package + + copy-dependencies + + + ${project.build.directory}/lib + + + + deplist + compile + + list + + + + 
${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-builtin.txt + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + org.apache.hadoop.tools.fedbalance.FedBalance + + + + + + prepare-jar + prepare-package + + jar + + + + prepare-test-jar + prepare-package + + test-jar + + + + + + org.apache.maven.plugins + maven-source-plugin + + true + + + + + jar + + + + + + + diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/DistCpBalanceOptions.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/DistCpBalanceOptions.java new file mode 100644 index 0000000000000..704ffd9dccf26 --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/DistCpBalanceOptions.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.tools.fedbalance; + +import org.apache.commons.cli.Option; +import org.apache.commons.cli.Options; + +/** + * Command line options of FedBalance. + */ +public final class DistCpBalanceOptions { + + /** + * The private construct protects this class from being instantiated. + */ + private DistCpBalanceOptions() {} + + /** + * Run in router-based federation mode. + */ + final static Option ROUTER = new Option("router", false, + "If `true` the command runs in router mode. The source path is " + + "taken as a mount point. It will disable write by setting the mount" + + " point readonly. Otherwise the command works in normal federation" + + " mode. The source path is taken as the full path. It will disable" + + " write by cancelling all permissions of the source path. The" + + " default value is `true`."); + + /** + * If true, in DIFF_DISTCP stage it will force close all open files when + * there is no diff between the source path and the dst path. Otherwise + * the DIFF_DISTCP stage will wait until there is no open files. The + * default value is `false`. + */ + final static Option FORCE_CLOSE_OPEN = new Option("forceCloseOpen", false, + "Force close all open files if the src and dst are synced."); + + /** + * Max number of maps to use during copy. DistCp will split work as equally + * as possible among these maps. + */ + final static Option MAP = + new Option("map", true, "Max number of concurrent maps to use for copy"); + + /** + * Specify bandwidth per map in MB, accepts bandwidth as a fraction. + */ + final static Option BANDWIDTH = + new Option("bandwidth", true, "Specify bandwidth per map in MB."); + + /** + * Specify the delayed duration(millie seconds) to retry the Job. + */ + final static Option DELAY_DURATION = new Option("delay", true, + "This specifies the delayed duration(millie seconds) when the job" + + " needs to retry. 
A job may retry many times and check the state" + + " when it waits for the distcp job to finish."); + + /** + * Move the source path to trash after all the data are sync to target, or + * delete the source directly, or skip both trash and deletion. + */ + final static Option TRASH = new Option("moveToTrash", true, + "Move the source path to trash, or delete the source path directly," + + " or skip both trash and deletion. This accepts 3 values: trash," + + " delete and skip. By default the server side trash interval is" + + " used. If the trash is disabled in the server side, the default" + + " trash interval 60 minutes is used."); + + final static Options CLI_OPTIONS = new Options(); + + static { + CLI_OPTIONS.addOption(ROUTER); + CLI_OPTIONS.addOption(FORCE_CLOSE_OPEN); + CLI_OPTIONS.addOption(MAP); + CLI_OPTIONS.addOption(BANDWIDTH); + CLI_OPTIONS.addOption(DELAY_DURATION); + CLI_OPTIONS.addOption(TRASH); + } +} diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/DistCpProcedure.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/DistCpProcedure.java new file mode 100644 index 0000000000000..73fecbf346c89 --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/DistCpProcedure.java @@ -0,0 +1,635 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
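Taken together, the options above make up the balance tool's command-line surface. A rough sketch, not part of the patch, of how they could be consumed with commons-cli; every argument value below is invented:

package org.apache.hadoop.tools.fedbalance;   // same package, so package-private CLI_OPTIONS is visible

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.ParseException;

public final class OptionsSketch {
  public static void main(String[] args) throws ParseException {
    String[] argv = {"-router", "-forceCloseOpen", "-map", "10", "-bandwidth", "5",
        "-moveToTrash", "trash", "/mount/src", "hdfs://dst-ns/target"};
    CommandLine cmd = new GnuParser().parse(DistCpBalanceOptions.CLI_OPTIONS, argv);
    boolean routerMode = cmd.hasOption("router");                   // mount-table mode
    int maps = Integer.parseInt(cmd.getOptionValue("map", "10"));   // concurrent copy maps
    String trashPolicy = cmd.getOptionValue("moveToTrash", "trash");
    String[] srcAndDst = cmd.getArgs();                             // remaining positional args
    System.out.println(routerMode + " " + maps + " " + trashPolicy
        + " " + String.join(" ", srcAndDst));
  }
}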
+ */ +package org.apache.hadoop.tools.fedbalance; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.OpenFileEntry; +import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; +import org.apache.hadoop.tools.DistCp; +import org.apache.hadoop.tools.OptionsParser; +import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedure; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobID; +import org.apache.hadoop.mapred.RunningJob; +import org.apache.hadoop.mapreduce.Job; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.ByteArrayOutputStream; +import java.io.ByteArrayInputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.List; + +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.CURRENT_SNAPSHOT_NAME; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.LAST_SNAPSHOT_NAME; + +/** + * Copy data through distcp. Super user privilege needed. + * + * PRE_CHECK :pre-check of src and dst. + * INIT_DISTCP :the first round of distcp. + * DIFF_DISTCP :copy snapshot diff round by round until there is + * no diff. + * DISABLE_WRITE :disable write operations. + * FINAL_DISTCP :close all open files and do the final round distcp. + * FINISH :procedure finish. + */ +public class DistCpProcedure extends BalanceProcedure { + + public static final Logger LOG = + LoggerFactory.getLogger(DistCpProcedure.class); + + /* Stages of this procedure. */ + enum Stage { + PRE_CHECK, INIT_DISTCP, DIFF_DISTCP, DISABLE_WRITE, FINAL_DISTCP, FINISH + } + + private FedBalanceContext context; // the balance context. + private Path src; // the source path including the source cluster. + private Path dst; // the dst path including the dst cluster. + private Configuration conf; + private int mapNum; // the number of map tasks. + private int bandWidth; // the bandwidth limit of each distcp task. + private String jobId; // the id of the current distcp. + private Stage stage; // current stage of this procedure. + + /* Force close all open files when there is no diff between src and dst */ + private boolean forceCloseOpenFiles; + /* Disable write by setting the mount point readonly. */ + private boolean useMountReadOnly; + + private FsPermission fPerm; // the permission of the src. + private AclStatus acl; // the acl of the src. + + private JobClient client; + private DistributedFileSystem srcFs; // fs of the src cluster. + private DistributedFileSystem dstFs; // fs of the dst cluster. + + /** + * Test only. In unit test we use the LocalJobRunner to run the distcp jobs. + * Here we save the job to look up the job status. The localJob won't be + * serialized thus won't be recovered. + */ + @VisibleForTesting + private Job localJob; + /** + * Enable test mode. 
Use LocalJobRunner to run the distcp jobs. + */ + @VisibleForTesting + static boolean enabledForTest = false; + + public DistCpProcedure() { + } + + /** + * The constructor of DistCpProcedure. + * + * @param name the name of the procedure. + * @param nextProcedure the name of the next procedure. + * @param delayDuration the delay duration when this procedure is delayed. + * @param context the federation balance context. + */ + public DistCpProcedure(String name, String nextProcedure, long delayDuration, + FedBalanceContext context) throws IOException { + super(name, nextProcedure, delayDuration); + this.context = context; + this.src = context.getSrc(); + this.dst = context.getDst(); + this.conf = context.getConf(); + this.client = new JobClient(conf); + this.stage = Stage.PRE_CHECK; + this.mapNum = context.getMapNum(); + this.bandWidth = context.getBandwidthLimit(); + this.forceCloseOpenFiles = context.getForceCloseOpenFiles(); + this.useMountReadOnly = context.getUseMountReadOnly(); + srcFs = (DistributedFileSystem) context.getSrc().getFileSystem(conf); + dstFs = (DistributedFileSystem) context.getDst().getFileSystem(conf); + } + + @Override + public boolean execute() throws RetryException, IOException { + LOG.info("Stage={}", stage.name()); + switch (stage) { + case PRE_CHECK: + preCheck(); + return false; + case INIT_DISTCP: + initDistCp(); + return false; + case DIFF_DISTCP: + diffDistCp(); + return false; + case DISABLE_WRITE: + disableWrite(); + return false; + case FINAL_DISTCP: + finalDistCp(); + return false; + case FINISH: + finish(); + return true; + default: + throw new IOException("Unexpected stage=" + stage); + } + } + + /** + * Pre check of src and dst. + */ + void preCheck() throws IOException { + FileStatus status = srcFs.getFileStatus(src); + if (!status.isDirectory()) { + throw new IOException(src + " should be a directory."); + } + if (dstFs.exists(dst)) { + throw new IOException(dst + " already exists."); + } + if (srcFs.exists(new Path(src, HdfsConstants.DOT_SNAPSHOT_DIR))) { + throw new IOException(src + " shouldn't enable snapshot."); + } + updateStage(Stage.INIT_DISTCP); + } + + /** + * The initial distcp. Copying src to dst. + */ + void initDistCp() throws IOException, RetryException { + RunningJobStatus job = getCurrentJob(); + if (job != null) { + // the distcp has been submitted. + if (job.isComplete()) { + jobId = null; // unset jobId because the job is done. + if (job.isSuccessful()) { + updateStage(Stage.DIFF_DISTCP); + return; + } else { + LOG.warn("DistCp failed. Failure={}", job.getFailureInfo()); + } + } else { + throw new RetryException(); + } + } else { + pathCheckBeforeInitDistcp(); + srcFs.createSnapshot(src, CURRENT_SNAPSHOT_NAME); + jobId = submitDistCpJob( + src.toString() + HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR + + CURRENT_SNAPSHOT_NAME, dst.toString(), false); + } + } + + /** + * The distcp copying diffs between LAST_SNAPSHOT_NAME and + * CURRENT_SNAPSHOT_NAME. + */ + void diffDistCp() throws IOException, RetryException { + RunningJobStatus job = getCurrentJob(); + if (job != null) { + if (job.isComplete()) { + jobId = null; + if (job.isSuccessful()) { + LOG.info("DistCp succeeded. jobId={}", job.getJobID()); + } else { + throw new IOException("DistCp failed. jobId=" + job.getJobID() + + " failure=" + job.getFailureInfo()); + } + } else { + throw new RetryException(); // wait job complete. 
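The stage methods in this class all follow the BalanceProcedure contract introduced by this patch: execute() returns false to be invoked again, returns true once the procedure is finished, and throws RetryException when the scheduler should re-run it after delayDuration. A minimal sketch of a custom procedure written against that contract (the class name and its countdown state are invented for illustration, not part of the patch):

import java.io.IOException;

import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedure;

/** Hypothetical example procedure; not part of the FedBalance patch. */
public class QuietPeriodProcedure extends BalanceProcedure {
  private int remainingChecks = 3;           // invented state for illustration

  public QuietPeriodProcedure() {}           // no-arg constructor used on recovery

  public QuietPeriodProcedure(String name, String nextProcedure,
      long delayDuration) {
    super(name, nextProcedure, delayDuration);
  }

  @Override
  public boolean execute() throws RetryException, IOException {
    if (remainingChecks-- > 0) {
      throw new RetryException();            // not ready yet; retried after delayDuration
    }
    return true;                             // finished; the job moves to nextProcedure
  }
}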
+ } + } else if (!verifyDiff()) { + if (!verifyOpenFiles() || forceCloseOpenFiles) { + updateStage(Stage.DISABLE_WRITE); + } else { + throw new RetryException(); + } + } else { + submitDiffDistCp(); + } + } + + /** + * Disable write either by making the mount entry readonly or cancelling the + * execute permission of the source path. + */ + void disableWrite() throws IOException { + if (useMountReadOnly) { + String mount = context.getMount(); + MountTableProcedure.disableWrite(mount, conf); + } else { + // Save and cancel permission. + FileStatus status = srcFs.getFileStatus(src); + fPerm = status.getPermission(); + acl = srcFs.getAclStatus(src); + srcFs.setPermission(src, FsPermission.createImmutable((short) 0)); + } + updateStage(Stage.FINAL_DISTCP); + } + + /** + * Enable write by restoring the x permission. + */ + void restorePermission() throws IOException { + // restore permission. + dstFs.removeAcl(dst); + if (acl != null) { + dstFs.modifyAclEntries(dst, acl.getEntries()); + } + if (fPerm != null) { + dstFs.setPermission(dst, fPerm); + } + } + + /** + * Close all open files then submit the distcp with -diff. + */ + void finalDistCp() throws IOException, RetryException { + // Close all open files then do the final distcp. + closeAllOpenFiles(srcFs, src); + // Final distcp. + RunningJobStatus job = getCurrentJob(); + if (job != null) { + // the distcp has been submitted. + if (job.isComplete()) { + jobId = null; // unset jobId because the job is done. + if (job.isSuccessful()) { + updateStage(Stage.FINISH); + return; + } else { + throw new IOException( + "Final DistCp failed. Failure: " + job.getFailureInfo()); + } + } else { + throw new RetryException(); + } + } else { + submitDiffDistCp(); + } + } + + void finish() throws IOException { + if (!useMountReadOnly) { + restorePermission(); + } + if (srcFs.exists(src)) { + cleanupSnapshot(srcFs, src); + } + if (dstFs.exists(dst)) { + cleanupSnapshot(dstFs, dst); + } + } + + @VisibleForTesting + Stage getStage() { + return stage; + } + + @VisibleForTesting + void updateStage(Stage value) { + String oldStage = stage == null ? "null" : stage.name(); + String newStage = value == null ? "null" : value.name(); + LOG.info("Stage updated from {} to {}.", oldStage, newStage); + stage = value; + } + + /** + * Submit distcp with -diff option to do the incremental copy. + * + * | the source path | the dst path | + * | LAST_SNAPSHOT_NAME | LAST_SNAPSHOT_NAME | + * | CURRENT_SNAPSHOT_NAME | + * + * 1. Cleanup all the last snapshots. If there are no last snapshots then do + * nothing. + * 2. Create the dst path snapshot named the last snapshot. + * 3. Rename the source path current snapshot as the last snapshot. The dst + * path last snapshot and the source path last snapshot are the same now. + * 4. Create the current snapshot of the source path. + * 5. Submit the distcp job. The incremental part is from the source path last + * snapshot to the source path current snapshot. + */ + private void submitDiffDistCp() throws IOException { + enableSnapshot(dstFs, dst); + deleteSnapshot(srcFs, src, LAST_SNAPSHOT_NAME); + deleteSnapshot(dstFs, dst, LAST_SNAPSHOT_NAME); + dstFs.createSnapshot(dst, LAST_SNAPSHOT_NAME); + srcFs.renameSnapshot(src, CURRENT_SNAPSHOT_NAME, LAST_SNAPSHOT_NAME); + srcFs.createSnapshot(src, CURRENT_SNAPSHOT_NAME); + jobId = submitDistCpJob(src.toString(), dst.toString(), true); + } + + /** + * Close all open files. Block until all the files are closed. 
+ */ + private void closeAllOpenFiles(DistributedFileSystem dfs, Path path) + throws IOException { + String pathStr = path.toUri().getPath(); + while (true) { + RemoteIterator iterator = + dfs.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), pathStr); + if (!iterator.hasNext()) { // all files has been closed. + break; + } + while (iterator.hasNext()) { + OpenFileEntry e = iterator.next(); + try { + srcFs.recoverLease(new Path(e.getFilePath())); + } catch (IOException re) { + // ignore recoverLease error. + } + } + } + } + + /** + * Verify whether the src has changed since CURRENT_SNAPSHOT_NAME snapshot. + * + * @return true if the src has changed. + */ + private boolean verifyDiff() throws IOException { + SnapshotDiffReport diffReport = + srcFs.getSnapshotDiffReport(src, CURRENT_SNAPSHOT_NAME, ""); + return diffReport.getDiffList().size() > 0; + } + + /** + * Verify whether there is any open files under src. + * + * @return true if there are open files. + */ + private boolean verifyOpenFiles() throws IOException { + RemoteIterator iterator = srcFs + .listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), + src.toString()); + return iterator.hasNext(); + } + + private RunningJobStatus getCurrentJob() throws IOException { + if (jobId != null) { + if (enabledForTest) { + return getCurrentLocalJob(); + } else { + RunningJob latestJob = client.getJob(JobID.forName(jobId)); + return latestJob == null ? null : new YarnRunningJobStatus(latestJob); + } + } + return null; + } + + private LocalJobStatus getCurrentLocalJob() throws IOException { + if (localJob != null) { + Job latestJob; + try { + latestJob = localJob.getCluster().getJob(JobID.forName(jobId)); + } catch (InterruptedException e) { + throw new IOException(e); + } + return latestJob == null ? null : new LocalJobStatus(latestJob); + } else { + return null; + } + } + + private void pathCheckBeforeInitDistcp() throws IOException { + if (dstFs.exists(dst)) { // clean up. + throw new IOException("The dst path=" + dst + " already exists. The admin" + + " should delete it before submitting the initial distcp job."); + } + Path snapshotPath = new Path(src, + HdfsConstants.DOT_SNAPSHOT_DIR_SEPARATOR + CURRENT_SNAPSHOT_NAME); + if (srcFs.exists(snapshotPath)) { + throw new IOException("The src snapshot=" + snapshotPath + + " already exists. The admin should delete the snapshot before" + + " submitting the initial distcp."); + } + srcFs.allowSnapshot(src); + } + + /** + * Submit distcp job and return jobId. 
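+   * The job is launched asynchronously with -update, -append and the
+   * attribute-preserving options; when useSnapshotDiff is true, -diff with the
+   * last and current snapshot names is added so only incremental changes are
+   * copied.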
+ */ + private String submitDistCpJob(String srcParam, String dstParam, + boolean useSnapshotDiff) throws IOException { + List command = new ArrayList<>(); + command.addAll(Arrays + .asList(new String[] {"-async", "-update", "-append", "-pruxgpcab"})); + if (useSnapshotDiff) { + command.add("-diff"); + command.add(LAST_SNAPSHOT_NAME); + command.add(CURRENT_SNAPSHOT_NAME); + } + command.add("-m"); + command.add(mapNum + ""); + command.add("-bandwidth"); + command.add(bandWidth + ""); + command.add(srcParam); + command.add(dstParam); + + Configuration config = new Configuration(conf); + DistCp distCp; + try { + distCp = new DistCp(config, + OptionsParser.parse(command.toArray(new String[]{}))); + Job job = distCp.createAndSubmitJob(); + LOG.info("Submit distcp job={}", job); + if (enabledForTest) { + localJob = job; + } + return job.getJobID().toString(); + } catch (Exception e) { + throw new IOException("Submit job failed.", e); + } + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + context.write(out); + if (jobId == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + Text.writeString(out, jobId); + } + out.writeInt(stage.ordinal()); + if (fPerm == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeShort(fPerm.toShort()); + } + if (acl == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + PBHelperClient.convert(acl).writeDelimitedTo(bout); + byte[] data = bout.toByteArray(); + out.writeInt(data.length); + out.write(data); + } + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + context = new FedBalanceContext(); + context.readFields(in); + src = context.getSrc(); + dst = context.getDst(); + conf = context.getConf(); + if (in.readBoolean()) { + jobId = Text.readString(in); + } + stage = Stage.values()[in.readInt()]; + if (in.readBoolean()) { + fPerm = FsPermission.read(in); + } + if (in.readBoolean()) { + int len = in.readInt(); + byte[] data = new byte[len]; + in.readFully(data); + ByteArrayInputStream bin = new ByteArrayInputStream(data); + AclProtos.GetAclStatusResponseProto proto = + AclProtos.GetAclStatusResponseProto.parseDelimitedFrom(bin); + acl = PBHelperClient.convert(proto); + } + srcFs = (DistributedFileSystem) context.getSrc().getFileSystem(conf); + dstFs = (DistributedFileSystem) context.getDst().getFileSystem(conf); + mapNum = context.getMapNum(); + bandWidth = context.getBandwidthLimit(); + forceCloseOpenFiles = context.getForceCloseOpenFiles(); + useMountReadOnly = context.getUseMountReadOnly(); + this.client = new JobClient(conf); + } + + private static void enableSnapshot(DistributedFileSystem dfs, Path path) + throws IOException { + if (!dfs.exists(new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR))) { + dfs.allowSnapshot(path); + } + } + + static void deleteSnapshot(DistributedFileSystem dfs, Path path, + String snapshotName) throws IOException { + Path snapshot = + new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR_SEPARATOR + snapshotName); + if (dfs.exists(snapshot)) { + dfs.deleteSnapshot(path, snapshotName); + } + } + + static void cleanupSnapshot(DistributedFileSystem dfs, Path path) + throws IOException { + if (dfs.exists(new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR))) { + FileStatus[] status = + dfs.listStatus(new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR)); + for (FileStatus s : status) { + deleteSnapshot(dfs, path, s.getPath().getName()); 
+ } + dfs.disallowSnapshot(path); + } + } + + interface RunningJobStatus { + String getJobID(); + + boolean isComplete() throws IOException; + + boolean isSuccessful() throws IOException; + + String getFailureInfo() throws IOException; + } + + private static class YarnRunningJobStatus implements RunningJobStatus { + + private final RunningJob job; + + YarnRunningJobStatus(RunningJob job) { + this.job = job; + } + + @Override + public String getJobID() { + return job.getID().toString(); + } + + @Override + public boolean isComplete() throws IOException { + return job.isComplete(); + } + + @Override + public boolean isSuccessful() throws IOException { + return job.isSuccessful(); + } + + @Override + public String getFailureInfo() throws IOException { + return job.getFailureInfo(); + } + } + + private static class LocalJobStatus implements RunningJobStatus { + + private final Job testJob; + + LocalJobStatus(Job testJob) { + this.testJob = testJob; + } + + @Override + public String getJobID() { + return testJob.getJobID().toString(); + } + + @Override + public boolean isComplete() throws IOException { + return testJob.isComplete(); + } + + @Override + public boolean isSuccessful() throws IOException { + return testJob.isSuccessful(); + } + + @Override + public String getFailureInfo() throws IOException { + try { + return testJob.getStatus().getFailureInfo(); + } catch (InterruptedException e) { + throw new IOException(e); + } + } + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalance.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalance.java new file mode 100644 index 0000000000000..adfb40bf74b38 --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalance.java @@ -0,0 +1,377 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.tools.fedbalance; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.GnuParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedure; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; +import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; +import org.apache.hadoop.hdfs.server.federation.router.RouterClient; +import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.tools.fedbalance.procedure.BalanceJob; +import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedureScheduler; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.ROUTER; +import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.FORCE_CLOSE_OPEN; +import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.MAP; +import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.BANDWIDTH; +import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.TRASH; +import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.DELAY_DURATION; +import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.CLI_OPTIONS; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.FEDERATION_BALANCE_CLASS; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.TrashOption; + +/** + * Balance data from src cluster to dst cluster with distcp. + * + * 1. Move data from the source path to the destination path with distcp. + * 2. Update the the mount entry. + * 3. Delete the source path to trash. + */ +public class FedBalance extends Configured implements Tool { + + public static final Logger LOG = + LoggerFactory.getLogger(FedBalance.class); + private static final String SUBMIT_COMMAND = "submit"; + private static final String CONTINUE_COMMAND = "continue"; + private static final String NO_MOUNT = "no-mount"; + private static final String DISTCP_PROCEDURE = "distcp-procedure"; + private static final String MOUNT_TABLE_PROCEDURE = "mount-table-procedure"; + private static final String TRASH_PROCEDURE = "trash-procedure"; + + /** + * This class helps building the balance job. + */ + private class Builder { + /* Balancing in an rbf cluster. */ + private boolean routerCluster = false; + /* Force close all open files while there is no diff. */ + private boolean forceCloseOpen = false; + /* Max number of concurrent maps to use for copy. */ + private int map = 10; + /* Specify bandwidth per map in MB. */ + private int bandwidth = 10; + /* Specify the trash behaviour of the source path. */ + private TrashOption trashOpt = TrashOption.TRASH; + /* Specify the duration(millie seconds) when the procedure needs retry. */ + private long delayDuration = TimeUnit.SECONDS.toMillis(1); + /* The source input. This specifies the source path. 
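+       In router mode this is the mount point to balance; otherwise it is a
+       full path including the authority of the source cluster.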
*/ + private final String inputSrc; + /* The dst input. This specifies the dst path. */ + private final String inputDst; + + Builder(String inputSrc, String inputDst) { + this.inputSrc = inputSrc; + this.inputDst = inputDst; + } + + /** + * Whether balancing in an rbf cluster. + * @param value true if it's running in a router-based federation cluster. + */ + public Builder setRouterCluster(boolean value) { + this.routerCluster = value; + return this; + } + + /** + * Whether force close all open files while there is no diff. + * @param value true if force close all the open files. + */ + public Builder setForceCloseOpen(boolean value) { + this.forceCloseOpen = value; + return this; + } + + /** + * Max number of concurrent maps to use for copy. + * @param value the map number of the distcp. + */ + public Builder setMap(int value) { + this.map = value; + return this; + } + + /** + * Specify bandwidth per map in MB. + * @param value the bandwidth. + */ + public Builder setBandWidth(int value) { + this.bandwidth = value; + return this; + } + + /** + * Specify the trash behaviour of the source path. + * @param value the trash option. + */ + public Builder setTrashOpt(TrashOption value) { + this.trashOpt = value; + return this; + } + + /** + * Specify the duration(millie seconds) when the procedure needs retry. + * @param value the delay duration of the job. + */ + public Builder setDelayDuration(long value) { + this.delayDuration = value; + return this; + } + + /** + * Build the balance job. + */ + public BalanceJob build() throws IOException { + // Construct job context. + FedBalanceContext context; + Path dst = new Path(inputDst); + if (dst.toUri().getAuthority() == null) { + throw new IOException("The destination cluster must be specified."); + } + if (routerCluster) { // router-based federation. + Path src = getSrcPath(inputSrc); + String mount = inputSrc; + context = new FedBalanceContext.Builder(src, dst, mount, getConf()) + .setForceCloseOpenFiles(forceCloseOpen) + .setUseMountReadOnly(routerCluster).setMapNum(map) + .setBandwidthLimit(bandwidth).setTrash(trashOpt) + .setDelayDuration(delayDuration).build(); + } else { // normal federation cluster. + Path src = new Path(inputSrc); + if (src.toUri().getAuthority() == null) { + throw new IOException("The source cluster must be specified."); + } + context = new FedBalanceContext.Builder(src, dst, NO_MOUNT, getConf()) + .setForceCloseOpenFiles(forceCloseOpen) + .setUseMountReadOnly(routerCluster).setMapNum(map) + .setBandwidthLimit(bandwidth).setTrash(trashOpt).build(); + } + + LOG.info(context.toString()); + // Construct the balance job. 
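+      // Procedures are chained in order: the distcp copy, then (router mode
+      // only) the mount table update, and finally the trash procedure for the
+      // source path.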
+ BalanceJob.Builder builder = new BalanceJob.Builder<>(); + DistCpProcedure dcp = + new DistCpProcedure(DISTCP_PROCEDURE, null, delayDuration, context); + builder.nextProcedure(dcp); + if (routerCluster) { + MountTableProcedure mtp = + new MountTableProcedure(MOUNT_TABLE_PROCEDURE, null, delayDuration, + inputSrc, dst.toUri().getPath(), dst.toUri().getAuthority(), + getConf()); + builder.nextProcedure(mtp); + } + TrashProcedure tp = + new TrashProcedure(TRASH_PROCEDURE, null, delayDuration, context); + builder.nextProcedure(tp); + return builder.build(); + } + } + + public FedBalance() { + super(); + } + + @Override + public int run(String[] args) throws Exception { + CommandLineParser parser = new GnuParser(); + CommandLine command = + parser.parse(DistCpBalanceOptions.CLI_OPTIONS, args, true); + String[] leftOverArgs = command.getArgs(); + if (leftOverArgs == null || leftOverArgs.length < 1) { + printUsage(); + return -1; + } + String cmd = leftOverArgs[0]; + if (cmd.equals(SUBMIT_COMMAND)) { + if (leftOverArgs.length < 3) { + printUsage(); + return -1; + } + String inputSrc = leftOverArgs[1]; + String inputDst = leftOverArgs[2]; + return submit(command, inputSrc, inputDst); + } else if (cmd.equals(CONTINUE_COMMAND)) { + return continueJob(); + } else { + printUsage(); + return -1; + } + } + + /** + * Recover and continue the unfinished jobs. + */ + private int continueJob() throws InterruptedException { + BalanceProcedureScheduler scheduler = + new BalanceProcedureScheduler(getConf()); + try { + scheduler.init(true); + while (true) { + Collection jobs = scheduler.getAllJobs(); + int unfinished = 0; + for (BalanceJob job : jobs) { + if (!job.isJobDone()) { + unfinished++; + } + LOG.info(job.toString()); + } + if (unfinished == 0) { + break; + } + Thread.sleep(TimeUnit.SECONDS.toMillis(10)); + } + } catch (IOException e) { + LOG.error("Continue balance job failed.", e); + return -1; + } finally { + scheduler.shutDown(); + } + return 0; + } + + /** + * Start a ProcedureScheduler and submit the job. + * + * @param command the command options. + * @param inputSrc the source input. This specifies the source path. + * @param inputDst the dst input. This specifies the dst path. + */ + private int submit(CommandLine command, String inputSrc, String inputDst) + throws IOException { + Builder builder = new Builder(inputSrc, inputDst); + // parse options. + builder.setRouterCluster(command.hasOption(ROUTER.getOpt())); + builder.setForceCloseOpen(command.hasOption(FORCE_CLOSE_OPEN.getOpt())); + if (command.hasOption(MAP.getOpt())) { + builder.setMap(Integer.parseInt(command.getOptionValue(MAP.getOpt()))); + } + if (command.hasOption(BANDWIDTH.getOpt())) { + builder.setBandWidth( + Integer.parseInt(command.getOptionValue(BANDWIDTH.getOpt()))); + } + if (command.hasOption(DELAY_DURATION.getOpt())) { + builder.setDelayDuration( + Long.parseLong(command.getOptionValue(DELAY_DURATION.getOpt()))); + } + if (command.hasOption(TRASH.getOpt())) { + String val = command.getOptionValue(TRASH.getOpt()); + if (val.equalsIgnoreCase("skip")) { + builder.setTrashOpt(TrashOption.SKIP); + } else if (val.equalsIgnoreCase("trash")) { + builder.setTrashOpt(TrashOption.TRASH); + } else if (val.equalsIgnoreCase("delete")) { + builder.setTrashOpt(TrashOption.DELETE); + } else { + printUsage(); + return -1; + } + } + + // Submit the job. 
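+    // If submitting the job fails, the error is logged and a non-zero exit
+    // code is returned.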
+ BalanceProcedureScheduler scheduler = + new BalanceProcedureScheduler(getConf()); + scheduler.init(false); + try { + BalanceJob balanceJob = builder.build(); + // Submit and wait until the job is done. + scheduler.submit(balanceJob); + scheduler.waitUntilDone(balanceJob); + } catch (IOException e) { + LOG.error("Submit balance job failed.", e); + return -1; + } finally { + scheduler.shutDown(); + } + return 0; + } + + /** + * Get src uri from Router. + */ + private Path getSrcPath(String fedPath) throws IOException { + String address = getConf().getTrimmed( + RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, + RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT); + InetSocketAddress routerSocket = NetUtils.createSocketAddr(address); + RouterClient rClient = new RouterClient(routerSocket, getConf()); + try { + MountTableManager mountTable = rClient.getMountTableManager(); + MountTable entry = MountTableProcedure.getMountEntry(fedPath, mountTable); + if (entry == null) { + throw new IllegalArgumentException( + "The mount point doesn't exist. path=" + fedPath); + } else if (entry.getDestinations().size() > 1) { + throw new IllegalArgumentException( + "The mount point has more than one destination. path=" + fedPath); + } else { + String ns = entry.getDestinations().get(0).getNameserviceId(); + String path = entry.getDestinations().get(0).getDest(); + return new Path("hdfs://" + ns + path); + } + } finally { + rClient.close(); + } + } + + private void printUsage() { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp( + "fedbalance OPTIONS [submit|continue] \n\nOPTIONS", + CLI_OPTIONS); + } + + /** + * Main function of the FedBalance program. Parses the input arguments and + * invokes the FedBalance::run() method, via the ToolRunner. + * @param argv Command-line arguments sent to FedBalance. + */ + public static void main(String[] argv) { + Configuration conf = new HdfsConfiguration(); + Class balanceClazz = (Class) conf + .getClass(FEDERATION_BALANCE_CLASS, FedBalance.class); + Tool balancer = ReflectionUtils.newInstance(balanceClazz, conf); + int exitCode; + try { + exitCode = ToolRunner.run(balancer, argv); + } catch (Exception e) { + LOG.warn("Couldn't complete FedBalance operation.", e); + exitCode = -1; + } + System.exit(exitCode); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedureConfigKeys.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceConfigs.java similarity index 72% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedureConfigKeys.java rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceConfigs.java index f86903519604f..952aef20d9048 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedureConfigKeys.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceConfigs.java @@ -15,16 +15,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance; import org.apache.hadoop.classification.InterfaceAudience; /** - * This class contains constants for configuration keys and default values - * used in hdfs procedure. + * Federation balance configuration properties. 
*/ @InterfaceAudience.Private -public final class BalanceProcedureConfigKeys { +public final class FedBalanceConfigs { + /* The class used for federation balance */ + public static final String FEDERATION_BALANCE_CLASS = + "federation.balance.class"; + public static final String LAST_SNAPSHOT_NAME = "DISTCP-BALANCE-CURRENT"; + public static final String CURRENT_SNAPSHOT_NAME = "DISTCP-BALANCE-NEXT"; + /* Specify the behaviour of trash. */ + public enum TrashOption { + TRASH, DELETE, SKIP + } + /* The worker threads number of the BalanceProcedureScheduler */ public static final String WORK_THREAD_NUM = "hadoop.hdfs.procedure.work.thread.num"; @@ -37,5 +46,5 @@ public final class BalanceProcedureConfigKeys { public static final String JOURNAL_CLASS = "hadoop.hdfs.procedure.journal.class"; - private BalanceProcedureConfigKeys() {} + private FedBalanceConfigs(){} } diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceContext.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceContext.java new file mode 100644 index 0000000000000..56be7db48e8ae --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceContext.java @@ -0,0 +1,286 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.tools.fedbalance; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.TrashOption; + +/** + * This class contains the basic information needed when Federation Balance. + */ +public class FedBalanceContext implements Writable { + + /* the source path in the source sub-cluster */ + private Path src; + /* the target path in the target sub-cluster */ + private Path dst; + /* the mount point to be balanced */ + private String mount; + /* Force close all open files when there is no diff between src and dst */ + private boolean forceCloseOpenFiles; + /* Disable write by setting the mount point readonly. */ + private boolean useMountReadOnly; + /* The map number of the distcp job. */ + private int mapNum; + /* The bandwidth limit of the distcp job(MB). */ + private int bandwidthLimit; + /* Move source path to trash after all the data are sync to target. Otherwise + delete the source directly. */ + private TrashOption trashOpt; + /* How long will the procedures be delayed. 
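+     In milliseconds. A procedure that needs a retry is re-scheduled after
+     this delay.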
*/ + private long delayDuration; + + private Configuration conf; + + public FedBalanceContext() {} + + public Configuration getConf() { + return conf; + } + + public Path getSrc() { + return src; + } + + public Path getDst() { + return dst; + } + + public String getMount() { + return mount; + } + + public boolean getForceCloseOpenFiles() { + return forceCloseOpenFiles; + } + + public boolean getUseMountReadOnly() { + return useMountReadOnly; + } + + public int getMapNum() { + return mapNum; + } + + public int getBandwidthLimit() { + return bandwidthLimit; + } + + public TrashOption getTrashOpt() { + return trashOpt; + } + + @Override + public void write(DataOutput out) throws IOException { + conf.write(out); + Text.writeString(out, src.toString()); + Text.writeString(out, dst.toString()); + Text.writeString(out, mount); + out.writeBoolean(forceCloseOpenFiles); + out.writeBoolean(useMountReadOnly); + out.writeInt(mapNum); + out.writeInt(bandwidthLimit); + out.writeInt(trashOpt.ordinal()); + out.writeLong(delayDuration); + } + + @Override + public void readFields(DataInput in) throws IOException { + conf = new Configuration(false); + conf.readFields(in); + src = new Path(Text.readString(in)); + dst = new Path(Text.readString(in)); + mount = Text.readString(in); + forceCloseOpenFiles = in.readBoolean(); + useMountReadOnly = in.readBoolean(); + mapNum = in.readInt(); + bandwidthLimit = in.readInt(); + trashOpt = TrashOption.values()[in.readInt()]; + delayDuration = in.readLong(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj == this) { + return true; + } + if (obj.getClass() != getClass()) { + return false; + } + FedBalanceContext bc = (FedBalanceContext) obj; + return new EqualsBuilder() + .append(src, bc.src) + .append(dst, bc.dst) + .append(mount, bc.mount) + .append(forceCloseOpenFiles, bc.forceCloseOpenFiles) + .append(useMountReadOnly, bc.useMountReadOnly) + .append(mapNum, bc.mapNum) + .append(bandwidthLimit, bc.bandwidthLimit) + .append(trashOpt, bc.trashOpt) + .append(delayDuration, bc.delayDuration) + .isEquals(); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(17, 37) + .append(src) + .append(dst) + .append(mount) + .append(forceCloseOpenFiles) + .append(useMountReadOnly) + .append(mapNum) + .append(bandwidthLimit) + .append(trashOpt) + .append(delayDuration) + .build(); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder("FedBalance context:"); + builder.append(" src=").append(src); + builder.append(", dst=").append(dst); + if (useMountReadOnly) { + builder.append(", router-mode=true"); + builder.append(", mount-point=").append(mount); + } else { + builder.append(", router-mode=false"); + } + builder.append(", forceCloseOpenFiles=").append(forceCloseOpenFiles); + builder.append(", trash=").append(trashOpt.name()); + builder.append(", map=").append(mapNum); + builder.append(", bandwidth=").append(bandwidthLimit); + builder.append(", delayDuration=").append(delayDuration); + return builder.toString(); + } + + static class Builder { + private final Path src; + private final Path dst; + private final String mount; + private final Configuration conf; + private boolean forceCloseOpenFiles = false; + private boolean useMountReadOnly = false; + private int mapNum; + private int bandwidthLimit; + private TrashOption trashOpt; + private long delayDuration; + + /** + * This class helps building the FedBalanceContext. 
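+     * The required fields are taken by this constructor; the optional ones
+     * are set through the fluent setters below.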
+ * + * @param src the source path in the source sub-cluster. + * @param dst the target path in the target sub-cluster. + * @param mount the mount point to be balanced. + * @param conf the configuration. + */ + Builder(Path src, Path dst, String mount, Configuration conf) { + this.src = src; + this.dst = dst; + this.mount = mount; + this.conf = conf; + } + + /** + * Force close open files. + * @param value true if force close all the open files. + */ + public Builder setForceCloseOpenFiles(boolean value) { + this.forceCloseOpenFiles = value; + return this; + } + + /** + * Use mount point readonly to disable write. + * @param value true if disabling write by setting mount point readonly. + */ + public Builder setUseMountReadOnly(boolean value) { + this.useMountReadOnly = value; + return this; + } + + /** + * The map number of the distcp job. + * @param value the map number of the distcp. + */ + public Builder setMapNum(int value) { + this.mapNum = value; + return this; + } + + /** + * The bandwidth limit of the distcp job(MB). + * @param value the bandwidth. + */ + public Builder setBandwidthLimit(int value) { + this.bandwidthLimit = value; + return this; + } + + /** + * Specify the trash behaviour after all the data is sync to the target. + * @param value the trash option. + * */ + public Builder setTrash(TrashOption value) { + this.trashOpt = value; + return this; + } + + /** + * Specify the delayed duration when the procedures need to retry. + */ + public Builder setDelayDuration(long value) { + this.delayDuration = value; + return this; + } + + /** + * Build the FedBalanceContext. + * + * @return the FedBalanceContext obj. + */ + public FedBalanceContext build() { + FedBalanceContext context = new FedBalanceContext(); + context.src = this.src; + context.dst = this.dst; + context.mount = this.mount; + context.conf = this.conf; + context.forceCloseOpenFiles = this.forceCloseOpenFiles; + context.useMountReadOnly = this.useMountReadOnly; + context.mapNum = this.mapNum; + context.bandwidthLimit = this.bandwidthLimit; + context.trashOpt = this.trashOpt; + context.delayDuration = this.delayDuration; + return context; + } + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/MountTableProcedure.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/MountTableProcedure.java new file mode 100644 index 0000000000000..8f789831d394f --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/MountTableProcedure.java @@ -0,0 +1,244 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.tools.fedbalance; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; +import org.apache.hadoop.hdfs.server.federation.router.RouterClient; +import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; +import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedure; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.net.NetUtils; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.List; + +/** + * Update mount table. + * Old mount table: + * /a/b/c -> {ns:src path:/a/b/c} + * New mount table: + * /a/b/c -> {ns:dst path:/a/b/c} + */ +public class MountTableProcedure extends BalanceProcedure { + + private String mount; + private String dstPath; + private String dstNs; + private Configuration conf; + + public MountTableProcedure() {} + + /** + * Update mount entry to specified dst uri. + * + * @param mount the mount entry to be updated. + * @param dstPath the sub-cluster uri of the dst path. + * @param conf the configuration. + */ + public MountTableProcedure(String name, String nextProcedure, + long delayDuration, String mount, String dstPath, String dstNs, + Configuration conf) throws IOException { + super(name, nextProcedure, delayDuration); + this.mount = mount; + this.dstPath = dstPath; + this.dstNs = dstNs; + this.conf = conf; + } + + @Override + public boolean execute() throws RetryException, IOException { + updateMountTable(); + return true; + } + + private void updateMountTable() throws IOException { + updateMountTableDestination(mount, dstNs, dstPath, conf); + enableWrite(mount, conf); + } + + /** + * Update the destination of the mount point to target namespace and target + * path. + * + * @param mount the mount point. + * @param dstNs the target namespace. + * @param dstPath the target path + * @param conf the configuration of the router. 
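+   * @throws IOException if the mount entry cannot be found or the update fails.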
+ */ + private static void updateMountTableDestination(String mount, String dstNs, + String dstPath, Configuration conf) throws IOException { + String address = conf.getTrimmed(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, + RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT); + InetSocketAddress routerSocket = NetUtils.createSocketAddr(address); + RouterClient rClient = new RouterClient(routerSocket, conf); + try { + MountTableManager mountTable = rClient.getMountTableManager(); + + MountTable originalEntry = getMountEntry(mount, mountTable); + if (originalEntry == null) { + throw new IOException("Mount table " + mount + " doesn't exist"); + } else { + RemoteLocation remoteLocation = + new RemoteLocation(dstNs, dstPath, mount); + originalEntry.setDestinations(Arrays.asList(remoteLocation)); + UpdateMountTableEntryRequest updateRequest = + UpdateMountTableEntryRequest.newInstance(originalEntry); + UpdateMountTableEntryResponse response = + mountTable.updateMountTableEntry(updateRequest); + if (!response.getStatus()) { + throw new IOException("Failed update mount table " + mount); + } + rClient.getMountTableManager().refreshMountTableEntries( + RefreshMountTableEntriesRequest.newInstance()); + } + } finally { + rClient.close(); + } + } + + /** + * Gets the mount table entry. + * @param mount name of the mount entry. + * @param mountTable the mount table. + * @return corresponding mount entry. + * @throws IOException in case of failure to retrieve mount entry. + */ + public static MountTable getMountEntry(String mount, + MountTableManager mountTable) + throws IOException { + GetMountTableEntriesRequest getRequest = + GetMountTableEntriesRequest.newInstance(mount); + GetMountTableEntriesResponse getResponse = + mountTable.getMountTableEntries(getRequest); + List results = getResponse.getEntries(); + MountTable existingEntry = null; + for (MountTable result : results) { + if (mount.equals(result.getSourcePath())) { + existingEntry = result; + break; + } + } + return existingEntry; + } + + /** + * Disable write by making the mount point readonly. + * + * @param mount the mount point to set readonly. + * @param conf the configuration of the router. + */ + static void disableWrite(String mount, Configuration conf) + throws IOException { + setMountReadOnly(mount, true, conf); + } + + /** + * Enable write by cancelling the mount point readonly. + * + * @param mount the mount point to cancel readonly. + * @param conf the configuration of the router. + */ + static void enableWrite(String mount, Configuration conf) throws IOException { + setMountReadOnly(mount, false, conf); + } + + /** + * Enable or disable readonly of the mount point. + * + * @param mount the mount point. + * @param readOnly enable or disable readonly. + * @param conf the configuration of the router. 
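+   * @throws IOException if the mount entry does not exist or updating it fails.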
+ */ + private static void setMountReadOnly(String mount, boolean readOnly, + Configuration conf) throws IOException { + String address = conf.getTrimmed(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, + RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT); + InetSocketAddress routerSocket = NetUtils.createSocketAddr(address); + RouterClient rClient = new RouterClient(routerSocket, conf); + try { + MountTableManager mountTable = rClient.getMountTableManager(); + + MountTable originalEntry = getMountEntry(mount, mountTable); + if (originalEntry == null) { + throw new IOException("Mount table " + mount + " doesn't exist"); + } else { + originalEntry.setReadOnly(readOnly); + UpdateMountTableEntryRequest updateRequest = + UpdateMountTableEntryRequest.newInstance(originalEntry); + UpdateMountTableEntryResponse response = + mountTable.updateMountTableEntry(updateRequest); + if (!response.getStatus()) { + throw new IOException( + "Failed update mount table " + mount + " with readonly=" + + readOnly); + } + rClient.getMountTableManager().refreshMountTableEntries( + RefreshMountTableEntriesRequest.newInstance()); + } + } finally { + rClient.close(); + } + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + Text.writeString(out, mount); + Text.writeString(out, dstPath); + Text.writeString(out, dstNs); + conf.write(out); + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + mount = Text.readString(in); + dstPath = Text.readString(in); + dstNs = Text.readString(in); + conf = new Configuration(false); + conf.readFields(in); + } + + @VisibleForTesting + String getMount() { + return mount; + } + + @VisibleForTesting + String getDstPath() { + return dstPath; + } + + @VisibleForTesting + String getDstNs() { + return dstNs; + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/TrashProcedure.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/TrashProcedure.java new file mode 100644 index 0000000000000..94ae6160b070b --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/TrashProcedure.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.tools.fedbalance; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.Trash; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedure; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; +import org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.TrashOption; + +/** + * This procedure moves the source path to the corresponding trash. + */ +public class TrashProcedure extends BalanceProcedure { + + private DistributedFileSystem srcFs; + private FedBalanceContext context; + private Configuration conf; + + public TrashProcedure() {} + + /** + * The constructor of TrashProcedure. + * + * @param name the name of the procedure. + * @param nextProcedure the name of the next procedure. + * @param delayDuration the delay duration when this procedure is delayed. + * @param context the federation balance context. + */ + public TrashProcedure(String name, String nextProcedure, long delayDuration, + FedBalanceContext context) throws IOException { + super(name, nextProcedure, delayDuration); + this.context = context; + this.conf = context.getConf(); + this.srcFs = (DistributedFileSystem) context.getSrc().getFileSystem(conf); + } + + @Override + public boolean execute() throws IOException { + moveToTrash(); + return true; + } + + /** + * Delete source path to trash. + */ + void moveToTrash() throws IOException { + Path src = context.getSrc(); + if (srcFs.exists(src)) { + TrashOption trashOption = context.getTrashOpt(); + switch (trashOption) { + case TRASH: + conf.setFloat(FS_TRASH_INTERVAL_KEY, 60); + if (!Trash.moveToAppropriateTrash(srcFs, src, conf)) { + throw new IOException("Failed move " + src + " to trash."); + } + break; + case DELETE: + if (!srcFs.delete(src, true)) { + throw new IOException("Failed delete " + src); + } + LOG.info("{} is deleted.", src); + break; + case SKIP: + break; + default: + throw new IOException("Unexpected trash option=" + trashOption); + } + } + } + + public FedBalanceContext getContext() { + return context; + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + context.write(out); + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + context = new FedBalanceContext(); + context.readFields(in); + conf = context.getConf(); + srcFs = (DistributedFileSystem) context.getSrc().getFileSystem(conf); + } +} diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/package-info.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/package-info.java new file mode 100644 index 0000000000000..3007402f69f7e --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/package-info.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/** + * FedBalance is a tool for balancing data across federation clusters. + */ +@InterfaceAudience.Public +package org.apache.hadoop.tools.fedbalance; +import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJob.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJob.java similarity index 99% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJob.java rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJob.java index 847092a2aa27a..8d5f9d401adcd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJob.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJob.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.builder.EqualsBuilder; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournal.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournal.java similarity index 96% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournal.java rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournal.java index 011ae857bc1e7..da8eb74b2bf57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournal.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournal.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import org.apache.hadoop.conf.Configurable; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournalInfoHDFS.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournalInfoHDFS.java similarity index 95% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournalInfoHDFS.java rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournalInfoHDFS.java index 4e759d8d7f30a..0da8c36637932 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournalInfoHDFS.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournalInfoHDFS.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -37,9 +37,9 @@ import java.net.URI; import java.net.URISyntaxException; -import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.SCHEDULER_JOURNAL_URI; -import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.TMP_TAIL; -import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.JOB_PREFIX; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.TMP_TAIL; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.JOB_PREFIX; /** * BalanceJournal based on HDFS. This class stores all the journals in the HDFS. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedure.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedure.java similarity index 97% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedure.java rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedure.java index 6320e8fe994d1..080a73750ec60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedure.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedure.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; @@ -29,7 +29,7 @@ import java.io.DataOutput; import java.io.IOException; -import static org.apache.hadoop.hdfs.procedure.BalanceJob.NEXT_PROCEDURE_NONE; +import static org.apache.hadoop.tools.fedbalance.procedure.BalanceJob.NEXT_PROCEDURE_NONE; /** * The basic components of the Job. 
Extend this class to implement different diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedureScheduler.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java similarity index 97% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedureScheduler.java rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java index 74606c5580e25..0f82b88f0a937 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedureScheduler.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import com.google.common.annotations.VisibleForTesting; @@ -40,9 +40,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.WORK_THREAD_NUM; -import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.WORK_THREAD_NUM_DEFAULT; -import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.JOURNAL_CLASS; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.WORK_THREAD_NUM; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.WORK_THREAD_NUM_DEFAULT; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.JOURNAL_CLASS; /** * The state machine framework consist of: * Job: The state machine. It implements the basic logic of the diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/package-info.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/package-info.java similarity index 95% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/package-info.java rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/package-info.java index 626d3b3727c05..cb03d137fa929 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/package-info.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/package-info.java @@ -23,7 +23,7 @@ @InterfaceAudience.Private @InterfaceStability.Evolving -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-tools/hadoop-federation-balance/src/main/shellprofile.d/hadoop-federation-balance.sh b/hadoop-tools/hadoop-federation-balance/src/main/shellprofile.d/hadoop-federation-balance.sh new file mode 100644 index 0000000000000..2872c7afba019 --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/src/main/shellprofile.d/hadoop-federation-balance.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if ! declare -f hadoop_subcommand_fedbalance >/dev/null 2>/dev/null; then + + if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then + hadoop_add_subcommand "fedbalance" client "balance data between sub-clusters" + fi + + # this can't be indented otherwise shelldocs won't get it + +## @description fedbalance command for hadoop +## @audience public +## @stability stable +## @replaceable yes +function hadoop_subcommand_fedbalance +{ + # shellcheck disable=SC2034 + HADOOP_CLASSNAME=org.apache.hadoop.tools.fedbalance.FedBalance + hadoop_add_to_classpath_tools hadoop-distcp + hadoop_add_to_classpath_tools hadoop-federation-balance +} + +fi \ No newline at end of file diff --git a/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java new file mode 100644 index 0000000000000..ec565c36d8740 --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java @@ -0,0 +1,446 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.tools.fedbalance; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.tools.fedbalance.DistCpProcedure.Stage; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.tools.fedbalance.procedure.BalanceJob; +import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedure.RetryException; +import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedureScheduler; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.DataOutput; +import java.io.DataInputStream; +import java.io.ByteArrayInputStream; +import java.net.URI; +import java.util.Random; + +import static junit.framework.TestCase.assertTrue; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI; +import static org.apache.hadoop.test.GenericTestUtils.getMethodName; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.CURRENT_SNAPSHOT_NAME; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.LAST_SNAPSHOT_NAME; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.TrashOption; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + +/** + * Test DistCpProcedure. 
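+ * The tests run against a two-datanode MiniDFSCluster and, because
+ * DistCpProcedure.enabledForTest is set, the distcp jobs run with the
+ * LocalJobRunner.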
+ */ +public class TestDistCpProcedure { + private static MiniDFSCluster cluster; + private static Configuration conf; + static final String MOUNT = "mock_mount_point"; + private static final String SRCDAT = "srcdat"; + private static final String DSTDAT = "dstdat"; + private static final long BLOCK_SIZE = 1024; + private static final long FILE_SIZE = BLOCK_SIZE * 100; + private FileEntry[] srcfiles = + {new FileEntry(SRCDAT, true), new FileEntry(SRCDAT + "/a", false), + new FileEntry(SRCDAT + "/b", true), + new FileEntry(SRCDAT + "/b/c", false)}; + private static String nnUri; + + @BeforeClass + public static void beforeClass() throws IOException { + DistCpProcedure.enabledForTest = true; + conf = new Configuration(); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE); + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); + cluster.waitActive(); + + String workPath = + "hdfs://" + cluster.getNameNode().getHostAndPort() + "/procedure"; + conf.set(SCHEDULER_JOURNAL_URI, workPath); + + nnUri = FileSystem.getDefaultUri(conf).toString(); + } + + @AfterClass + public static void afterClass() { + DistCpProcedure.enabledForTest = false; + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test(timeout = 30000) + public void testSuccessfulDistCpProcedure() throws Exception { + String testRoot = nnUri + "/user/foo/testdir." + getMethodName(); + DistributedFileSystem fs = + (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf); + createFiles(fs, testRoot, srcfiles); + + Path src = new Path(testRoot, SRCDAT); + Path dst = new Path(testRoot, DSTDAT); + FsPermission originalPerm = new FsPermission(777); + fs.setPermission(src, originalPerm); + FedBalanceContext context = buildContext(src, dst, MOUNT); + DistCpProcedure dcProcedure = + new DistCpProcedure("distcp-procedure", null, 1000, context); + BalanceProcedureScheduler scheduler = new BalanceProcedureScheduler(conf); + scheduler.init(true); + + BalanceJob balanceJob = + new BalanceJob.Builder<>().nextProcedure(dcProcedure).build(); + scheduler.submit(balanceJob); + scheduler.waitUntilDone(balanceJob); + assertTrue(balanceJob.isJobDone()); + if (balanceJob.getError() != null) { + throw balanceJob.getError(); + } + assertNull(balanceJob.getError()); + assertTrue(fs.exists(dst)); + assertFalse( + fs.exists(new Path(context.getSrc(), HdfsConstants.DOT_SNAPSHOT_DIR))); + assertFalse( + fs.exists(new Path(context.getDst(), HdfsConstants.DOT_SNAPSHOT_DIR))); + assertEquals(originalPerm, fs.getFileStatus(dst).getPermission()); + assertEquals(0, fs.getFileStatus(src).getPermission().toShort()); + for (FileEntry e : srcfiles) { // verify file len. + if (!e.isDir) { + Path targetFile = new Path(testRoot, e.path.replace(SRCDAT, DSTDAT)); + assertEquals(FILE_SIZE, fs.getFileStatus(targetFile).getLen()); + } + } + cleanup(fs, new Path(testRoot)); + } + + @Test(timeout = 30000) + public void testInitDistCp() throws Exception { + String testRoot = nnUri + "/user/foo/testdir." + getMethodName(); + DistributedFileSystem fs = + (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf); + createFiles(fs, testRoot, srcfiles); + + Path src = new Path(testRoot, SRCDAT); + Path dst = new Path(testRoot, DSTDAT); + // set permission. 
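  // (Note: 020 below is a Java octal literal, i.e. write permission for the
  //  group only; presumably it is used simply to give src a non-default
  //  permission before the copy is initialised.)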
+ fs.setPermission(src, FsPermission.createImmutable((short) 020)); + + FedBalanceContext context = buildContext(src, dst, MOUNT); + DistCpProcedure dcProcedure = + new DistCpProcedure("distcp-procedure", null, 1000, context); + + // submit distcp. + try { + dcProcedure.initDistCp(); + } catch (RetryException e) { + } + fs.delete(new Path(src, "a"), true); + // wait until job done. + executeProcedure(dcProcedure, Stage.DIFF_DISTCP, + () -> dcProcedure.initDistCp()); + assertTrue(fs.exists(dst)); + // Because we used snapshot, the file should be copied. + assertTrue(fs.exists(new Path(dst, "a"))); + cleanup(fs, new Path(testRoot)); + } + + @Test(timeout = 30000) + public void testDiffDistCp() throws Exception { + String testRoot = nnUri + "/user/foo/testdir." + getMethodName(); + DistributedFileSystem fs = + (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf); + createFiles(fs, testRoot, srcfiles); + Path src = new Path(testRoot, SRCDAT); + Path dst = new Path(testRoot, DSTDAT); + + FedBalanceContext context = buildContext(src, dst, MOUNT); + DistCpProcedure dcProcedure = + new DistCpProcedure("distcp-procedure", null, 1000, context); + executeProcedure(dcProcedure, Stage.DIFF_DISTCP, + () -> dcProcedure.initDistCp()); + assertTrue(fs.exists(dst)); + + // move file out of src and test distcp. + fs.rename(new Path(src, "a"), new Path("/a")); + executeProcedure(dcProcedure, Stage.FINISH, + () -> dcProcedure.finalDistCp()); + assertFalse(fs.exists(new Path(dst, "a"))); + // move back file src/a and test distcp. + fs.rename(new Path("/a"), new Path(src, "a")); + executeProcedure(dcProcedure, Stage.FINISH, + () -> dcProcedure.finalDistCp()); + assertTrue(fs.exists(new Path(dst, "a"))); + // append file src/a and test. + OutputStream out = fs.append(new Path(src, "a")); + out.write("hello".getBytes()); + out.close(); + long len = fs.getFileStatus(new Path(src, "a")).getLen(); + executeProcedure(dcProcedure, Stage.FINISH, + () -> dcProcedure.finalDistCp()); + assertEquals(len, fs.getFileStatus(new Path(dst, "a")).getLen()); + cleanup(fs, new Path(testRoot)); + } + + @Test(timeout = 30000) + public void testStageFinalDistCp() throws Exception { + String testRoot = nnUri + "/user/foo/testdir." + getMethodName(); + DistributedFileSystem fs = + (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf); + createFiles(fs, testRoot, srcfiles); + + Path src = new Path(testRoot, SRCDAT); + Path dst = new Path(testRoot, DSTDAT); + // open files. + OutputStream out = fs.append(new Path(src, "a")); + + FedBalanceContext context = buildContext(src, dst, MOUNT); + DistCpProcedure dcProcedure = + new DistCpProcedure("distcp-procedure", null, 1000, context); + executeProcedure(dcProcedure, Stage.DIFF_DISTCP, + () -> dcProcedure.initDistCp()); + executeProcedure(dcProcedure, Stage.FINISH, + () -> dcProcedure.finalDistCp()); + // Verify all the open files have been closed. + intercept(RemoteException.class, "LeaseExpiredException", + "Expect RemoteException(LeaseExpiredException).", () -> out.close()); + cleanup(fs, new Path(testRoot)); + } + + @Test(timeout = 30000) + public void testStageFinish() throws Exception { + String testRoot = nnUri + "/user/foo/testdir." 
+ getMethodName(); + DistributedFileSystem fs = + (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf); + Path src = new Path(testRoot, SRCDAT); + Path dst = new Path(testRoot, DSTDAT); + fs.mkdirs(src); + fs.mkdirs(dst); + fs.allowSnapshot(src); + fs.allowSnapshot(dst); + fs.createSnapshot(src, LAST_SNAPSHOT_NAME); + fs.createSnapshot(src, CURRENT_SNAPSHOT_NAME); + fs.createSnapshot(dst, LAST_SNAPSHOT_NAME); + FsPermission originalPerm = new FsPermission(777); + fs.setPermission(src, originalPerm); + + // Test the finish stage. + FedBalanceContext context = buildContext(src, dst, MOUNT); + DistCpProcedure dcProcedure = + new DistCpProcedure("distcp-procedure", null, 1000, context); + dcProcedure.disableWrite(); + dcProcedure.finish(); + + // Verify path and permission. + assertTrue(fs.exists(dst)); + assertFalse(fs.exists(new Path(src, HdfsConstants.DOT_SNAPSHOT_DIR))); + assertFalse(fs.exists(new Path(dst, HdfsConstants.DOT_SNAPSHOT_DIR))); + assertEquals(originalPerm, fs.getFileStatus(dst).getPermission()); + assertEquals(0, fs.getFileStatus(src).getPermission().toShort()); + cleanup(fs, new Path(testRoot)); + } + + @Test(timeout = 30000) + public void testRecoveryByStage() throws Exception { + String testRoot = nnUri + "/user/foo/testdir." + getMethodName(); + DistributedFileSystem fs = + (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf); + createFiles(fs, testRoot, srcfiles); + + Path src = new Path(testRoot, SRCDAT); + Path dst = new Path(testRoot, DSTDAT); + + FedBalanceContext context = buildContext(src, dst, MOUNT); + final DistCpProcedure[] dcp = new DistCpProcedure[1]; + dcp[0] = new DistCpProcedure("distcp-procedure", null, 1000, context); + + // Doing serialization and deserialization before each stage to monitor the + // recovery. + dcp[0] = serializeProcedure(dcp[0]); + executeProcedure(dcp[0], Stage.INIT_DISTCP, () -> dcp[0].preCheck()); + dcp[0] = serializeProcedure(dcp[0]); + executeProcedure(dcp[0], Stage.DIFF_DISTCP, () -> dcp[0].initDistCp()); + fs.delete(new Path(src, "a"), true); // make some difference. + dcp[0] = serializeProcedure(dcp[0]); + executeProcedure(dcp[0], Stage.DISABLE_WRITE, () -> dcp[0].diffDistCp()); + dcp[0] = serializeProcedure(dcp[0]); + executeProcedure(dcp[0], Stage.FINAL_DISTCP, () -> dcp[0].disableWrite()); + dcp[0] = serializeProcedure(dcp[0]); + OutputStream out = fs.append(new Path(src, "b/c")); + executeProcedure(dcp[0], Stage.FINISH, () -> dcp[0].finalDistCp()); + intercept(RemoteException.class, "LeaseExpiredException", + "Expect RemoteException(LeaseExpiredException).", () -> out.close()); + dcp[0] = serializeProcedure(dcp[0]); + assertTrue(dcp[0].execute()); + assertTrue(fs.exists(dst)); + assertFalse( + fs.exists(new Path(context.getSrc(), HdfsConstants.DOT_SNAPSHOT_DIR))); + assertFalse( + fs.exists(new Path(context.getDst(), HdfsConstants.DOT_SNAPSHOT_DIR))); + cleanup(fs, new Path(testRoot)); + } + + @Test(timeout = 30000) + public void testShutdown() throws Exception { + String testRoot = nnUri + "/user/foo/testdir." 
+ getMethodName(); + DistributedFileSystem fs = + (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf); + createFiles(fs, testRoot, srcfiles); + + Path src = new Path(testRoot, SRCDAT); + Path dst = new Path(testRoot, DSTDAT); + FedBalanceContext context = buildContext(src, dst, MOUNT); + DistCpProcedure dcProcedure = + new DistCpProcedure("distcp-procedure", null, 1000, context); + BalanceProcedureScheduler scheduler = new BalanceProcedureScheduler(conf); + scheduler.init(true); + + BalanceJob balanceJob = + new BalanceJob.Builder<>().nextProcedure(dcProcedure).build(); + scheduler.submit(balanceJob); + + long sleep = Math.abs(new Random().nextLong()) % 10000; + Thread.sleep(sleep); + scheduler.shutDown(); + cleanup(fs, new Path(testRoot)); + } + + @Test(timeout = 30000) + public void testDisableWrite() throws Exception { + String testRoot = nnUri + "/user/foo/testdir." + getMethodName(); + DistributedFileSystem fs = + (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf); + createFiles(fs, testRoot, srcfiles); + Path src = new Path(testRoot, SRCDAT); + Path dst = new Path(testRoot, DSTDAT); + + FedBalanceContext context = buildContext(src, dst, MOUNT); + DistCpProcedure dcProcedure = + new DistCpProcedure("distcp-procedure", null, 1000, context); + assertNotEquals(0, fs.getFileStatus(src).getPermission().toShort()); + executeProcedure(dcProcedure, Stage.FINAL_DISTCP, + () -> dcProcedure.disableWrite()); + assertEquals(0, fs.getFileStatus(src).getPermission().toShort()); + cleanup(fs, new Path(testRoot)); + } + + private FedBalanceContext buildContext(Path src, Path dst, String mount) { + return new FedBalanceContext.Builder(src, dst, mount, conf).setMapNum(10) + .setBandwidthLimit(1).setTrash(TrashOption.TRASH).setDelayDuration(1000) + .build(); + } + + interface Call { + void execute() throws IOException, RetryException; + } + + /** + * Execute the procedure until its stage is updated to the target stage. + * + * @param procedure the procedure to be executed and verified. + * @param target the target stage. + * @param call the function executing the procedure. + */ + private static void executeProcedure(DistCpProcedure procedure, Stage target, + Call call) throws IOException { + Stage stage = Stage.PRE_CHECK; + procedure.updateStage(stage); + while (stage != target) { + try { + call.execute(); + } catch (RetryException e) { + } finally { + stage = procedure.getStage(); + } + } + } + + static class FileEntry { + private String path; + private boolean isDir; + + FileEntry(String path, boolean isDir) { + this.path = path; + this.isDir = isDir; + } + + String getPath() { + return path; + } + + boolean isDirectory() { + return isDir; + } + } + + /** + * Create directories and files with random data. + * + * @param fs the file system obj. + * @param topdir the base dir of the directories and files. + * @param entries the directory and file entries to be created. 
+ */ + private void createFiles(DistributedFileSystem fs, String topdir, + FileEntry[] entries) throws IOException { + long seed = System.currentTimeMillis(); + Random rand = new Random(seed); + short replicationFactor = 2; + for (FileEntry entry : entries) { + Path newPath = new Path(topdir + "/" + entry.getPath()); + if (entry.isDirectory()) { + fs.mkdirs(newPath); + } else { + int bufSize = 128; + DFSTestUtil.createFile(fs, newPath, bufSize, FILE_SIZE, BLOCK_SIZE, + replicationFactor, seed); + } + seed = System.currentTimeMillis() + rand.nextLong(); + } + } + + private DistCpProcedure serializeProcedure(DistCpProcedure dcp) + throws IOException { + ByteArrayOutputStream bao = new ByteArrayOutputStream(); + DataOutput dataOut = new DataOutputStream(bao); + dcp.write(dataOut); + dcp = new DistCpProcedure(); + dcp.readFields( + new DataInputStream(new ByteArrayInputStream(bao.toByteArray()))); + return dcp; + } + + private void cleanup(DistributedFileSystem dfs, Path root) + throws IOException { + Path src = new Path(root, SRCDAT); + Path dst = new Path(root, DSTDAT); + DistCpProcedure.cleanupSnapshot(dfs, src); + DistCpProcedure.cleanupSnapshot(dfs, dst); + dfs.delete(root, true); + } +} diff --git a/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestMountTableProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestMountTableProcedure.java new file mode 100644 index 0000000000000..9dd4e5da8fe9d --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestMountTableProcedure.java @@ -0,0 +1,222 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.tools.fedbalance; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.ha.HAServiceProtocol; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; +import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; +import org.apache.hadoop.hdfs.server.federation.router.Router; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl; +import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; +import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.util.Time; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutput; +import java.io.DataInputStream; +import java.io.ByteArrayInputStream; +import java.io.DataOutputStream; +import java.net.InetSocketAddress; +import java.net.URI; +import java.util.Collections; +import java.util.List; + +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport; +import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +/** + * Basic tests of MountTableProcedure. 
+ */ +public class TestMountTableProcedure { + + private static StateStoreDFSCluster cluster; + private static RouterContext routerContext; + private static Configuration routerConf; + private static List mockMountTable; + private static StateStoreService stateStore; + + @BeforeClass + public static void globalSetUp() throws Exception { + cluster = new StateStoreDFSCluster(false, 1); + // Build and start a router with State Store + admin + RPC + Configuration conf = new RouterConfigBuilder() + .stateStore() + .admin() + .rpc() + .build(); + cluster.addRouterOverrides(conf); + cluster.startRouters(); + routerContext = cluster.getRandomRouter(); + mockMountTable = cluster.generateMockMountTable(); + Router router = routerContext.getRouter(); + stateStore = router.getStateStore(); + + // Add two name services for testing + ActiveNamenodeResolver membership = router.getNamenodeResolver(); + membership.registerNamenode(createNamenodeReport("ns0", "nn1", + HAServiceProtocol.HAServiceState.ACTIVE)); + membership.registerNamenode(createNamenodeReport("ns1", "nn1", + HAServiceProtocol.HAServiceState.ACTIVE)); + stateStore.refreshCaches(true); + + routerConf = new Configuration(); + InetSocketAddress routerSocket = router.getAdminServerAddress(); + routerConf.setSocketAddr(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, + routerSocket); + } + + @AfterClass + public static void tearDown() { + cluster.stopRouter(routerContext); + } + + @Before + public void testSetup() throws Exception { + assertTrue( + synchronizeRecords(stateStore, mockMountTable, MountTable.class)); + // Avoid running with random users + routerContext.resetAdminClient(); + } + + @Test + public void testUpdateMountpoint() throws Exception { + // Firstly add mount entry: /test-path->{ns0,/test-path}. + String mount = "/test-path"; + String dst = "/test-dst"; + MountTable newEntry = MountTable + .newInstance(mount, Collections.singletonMap("ns0", mount), + Time.now(), Time.now()); + MountTableManager mountTable = + routerContext.getAdminClient().getMountTableManager(); + AddMountTableEntryRequest addRequest = + AddMountTableEntryRequest.newInstance(newEntry); + AddMountTableEntryResponse addResponse = + mountTable.addMountTableEntry(addRequest); + assertTrue(addResponse.getStatus()); + // verify the mount entry is added successfully. + GetMountTableEntriesRequest request = + GetMountTableEntriesRequest.newInstance("/"); + stateStore.loadCache(MountTableStoreImpl.class, true); // load cache. + GetMountTableEntriesResponse response = + mountTable.getMountTableEntries(request); + assertEquals(3, response.getEntries().size()); + + // set the mount table to readonly. + MountTableProcedure.disableWrite(mount, routerConf); + + // test MountTableProcedure updates the mount point. + String dstNs = "ns1"; + MountTableProcedure smtp = + new MountTableProcedure("single-mount-table-procedure", null, + 1000, mount, dst, dstNs, routerConf); + assertTrue(smtp.execute()); + stateStore.loadCache(MountTableStoreImpl.class, true); // load cache. + // verify the mount entry is updated to / + MountTable entry = + MountTableProcedure.getMountEntry(mount, mountTable); + assertNotNull(entry); + assertEquals(1, entry.getDestinations().size()); + String nsId = entry.getDestinations().get(0).getNameserviceId(); + String dstPath = entry.getDestinations().get(0).getDest(); + assertEquals(dstNs, nsId); + assertEquals(dst, dstPath); + // Verify the mount table is not readonly. 
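  // (Note: the routers in this test are backed only by mock namenode reports
  //  registered in globalSetUp, so no real namenode is reachable. A write that
  //  is allowed past the mount table therefore fails with "No namenode
  //  available to invoke mkdirs", and the intercept below appears to rely on
  //  exactly that error as evidence that the mount point is writable again.)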
+ URI address = routerContext.getFileSystemURI(); + DFSClient routerClient = new DFSClient(address, routerConf); + MountTableProcedure.enableWrite(mount, routerConf); + intercept(RemoteException.class, "No namenode available to invoke mkdirs", + "Expect no namenode exception.", () -> routerClient + .mkdirs(mount + "/file", new FsPermission(020), false)); + } + + @Test + public void testDisableAndEnableWrite() throws Exception { + // Firstly add mount entry: /test-write->{ns0,/test-write}. + String mount = "/test-write"; + MountTable newEntry = MountTable + .newInstance(mount, Collections.singletonMap("ns0", mount), + Time.now(), Time.now()); + MountTableManager mountTable = + routerContext.getAdminClient().getMountTableManager(); + AddMountTableEntryRequest addRequest = + AddMountTableEntryRequest.newInstance(newEntry); + AddMountTableEntryResponse addResponse = + mountTable.addMountTableEntry(addRequest); + assertTrue(addResponse.getStatus()); + stateStore.loadCache(MountTableStoreImpl.class, true); // load cache. + + // Construct client. + URI address = routerContext.getFileSystemURI(); + DFSClient routerClient = new DFSClient(address, routerConf); + // Verify the mount point is not readonly. + intercept(RemoteException.class, "No namenode available to invoke mkdirs", + "Expect no namenode exception.", () -> routerClient + .mkdirs(mount + "/file", new FsPermission(020), false)); + + // Verify disable write. + MountTableProcedure.disableWrite(mount, routerConf); + intercept(RemoteException.class, "is in a read only mount point", + "Expect readonly exception.", () -> routerClient + .mkdirs(mount + "/dir", new FsPermission(020), false)); + + // Verify enable write. + MountTableProcedure.enableWrite(mount, routerConf); + intercept(RemoteException.class, "No namenode available to invoke mkdirs", + "Expect no namenode exception.", () -> routerClient + .mkdirs(mount + "/file", new FsPermission(020), false)); + } + + @Test + public void testSeDeserialize() throws Exception { + String fedPath = "/test-path"; + String dst = "/test-dst"; + String dstNs = "ns1"; + MountTableProcedure smtp = + new MountTableProcedure("single-mount-table-procedure", null, + 1000, fedPath, dst, dstNs, routerConf); + ByteArrayOutputStream bao = new ByteArrayOutputStream(); + DataOutput dataOut = new DataOutputStream(bao); + smtp.write(dataOut); + smtp = new MountTableProcedure(); + smtp.readFields( + new DataInputStream(new ByteArrayInputStream(bao.toByteArray()))); + assertEquals(fedPath, smtp.getMount()); + assertEquals(dst, smtp.getDstPath()); + assertEquals(dstNs, smtp.getDstNs()); + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestTrashProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestTrashProcedure.java new file mode 100644 index 0000000000000..a128932d52362 --- /dev/null +++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestTrashProcedure.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.tools.fedbalance; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutput; +import java.io.DataInputStream; +import java.io.ByteArrayInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.TrashOption; +import static org.apache.hadoop.test.GenericTestUtils.getMethodName; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; + +/** + * Test TrashProcedure. + */ +public class TestTrashProcedure { + + private static Configuration conf; + private static MiniDFSCluster cluster; + private static String nnUri; + + @BeforeClass + public static void beforeClass() throws IOException { + conf = new Configuration(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); + cluster.waitActive(); + nnUri = FileSystem.getDefaultUri(conf).toString(); + } + + @AfterClass + public static void afterClass() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testTrashProcedure() throws Exception { + Path src = new Path("/" + getMethodName() + "-src"); + Path dst = new Path("/" + getMethodName() + "-dst"); + FileSystem fs = cluster.getFileSystem(); + fs.mkdirs(src); + fs.mkdirs(new Path(src, "dir")); + assertTrue(fs.exists(src)); + + FedBalanceContext context = + new FedBalanceContext.Builder(src, dst, TestDistCpProcedure.MOUNT, conf) + .setMapNum(10).setBandwidthLimit(1).setTrash(TrashOption.TRASH) + .build(); + TrashProcedure trashProcedure = + new TrashProcedure("trash-procedure", null, 1000, context); + trashProcedure.moveToTrash(); + assertFalse(fs.exists(src)); + } + + @Test + public void testSeDeserialize() throws Exception { + Path src = new Path("/" + getMethodName() + "-src"); + Path dst = new Path("/" + getMethodName() + "-dst"); + FedBalanceContext context = + new FedBalanceContext.Builder(src, dst, TestDistCpProcedure.MOUNT, conf) + .setMapNum(10).setBandwidthLimit(1).setTrash(TrashOption.TRASH) + .build(); + TrashProcedure trashProcedure = + new TrashProcedure("trash-procedure", null, 1000, context); + ByteArrayOutputStream bao = new ByteArrayOutputStream(); + DataOutput dataOut = new DataOutputStream(bao); + trashProcedure.write(dataOut); + trashProcedure = new TrashProcedure(); + trashProcedure.readFields( + new DataInputStream(new ByteArrayInputStream(bao.toByteArray()))); + assertEquals(context, trashProcedure.getContext()); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/MultiPhaseProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/MultiPhaseProcedure.java similarity index 97% rename from 
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/MultiPhaseProcedure.java rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/MultiPhaseProcedure.java index 27cfebd3a3486..b9c9c1e1eeff8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/MultiPhaseProcedure.java +++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/MultiPhaseProcedure.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RecordProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RecordProcedure.java similarity index 96% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RecordProcedure.java rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RecordProcedure.java index 706d4a1bcec45..9754b0994cc81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RecordProcedure.java +++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RecordProcedure.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import java.util.ArrayList; import java.util.List; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RetryProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RetryProcedure.java similarity index 97% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RetryProcedure.java rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RetryProcedure.java index 336873e6a85f2..faec834f9862a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RetryProcedure.java +++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RetryProcedure.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import java.io.DataInput; import java.io.DataOutput; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/TestBalanceProcedureScheduler.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/TestBalanceProcedureScheduler.java similarity index 98% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/TestBalanceProcedureScheduler.java rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/TestBalanceProcedureScheduler.java index 39e000b644da6..7a2b449ce48c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/TestBalanceProcedureScheduler.java +++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/TestBalanceProcedureScheduler.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -43,8 +43,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.SCHEDULER_JOURNAL_URI; -import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.WORK_THREAD_NUM; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.WORK_THREAD_NUM; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertEquals; @@ -70,6 +70,7 @@ public static void setup() throws IOException { CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "hdfs:///"); CONF.setBoolean(DFS_NAMENODE_ACLS_ENABLED_KEY, true); CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE); + CONF.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0); CONF.setInt(WORK_THREAD_NUM, 1); cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(3).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/UnrecoverableProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/UnrecoverableProcedure.java similarity index 96% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/UnrecoverableProcedure.java rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/UnrecoverableProcedure.java index 941d0a0ae7e41..804f1aa548be5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/UnrecoverableProcedure.java +++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/UnrecoverableProcedure.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/WaitProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/WaitProcedure.java similarity index 97% rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/WaitProcedure.java rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/WaitProcedure.java index 8666caf2f60e5..af46b17afb89b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/WaitProcedure.java +++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/WaitProcedure.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.procedure; +package org.apache.hadoop.tools.fedbalance.procedure; import org.apache.hadoop.util.Time; diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml index f923bb7c36e3f..cc811fca6949d 100644 --- a/hadoop-tools/hadoop-tools-dist/pom.xml +++ b/hadoop-tools/hadoop-tools-dist/pom.xml @@ -44,6 +44,11 @@ hadoop-distcp compile + + org.apache.hadoop + hadoop-federation-balance + compile + org.apache.hadoop hadoop-archives diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index eb0a31a36734e..f026bc261e00b 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -32,6 +32,7 @@ hadoop-streaming hadoop-distcp + hadoop-federation-balance hadoop-dynamometer hadoop-archives hadoop-archive-logs From 123777823edc98553fcef61f1913ab6e4cd5aa9a Mon Sep 17 00:00:00 2001 From: S O'Donnell Date: Thu, 18 Jun 2020 12:26:22 +0100 Subject: [PATCH 035/131] HDFS-15406. Improve the speed of Datanode Block Scan. Contributed by hemanthboyina --- .../hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java | 4 +++- .../apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index aa0cc5685fd81..5843c7d6696b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -133,6 +133,7 @@ public class FsVolumeImpl implements FsVolumeSpi { protected volatile long configuredCapacity; private final FileIoProvider fileIoProvider; private final DataNodeVolumeMetrics metrics; + private URI baseURI; /** * Per-volume worker pool that processes new blocks to cache. 
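// (Sketch of the pattern this change applies, taken from the hunks below:
//  getBaseURI() previously built a fresh File/URI on every call,
//      return new File(currentDir.getParent()).toURI();
//  whereas after the change the URI is computed once in the constructor,
//      this.baseURI = new File(currentDir.getParent()).toURI();
//  and the getter simply returns the cached field,
//      public URI getBaseURI() { return baseURI; }
//  avoiding repeated object construction on the block-scan path.)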
@@ -182,6 +183,7 @@ public class FsVolumeImpl implements FsVolumeSpi { File parent = currentDir.getParentFile(); cacheExecutor = initializeCacheExecutor(parent); this.metrics = DataNodeVolumeMetrics.create(conf, parent.getPath()); + this.baseURI = new File(currentDir.getParent()).toURI(); } else { cacheExecutor = null; this.metrics = null; @@ -506,7 +508,7 @@ BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException { @Override public URI getBaseURI() { - return new File(currentDir.getParent()).toURI(); + return baseURI; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java index 453e04f8f1ea6..5d636d5b8a468 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java @@ -239,7 +239,7 @@ public static FsVolumeImpl getVolume(DataNode dn, File basePath) throws try (FsDatasetSpi.FsVolumeReferences volumes = dn.getFSDataset() .getFsVolumeReferences()) { for (FsVolumeSpi vol : volumes) { - if (vol.getBaseURI().equals(basePath.toURI())) { + if (new File(vol.getBaseURI()).equals(basePath)) { return (FsVolumeImpl) vol; } } From edf716a5c3ed7f51c994ec8bcc460445f9bb8ece Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Thu, 18 Jun 2020 06:43:35 -0700 Subject: [PATCH 036/131] Revert "HDFS-15372. Files in snapshots no longer see attribute provider permissions. Contributed by Stephen O'Donnell." This reverts commit 730a39d1388548f22f76132a6734d61c24c3eb72. --- .../hdfs/server/namenode/FSDirectory.java | 16 +-- .../server/namenode/FSPermissionChecker.java | 46 +++---- .../hdfs/server/namenode/INodesInPath.java | 42 ------- .../namenode/TestINodeAttributeProvider.java | 115 ------------------ 4 files changed, 20 insertions(+), 199 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index cd9eb0944f566..5895c6b08ec23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -73,6 +73,7 @@ import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; +import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -2031,20 +2032,7 @@ INodeAttributes getAttributes(INodesInPath iip) // first empty component for the root. however file status // related calls are expected to strip out the root component according // to TestINodeAttributeProvider. - // Due to HDFS-15372 the attribute provider should received the resolved - // snapshot path. Ie, rather than seeing /d/.snapshot/sn/data it should - // see /d/data. However, for the path /d/.snapshot/sn it should see this - // full path. Node.getPathComponents always resolves the path to the - // original location, so we need to check if ".snapshot/sn" is the last - // path to ensure the provider receives the correct components. 
- byte[][] components; - if (iip.isSnapshot() && !iip.isDotSnapshotDirPrefix()) { - // For snapshot paths, node.getPathComponents unless the last component - // is like ".snapshot/sn" - components = node.getPathComponents(); - } else { - components = iip.getPathComponents(); - } + byte[][] components = iip.getPathComponents(); components = Arrays.copyOfRange(components, 1, components.length); nodeAttrs = ap.getAttributes(components, nodeAttrs); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 615b164c19814..c697ead7000d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -19,14 +19,11 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Stack; import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.ipc.CallerContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -210,7 +207,7 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner, final INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.length]; final byte[][] components = inodesInPath.getPathComponents(); for (int i = 0; i < inodes.length && inodes[i] != null; i++) { - inodeAttrs[i] = getINodeAttrs(inodes[i], snapshotId); + inodeAttrs[i] = getINodeAttrs(components, i, inodes[i], snapshotId); } String path = inodesInPath.getPath(); @@ -260,7 +257,8 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner, void checkPermission(INode inode, int snapshotId, FsAction access) throws AccessControlException { byte[][] pathComponents = inode.getPathComponents(); - INodeAttributes nodeAttributes = getINodeAttrs(inode, snapshotId); + INodeAttributes nodeAttributes = getINodeAttrs(pathComponents, + pathComponents.length - 1, inode, snapshotId); try { INodeAttributes[] iNodeAttr = {nodeAttributes}; AccessControlEnforcer enforcer = getAccessControlEnforcer(); @@ -369,31 +367,23 @@ public void checkPermissionWithContext( authzContext.getSubAccess(), authzContext.isIgnoreEmptyDir()); } - private INodeAttributes getINodeAttrs(INode inode, int snapshotId) { + private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx, + INode inode, int snapshotId) { INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId); - /** - * This logic is similar to {@link FSDirectory#getAttributes()} and it - * ensures that the attribute provider sees snapshot paths resolved to their - * original location. This means the attributeProvider can apply permissions - * to the snapshot paths in the same was as the live paths. See HDFS-15372. - */ if (getAttributesProvider() != null) { + String[] elements = new String[pathIdx + 1]; /** - * If we have an inode representing a path like /d/.snapshot/snap1 - * then calling inode.getPathComponents returns [null, d, snap1]. If we - * call inode.getFullPathName() it will return /d/.snapshot/snap1. 
For - * this special path (snapshot root) the attribute provider should see: - * - * [null, d, .snapshot/snap1] - * - * Using IIP.resolveFromRoot, it will take the inode fullPathName and - * construct an IIP object that give the correct components as above. + * {@link INode#getPathComponents(String)} returns a null component + * for the root only path "/". Assign an empty string if so. */ - INodesInPath iip = INodesInPath.resolveFromRoot(inode); - byte[][] components = iip.getPathComponents(); - components = Arrays.copyOfRange(components, 1, components.length); - inodeAttrs = getAttributesProvider() - .getAttributes(components, inodeAttrs); + if (pathByNameArr.length == 1 && pathByNameArr[0] == null) { + elements[0] = ""; + } else { + for (int i = 0; i < elements.length; i++) { + elements[i] = DFSUtil.bytes2String(pathByNameArr[i]); + } + } + inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs); } return inodeAttrs; } @@ -449,7 +439,7 @@ private void checkSubAccess(byte[][] components, int pathIdx, if (!(cList.isEmpty() && ignoreEmptyDir)) { //TODO have to figure this out with inodeattribute provider INodeAttributes inodeAttr = - getINodeAttrs(d, snapshotId); + getINodeAttrs(components, pathIdx, d, snapshotId); if (!hasPermission(inodeAttr, access)) { throw new AccessControlException( toAccessControlString(inodeAttr, d.getFullPathName(), access)); @@ -467,7 +457,7 @@ private void checkSubAccess(byte[][] components, int pathIdx, if (inodeAttr.getFsPermission().getStickyBit()) { for (INode child : cList) { INodeAttributes childInodeAttr = - getINodeAttrs(child, snapshotId); + getINodeAttrs(components, pathIdx, child, snapshotId); if (isStickyBitViolated(inodeAttr, childInodeAttr)) { List allComponentList = new ArrayList<>(); for (int i = 0; i <= pathIdx; ++i) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java index 179c1c01be4fc..f072220677733 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java @@ -46,20 +46,6 @@ private static boolean isDotSnapshotDir(byte[] pathComponent) { Arrays.equals(HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent); } - /** - * Returns true if the given path component starts with the same byte stream - * as {@link HdfsConstants#DOT_SNAPSHOT_DIR}, indicating the component - * starts with a DotSnapshot directory. - * @param pathComponent Bytes representing the pathComponent - * @return True is the component starts with - * {@link HdfsConstants#DOT_SNAPSHOT_DIR} and false otherwise. - */ - private static boolean isDotSnapshotDirPrefix(byte[] pathComponent) { - return pathComponent != null && - isDotSnapshotDir(Arrays.copyOf( - pathComponent, HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES.length)); - } - private static INode[] getINodes(final INode inode) { int depth = 0, index; INode tmp = inode; @@ -149,27 +135,6 @@ static INodesInPath resolve(final INodeDirectory startingDir, return resolve(startingDir, components, false); } - /** - * Retrieves the existing INodes from a path, starting at the root directory. - * The root directory is located by following the parent link in the inode - * recursively until the final root inode is found. 
- * The inodes returned will depend upon the output of inode.getFullPathName(). - * For a snapshot path, like /data/.snapshot/snap1, it will be resolved to: - * [null, data, .snapshot/snap1] - * For a file in the snapshot, as inode.getFullPathName resolves the snapshot - * information, the returned inodes for a path like /data/.snapshot/snap1/d1 - * would be: - * [null, data, d1] - * @param inode the {@link INode} to be resolved - * @return INodesInPath - */ - static INodesInPath resolveFromRoot(INode inode) { - INode[] inodes = getINodes(inode); - byte[][] paths = INode.getPathComponents(inode.getFullPathName()); - INodeDirectory rootDir = inodes[0].asDirectory(); - return resolve(rootDir, paths); - } - static INodesInPath resolve(final INodeDirectory startingDir, byte[][] components, final boolean isRaw) { Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0); @@ -497,13 +462,6 @@ boolean isDotSnapshotDir() { return isDotSnapshotDir(getLastLocalName()); } - /** - * @return Return true if .snapshot is the prefix of the last path component. - */ - boolean isDotSnapshotDirPrefix() { - return isDotSnapshotDirPrefix(getLastLocalName()); - } - /** * @return if this is a /.reserved/raw path. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java index e7e1f90b2c0f3..433be79b87a28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java @@ -33,7 +33,6 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.*; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; @@ -81,7 +80,6 @@ public void checkPermission(String fsOwner, String supergroup, ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir); } CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access); - CALLED.add("checkPermission|" + path); } @Override @@ -95,7 +93,6 @@ public void checkPermissionWithContext( CALLED.add("checkPermission|" + authzContext.getAncestorAccess() + "|" + authzContext.getParentAccess() + "|" + authzContext .getAccess()); - CALLED.add("checkPermission|" + authzContext.getPath()); } } @@ -112,12 +109,7 @@ public void stop() { @Override public INodeAttributes getAttributes(String[] pathElements, final INodeAttributes inode) { - String fullPath = String.join("/", pathElements); - if (!fullPath.startsWith("/")) { - fullPath = "/" + fullPath; - } CALLED.add("getAttributes"); - CALLED.add("getAttributes|"+fullPath); final boolean useDefault = useDefault(pathElements); final boolean useNullAcl = useNullAclFeature(pathElements); return new INodeAttributes() { @@ -493,111 +485,4 @@ public Void run() throws Exception { } }); } - - @Test - // HDFS-15372 - Attribute provider should not see the snapshot path as it - // should be resolved into the original path name before it hits the provider. 
- public void testAttrProviderSeesResolvedSnapahotPaths() throws Exception { - FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); - DistributedFileSystem hdfs = miniDFS.getFileSystem(); - final Path userPath = new Path("/user"); - final Path authz = new Path("/user/authz"); - final Path authzChild = new Path("/user/authz/child2"); - - fs.mkdirs(userPath); - fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION)); - fs.mkdirs(authz); - hdfs.allowSnapshot(userPath); - fs.setPermission(authz, new FsPermission(HDFS_PERMISSION)); - fs.mkdirs(authzChild); - fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION)); - fs.createSnapshot(userPath, "snapshot_1"); - UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1", - new String[]{"g1"}); - ugi.doAs(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); - final Path snapChild = - new Path("/user/.snapshot/snapshot_1/authz/child2"); - // Run various methods on the path to access the attributes etc. - fs.getAclStatus(snapChild); - fs.getContentSummary(snapChild); - fs.getFileStatus(snapChild); - Assert.assertFalse(CALLED.contains("getAttributes|" + - snapChild.toString())); - Assert.assertTrue(CALLED.contains("getAttributes|/user/authz/child2")); - // The snapshot path should be seen by the permission checker, but when - // it checks access, the paths will be resolved so the attributeProvider - // only sees the resolved path. - Assert.assertTrue( - CALLED.contains("checkPermission|" + snapChild.toString())); - CALLED.clear(); - fs.getAclStatus(new Path("/")); - Assert.assertTrue(CALLED.contains("checkPermission|/")); - Assert.assertTrue(CALLED.contains("getAttributes|/")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user")); - Assert.assertTrue(CALLED.contains("checkPermission|/user")); - Assert.assertTrue(CALLED.contains("getAttributes|/user")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot")); - Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot")); - // attribute provider never sees the .snapshot path directly. - Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot/snapshot_1")); - Assert.assertTrue( - CALLED.contains("checkPermission|/user/.snapshot/snapshot_1")); - Assert.assertTrue( - CALLED.contains("getAttributes|/user/.snapshot/snapshot_1")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz")); - Assert.assertTrue(CALLED - .contains("checkPermission|/user/.snapshot/snapshot_1/authz")); - Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/authz")); - Assert.assertTrue(CALLED.contains("checkPermission|/user/authz")); - Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); - return null; - } - }); - // Delete the files / folders covered by the snapshot, then re-check they - // are all readable correctly. - fs.delete(authz, true); - ugi.doAs(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot")); - Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot")); - // attribute provider never sees the .snapshot path directly. 
- Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot/snapshot_1")); - Assert.assertTrue( - CALLED.contains("checkPermission|/user/.snapshot/snapshot_1")); - Assert.assertTrue( - CALLED.contains("getAttributes|/user/.snapshot/snapshot_1")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz")); - Assert.assertTrue(CALLED - .contains("checkPermission|/user/.snapshot/snapshot_1/authz")); - Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); - - return null; - } - }); - - } } From d50e93ce7b6aba235ecc0143fe2c7a0150a3ceae Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Thu, 18 Jun 2020 06:44:20 -0700 Subject: [PATCH 037/131] HDFS-15372. Files in snapshots no longer see attribute provider permissions. Contributed by Stephen O'Donnell. Signed-off-by: Wei-Chiu Chuang --- .../hdfs/server/namenode/FSDirectory.java | 19 ++- .../server/namenode/FSPermissionChecker.java | 44 ++++--- .../hdfs/server/namenode/INodesInPath.java | 21 ++++ .../namenode/TestINodeAttributeProvider.java | 115 ++++++++++++++++++ 4 files changed, 179 insertions(+), 20 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 5895c6b08ec23..527ca241bc56e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -73,7 +73,6 @@ import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; -import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -2032,7 +2031,23 @@ INodeAttributes getAttributes(INodesInPath iip) // first empty component for the root. however file status // related calls are expected to strip out the root component according // to TestINodeAttributeProvider. - byte[][] components = iip.getPathComponents(); + // Due to HDFS-15372 the attribute provider should received the resolved + // snapshot path. Ie, rather than seeing /d/.snapshot/sn/data it should + // see /d/data. However, for the path /d/.snapshot/sn it should see this + // full path. If the current inode is the snapshot name, it always has the + // same ID as its parent inode, so we can use that to check if it is the + // path which needs handled specially. + byte[][] components; + INodeDirectory parent = node.getParent(); + if (iip.isSnapshot() + && parent != null && parent.getId() != node.getId()) { + // For snapshot paths, we always user node.getPathComponents so the + // snapshot path is resolved to the real path, unless the last component + // is the snapshot name root directory. 
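// (Illustration with assumed example paths: for /d/.snapshot/sn/data the
//  provider receives the components of the resolved live path, i.e. [d, data];
//  for the snapshot root /d/.snapshot/sn it receives [d, .snapshot/sn]; and a
//  live path such as /d/data is passed through unchanged as [d, data]. The
//  leading root component is stripped by the copyOfRange call below.)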
+ components = node.getPathComponents(); + } else { + components = iip.getPathComponents(); + } components = Arrays.copyOfRange(components, 1, components.length); nodeAttrs = ap.getAttributes(components, nodeAttrs); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index c697ead7000d2..d60098273d738 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Stack; @@ -207,7 +208,7 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner, final INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.length]; final byte[][] components = inodesInPath.getPathComponents(); for (int i = 0; i < inodes.length && inodes[i] != null; i++) { - inodeAttrs[i] = getINodeAttrs(components, i, inodes[i], snapshotId); + inodeAttrs[i] = getINodeAttrs(inodes[i], snapshotId); } String path = inodesInPath.getPath(); @@ -257,8 +258,7 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner, void checkPermission(INode inode, int snapshotId, FsAction access) throws AccessControlException { byte[][] pathComponents = inode.getPathComponents(); - INodeAttributes nodeAttributes = getINodeAttrs(pathComponents, - pathComponents.length - 1, inode, snapshotId); + INodeAttributes nodeAttributes = getINodeAttrs(inode, snapshotId); try { INodeAttributes[] iNodeAttr = {nodeAttributes}; AccessControlEnforcer enforcer = getAccessControlEnforcer(); @@ -367,23 +367,31 @@ public void checkPermissionWithContext( authzContext.getSubAccess(), authzContext.isIgnoreEmptyDir()); } - private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx, - INode inode, int snapshotId) { + private INodeAttributes getINodeAttrs(INode inode, int snapshotId) { INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId); + /** + * This logic is similar to {@link FSDirectory#getAttributes()} and it + * ensures that the attribute provider sees snapshot paths resolved to their + * original location. This means the attributeProvider can apply permissions + * to the snapshot paths in the same was as the live paths. See HDFS-15372. + */ if (getAttributesProvider() != null) { - String[] elements = new String[pathIdx + 1]; /** - * {@link INode#getPathComponents(String)} returns a null component - * for the root only path "/". Assign an empty string if so. + * If we have an inode representing a path like /d/.snapshot/snap1 + * then calling inode.getPathComponents returns [null, d, snap1]. If we + * call inode.getFullPathName() it will return /d/.snapshot/snap1. For + * this special path (snapshot root) the attribute provider should see: + * + * [null, d, .snapshot/snap1] + * + * Using IIP.resolveFromRoot, it will take the inode fullPathName and + * construct an IIP object that give the correct components as above. 
*/ - if (pathByNameArr.length == 1 && pathByNameArr[0] == null) { - elements[0] = ""; - } else { - for (int i = 0; i < elements.length; i++) { - elements[i] = DFSUtil.bytes2String(pathByNameArr[i]); - } - } - inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs); + INodesInPath iip = INodesInPath.resolveFromRoot(inode); + byte[][] components = iip.getPathComponents(); + components = Arrays.copyOfRange(components, 1, components.length); + inodeAttrs = getAttributesProvider() + .getAttributes(components, inodeAttrs); } return inodeAttrs; } @@ -439,7 +447,7 @@ private void checkSubAccess(byte[][] components, int pathIdx, if (!(cList.isEmpty() && ignoreEmptyDir)) { //TODO have to figure this out with inodeattribute provider INodeAttributes inodeAttr = - getINodeAttrs(components, pathIdx, d, snapshotId); + getINodeAttrs(d, snapshotId); if (!hasPermission(inodeAttr, access)) { throw new AccessControlException( toAccessControlString(inodeAttr, d.getFullPathName(), access)); @@ -457,7 +465,7 @@ private void checkSubAccess(byte[][] components, int pathIdx, if (inodeAttr.getFsPermission().getStickyBit()) { for (INode child : cList) { INodeAttributes childInodeAttr = - getINodeAttrs(components, pathIdx, child, snapshotId); + getINodeAttrs(child, snapshotId); if (isStickyBitViolated(inodeAttr, childInodeAttr)) { List allComponentList = new ArrayList<>(); for (int i = 0; i <= pathIdx; ++i) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java index f072220677733..cc5eaa0ebe838 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java @@ -135,6 +135,27 @@ static INodesInPath resolve(final INodeDirectory startingDir, return resolve(startingDir, components, false); } + /** + * Retrieves the existing INodes from a path, starting at the root directory. + * The root directory is located by following the parent link in the inode + * recursively until the final root inode is found. + * The inodes returned will depend upon the output of inode.getFullPathName(). 
+ * For a snapshot path, like /data/.snapshot/snap1, it will be resolved to: + * [null, data, .snapshot/snap1] + * For a file in the snapshot, as inode.getFullPathName resolves the snapshot + * information, the returned inodes for a path like /data/.snapshot/snap1/d1 + * would be: + * [null, data, d1] + * @param inode the {@link INode} to be resolved + * @return INodesInPath + */ + static INodesInPath resolveFromRoot(INode inode) { + INode[] inodes = getINodes(inode); + byte[][] paths = INode.getPathComponents(inode.getFullPathName()); + INodeDirectory rootDir = inodes[0].asDirectory(); + return resolve(rootDir, paths); + } + static INodesInPath resolve(final INodeDirectory startingDir, byte[][] components, final boolean isRaw) { Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java index 433be79b87a28..e7e1f90b2c0f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.*; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; @@ -80,6 +81,7 @@ public void checkPermission(String fsOwner, String supergroup, ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir); } CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access); + CALLED.add("checkPermission|" + path); } @Override @@ -93,6 +95,7 @@ public void checkPermissionWithContext( CALLED.add("checkPermission|" + authzContext.getAncestorAccess() + "|" + authzContext.getParentAccess() + "|" + authzContext .getAccess()); + CALLED.add("checkPermission|" + authzContext.getPath()); } } @@ -109,7 +112,12 @@ public void stop() { @Override public INodeAttributes getAttributes(String[] pathElements, final INodeAttributes inode) { + String fullPath = String.join("/", pathElements); + if (!fullPath.startsWith("/")) { + fullPath = "/" + fullPath; + } CALLED.add("getAttributes"); + CALLED.add("getAttributes|"+fullPath); final boolean useDefault = useDefault(pathElements); final boolean useNullAcl = useNullAclFeature(pathElements); return new INodeAttributes() { @@ -485,4 +493,111 @@ public Void run() throws Exception { } }); } + + @Test + // HDFS-15372 - Attribute provider should not see the snapshot path as it + // should be resolved into the original path name before it hits the provider. 
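+  // For example (summarising the assertions below): an access to
+  // /user/.snapshot/snapshot_1/authz/child2 must surface to the provider as
+  // getAttributes|/user/authz/child2, while the permission checker still
+  // sees the original snapshot path.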
+ public void testAttrProviderSeesResolvedSnapahotPaths() throws Exception { + FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); + DistributedFileSystem hdfs = miniDFS.getFileSystem(); + final Path userPath = new Path("/user"); + final Path authz = new Path("/user/authz"); + final Path authzChild = new Path("/user/authz/child2"); + + fs.mkdirs(userPath); + fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION)); + fs.mkdirs(authz); + hdfs.allowSnapshot(userPath); + fs.setPermission(authz, new FsPermission(HDFS_PERMISSION)); + fs.mkdirs(authzChild); + fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION)); + fs.createSnapshot(userPath, "snapshot_1"); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1", + new String[]{"g1"}); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); + final Path snapChild = + new Path("/user/.snapshot/snapshot_1/authz/child2"); + // Run various methods on the path to access the attributes etc. + fs.getAclStatus(snapChild); + fs.getContentSummary(snapChild); + fs.getFileStatus(snapChild); + Assert.assertFalse(CALLED.contains("getAttributes|" + + snapChild.toString())); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz/child2")); + // The snapshot path should be seen by the permission checker, but when + // it checks access, the paths will be resolved so the attributeProvider + // only sees the resolved path. + Assert.assertTrue( + CALLED.contains("checkPermission|" + snapChild.toString())); + CALLED.clear(); + fs.getAclStatus(new Path("/")); + Assert.assertTrue(CALLED.contains("checkPermission|/")); + Assert.assertTrue(CALLED.contains("getAttributes|/")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user")); + Assert.assertTrue(CALLED.contains("checkPermission|/user")); + Assert.assertTrue(CALLED.contains("getAttributes|/user")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot")); + Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot")); + // attribute provider never sees the .snapshot path directly. + Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("checkPermission|/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("getAttributes|/user/.snapshot/snapshot_1")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED + .contains("checkPermission|/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/authz")); + Assert.assertTrue(CALLED.contains("checkPermission|/user/authz")); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); + return null; + } + }); + // Delete the files / folders covered by the snapshot, then re-check they + // are all readable correctly. + fs.delete(authz, true); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot")); + Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot")); + // attribute provider never sees the .snapshot path directly. 
+ Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("checkPermission|/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("getAttributes|/user/.snapshot/snapshot_1")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED + .contains("checkPermission|/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); + + return null; + } + }); + + } } From 9821b94c946b5102f34e39f58493d31a0bb93547 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 19 Jun 2020 13:29:44 +0900 Subject: [PATCH 038/131] HADOOP-16888. [JDK11] Support JDK11 in the precommit job (#2012) * Install JDK 11 in the docker image * Refactor Dockerfile * Add compile test with JDK11 via Yetus multijdk feature --- Jenkinsfile | 5 ++++ dev-support/docker/Dockerfile | 53 +++++++++-------------------------- 2 files changed, 18 insertions(+), 40 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 02b9a0eabdff3..1d4c6fe4c3c97 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -154,6 +154,11 @@ pipeline { # use emoji vote so it is easier to find the broken line YETUS_ARGS+=("--github-use-emoji-vote") + # test with Java 8 and 11 + YETUS_ARGS+=("--java-home=/usr/lib/jvm/java-8-openjdk-amd64") + YETUS_ARGS+=("--multijdkdirs=/usr/lib/jvm/java-11-openjdk-amd64") + YETUS_ARGS+=("--multijdktests=compile") + "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" ''' } diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 5bd867f2f56c1..818d394bf921f 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -33,16 +33,10 @@ RUN echo APT::Install-Suggests "0"\; >> /etc/apt/apt.conf.d/10disableextras ENV DEBIAN_FRONTEND noninteractive ENV DEBCONF_TERSE true -###### -# Install common dependencies from packages. Versions here are either -# sufficient or irrelevant. -# -# WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default -# Ubuntu Java. See Java section below! 
-###### # hadolint ignore=DL3008 RUN apt-get -q update \ && apt-get -q install -y --no-install-recommends \ + ant \ apt-utils \ bats \ build-essential \ @@ -51,11 +45,13 @@ RUN apt-get -q update \ cmake \ curl \ doxygen \ + findbugs \ fuse \ g++ \ gcc \ git \ gnupg-agent \ + libbcprov-java \ libbz2-dev \ libcurl4-openssl-dev \ libfuse-dev \ @@ -64,11 +60,13 @@ RUN apt-get -q update \ libsasl2-dev \ libsnappy-dev \ libssl-dev \ - libsnappy-dev \ libtool \ libzstd1-dev \ locales \ make \ + maven \ + openjdk-11-jdk \ + openjdk-8-jdk \ pinentry-curses \ pkg-config \ python \ @@ -86,15 +84,13 @@ RUN apt-get -q update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* - -####### -# OpenJDK 8 -####### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends openjdk-8-jdk libbcprov-java \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* +###### +# Set env vars required to build Hadoop +###### +ENV MAVEN_HOME /usr +# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) +ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 +ENV FINDBUGS_HOME /usr ###### # Install Google Protobuf 3.7.1 (3.0.0 ships with Bionic) @@ -113,29 +109,6 @@ RUN mkdir -p /opt/protobuf-src \ ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" -###### -# Install Apache Maven 3.6.0 (3.6.0 ships with Bionic) -###### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends maven \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -ENV MAVEN_HOME /usr -# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) -ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 - -###### -# Install findbugs 3.1.0 (3.1.0 ships with Bionic) -# Ant is needed for findbugs -###### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends findbugs ant \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -ENV FINDBUGS_HOME /usr - #### # Install pylint at fixed version (2.0.0 removed python2 support) # https://github.com/PyCQA/pylint/issues/2294 From 469841446f921f3da5bbd96cf83b3a808dde8084 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 19 Jun 2020 14:50:24 +0200 Subject: [PATCH 039/131] YARN-9930. Support max running app logic for CapacityScheduler. 
Contributed by Peter Bacsko --- .../scheduler/capacity/AbstractCSQueue.java | 16 + .../capacity/CSMaxRunningAppsEnforcer.java | 436 ++++++++++++++++++ .../scheduler/capacity/CapacityScheduler.java | 19 +- .../CapacitySchedulerConfiguration.java | 35 +- .../scheduler/capacity/LeafQueue.java | 81 +++- .../scheduler/capacity/ParentQueue.java | 30 ++ .../common/fica/FiCaSchedulerApp.java | 21 + .../reservation/TestReservationSystem.java | 4 + .../capacity/TestApplicationLimits.java | 1 + .../TestCSMaxRunningAppsEnforcer.java | 278 +++++++++++ .../TestCapacitySchedulerMaxParallelApps.java | 312 +++++++++++++ .../scheduler/capacity/TestLeafQueue.java | 2 + .../scheduler/capacity/TestQueueState.java | 1 + .../capacity/TestQueueStateManager.java | 1 + 14 files changed, 1229 insertions(+), 8 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSMaxRunningAppsEnforcer.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSMaxRunningAppsEnforcer.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMaxParallelApps.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 0a4a14f063e44..968d971ce1ffb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -146,6 +146,7 @@ protected enum CapacityConfigType { volatile Priority priority = Priority.newInstance(0); private Map userWeights = new HashMap(); + private int maxParallelApps; public AbstractCSQueue(CapacitySchedulerContext cs, String queueName, CSQueue parent, CSQueue old) throws IOException { @@ -390,6 +391,11 @@ protected void setupQueueConfigs(Resource clusterResource, // and queue setting setupMaximumAllocation(configuration); + // Max parallel apps + int queueMaxParallelApps = + configuration.getMaxParallelAppsForQueue(getQueuePath()); + setMaxParallelApps(queueMaxParallelApps); + // initialized the queue state based on previous state, configured state // and its parent state. 
QueueState previous = getState(); @@ -1431,4 +1437,14 @@ public long getDefaultApplicationLifetime() { public boolean getDefaultAppLifetimeWasSpecifiedInConfig() { return defaultAppLifetimeWasSpecifiedInConfig; } + + public void setMaxParallelApps(int maxParallelApps) { + this.maxParallelApps = maxParallelApps; + } + + public int getMaxParallelApps() { + return maxParallelApps; + } + + abstract int getNumRunnableApps(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSMaxRunningAppsEnforcer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSMaxRunningAppsEnforcer.java new file mode 100644 index 0000000000000..d1a62b4094135 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSMaxRunningAppsEnforcer.java @@ -0,0 +1,436 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.PriorityQueue; + +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.ListMultimap; + +/** + * Handles tracking and enforcement for user and queue maxRunningApps + * constraints. + */ +public class CSMaxRunningAppsEnforcer { + private static final Logger LOG = LoggerFactory.getLogger( + CSMaxRunningAppsEnforcer.class); + + private final CapacityScheduler scheduler; + + // Tracks the number of running applications by user. + private final Map usersNumRunnableApps; + + private final ListMultimap usersNonRunnableApps; + + public CSMaxRunningAppsEnforcer(CapacityScheduler scheduler) { + this.scheduler = scheduler; + this.usersNumRunnableApps = new HashMap(); + this.usersNonRunnableApps = ArrayListMultimap.create(); + } + + /** + * Checks whether making the application runnable would exceed any + * maxRunningApps limits. Also sets the "runnable" flag on the + * attempt. 
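+   *
+   * The limits consulted are the per-user max-parallel-apps setting and the
+   * max-parallel-apps limit of the application's queue and of every ancestor
+   * queue up to the root (see exceedUserMaxParallelApps and
+   * exceedQueueMaxParallelApps below).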
+ * + * @param attempt the app attempt being checked + * @return true if the application is runnable; false otherwise + */ + public boolean checkRunnabilityWithUpdate( + FiCaSchedulerApp attempt) { + boolean attemptCanRun = !exceedUserMaxParallelApps(attempt.getUser()) + && !exceedQueueMaxParallelApps(attempt.getCSLeafQueue()); + + attempt.setRunnable(attemptCanRun); + + return attemptCanRun; + } + + /** + * Checks whether the number of user runnable apps exceeds the limitation. + * + * @param user the user name + * @return true if the number hits the limit; false otherwise + */ + private boolean exceedUserMaxParallelApps(String user) { + Integer userNumRunnable = usersNumRunnableApps.get(user); + if (userNumRunnable == null) { + userNumRunnable = 0; + } + if (userNumRunnable >= getUserMaxParallelApps(user)) { + LOG.info("Maximum runnable apps exceeded for user {}", user); + return true; + } + + return false; + } + + /** + * Recursively checks whether the number of queue runnable apps exceeds the + * limitation. + * + * @param queue the current queue + * @return true if the number hits the limit; false otherwise + */ + private boolean exceedQueueMaxParallelApps(AbstractCSQueue queue) { + // Check queue and all parent queues + while (queue != null) { + if (queue.getNumRunnableApps() >= queue.getMaxParallelApps()) { + LOG.info("Maximum runnable apps exceeded for queue {}", + queue.getQueuePath()); + return true; + } + queue = (AbstractCSQueue) queue.getParent(); + } + + return false; + } + + public void trackApp(FiCaSchedulerApp app) { + if (app.isRunnable()) { + trackRunnableApp(app); + } else { + trackNonRunnableApp(app); + } + } + /** + * Tracks the given new runnable app for purposes of maintaining max running + * app limits. + */ + private void trackRunnableApp(FiCaSchedulerApp app) { + String user = app.getUser(); + AbstractCSQueue queue = (AbstractCSQueue) app.getQueue(); + // Increment running counts for all parent queues + ParentQueue parent = (ParentQueue) queue.getParent(); + while (parent != null) { + parent.incrementRunnableApps(); + parent = (ParentQueue) parent.getParent(); + } + + Integer userNumRunnable = usersNumRunnableApps.get(user); + usersNumRunnableApps.put(user, (userNumRunnable == null ? 0 + : userNumRunnable) + 1); + } + + /** + * Tracks the given new non runnable app so that it can be made runnable when + * it would not violate max running app limits. + */ + private void trackNonRunnableApp(FiCaSchedulerApp app) { + String user = app.getUser(); + usersNonRunnableApps.put(user, app); + } + + /** + * This is called after reloading the allocation configuration when the + * scheduler is reinitialized + * + * Checks to see whether any non-runnable applications become runnable + * now that the max running apps of given queue has been changed + * + * Runs in O(n) where n is the number of apps that are non-runnable and in + * the queues that went from having no slack to having slack. + */ + + public void updateRunnabilityOnReload() { + ParentQueue rootQueue = (ParentQueue) scheduler.getRootQueue(); + List> appsNowMaybeRunnable = + new ArrayList>(); + + gatherPossiblyRunnableAppLists(rootQueue, appsNowMaybeRunnable); + + updateAppsRunnability(appsNowMaybeRunnable, Integer.MAX_VALUE); + } + + /** + * Checks to see whether any other applications runnable now that the given + * application has been removed from the given queue. And makes them so. 
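+   *
+   * Example (the scenario exercised by
+   * TestCSMaxRunningAppsEnforcer#testRemoveEnablesAppOnCousinQueue): with
+   * max-parallel-apps = 2 on root.queue1, finishing the app running in
+   * root.queue1.subqueue1.leaf1 allows a waiting app under
+   * root.queue1.subqueue2.leaf2 to become runnable, even though leaf2 itself
+   * was not at any limit.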
+ * + * Runs in O(n log(n)) where n is the number of queues that are under the + * highest queue that went from having no slack to having slack. + */ + public void updateRunnabilityOnAppRemoval(FiCaSchedulerApp app) { + // childqueueX might have no pending apps itself, but if a queue higher up + // in the hierarchy parentqueueY has a maxRunningApps set, an app completion + // in childqueueX could allow an app in some other distant child of + // parentqueueY to become runnable. + // An app removal will only possibly allow another app to become runnable if + // the queue was already at its max before the removal. + // Thus we find the ancestor queue highest in the tree for which the app + // that was at its maxRunningApps before the removal. + LeafQueue queue = app.getCSLeafQueue(); + AbstractCSQueue highestQueueWithAppsNowRunnable = + (queue.getNumRunnableApps() == queue.getMaxParallelApps() - 1) + ? queue : null; + + ParentQueue parent = (ParentQueue) queue.getParent(); + while (parent != null) { + if (parent.getNumRunnableApps() == parent.getMaxParallelApps() - 1) { + highestQueueWithAppsNowRunnable = parent; + } + parent = (ParentQueue) parent.getParent(); + } + + List> appsNowMaybeRunnable = + new ArrayList>(); + + // Compile lists of apps which may now be runnable + // We gather lists instead of building a set of all non-runnable apps so + // that this whole operation can be O(number of queues) instead of + // O(number of apps) + if (highestQueueWithAppsNowRunnable != null) { + gatherPossiblyRunnableAppLists(highestQueueWithAppsNowRunnable, + appsNowMaybeRunnable); + } + String user = app.getUser(); + Integer userNumRunning = usersNumRunnableApps.get(user); + if (userNumRunning == null) { + userNumRunning = 0; + } + if (userNumRunning == getUserMaxParallelApps(user) - 1) { + List userWaitingApps = usersNonRunnableApps.get(user); + if (userWaitingApps != null) { + appsNowMaybeRunnable.add(userWaitingApps); + } + } + + updateAppsRunnability(appsNowMaybeRunnable, + appsNowMaybeRunnable.size()); + } + + /** + * Checks to see whether applications are runnable now by iterating + * through each one of them and check if the queue and user have slack. + * + * if we know how many apps can be runnable, there is no need to iterate + * through all apps, maxRunnableApps is used to break out of the iteration. + */ + private void updateAppsRunnability(List> + appsNowMaybeRunnable, int maxRunnableApps) { + // Scan through and check whether this means that any apps are now runnable + Iterator iter = new MultiListStartTimeIterator( + appsNowMaybeRunnable); + FiCaSchedulerApp prev = null; + List noLongerPendingApps = new ArrayList<>(); + while (iter.hasNext()) { + FiCaSchedulerApp next = iter.next(); + if (next == prev) { + continue; + } + + if (checkRunnabilityWithUpdate(next)) { + LeafQueue nextQueue = next.getCSLeafQueue(); + LOG.info("{} is now runnable in {}", + next.getApplicationAttemptId(), nextQueue); + trackRunnableApp(next); + FiCaSchedulerApp appSched = next; + nextQueue.submitApplicationAttempt(next, next.getUser()); + noLongerPendingApps.add(appSched); + + if (noLongerPendingApps.size() >= maxRunnableApps) { + break; + } + } + + prev = next; + } + + // We remove the apps from their pending lists afterwards so that we don't + // pull them out from under the iterator. If they are not in these lists + // in the first place, there is a bug. 
+ for (FiCaSchedulerApp appSched : noLongerPendingApps) { + if (!(appSched.getCSLeafQueue().removeNonRunnableApp(appSched))) { + LOG.error("Can't make app runnable that does not already exist in queue" + + " as non-runnable: {}. This should never happen.", + appSched.getApplicationAttemptId()); + } + + if (!usersNonRunnableApps.remove(appSched.getUser(), appSched)) { + LOG.error("Waiting app {} expected to be in " + + "usersNonRunnableApps, but was not. This should never happen.", + appSched.getApplicationAttemptId()); + } + } + } + + public void untrackApp(FiCaSchedulerApp app) { + if (app.isRunnable()) { + untrackRunnableApp(app); + } else { + untrackNonRunnableApp(app); + } + } + + /** + * Updates the relevant tracking variables after a runnable app with the given + * queue and user has been removed. + */ + private void untrackRunnableApp(FiCaSchedulerApp app) { + // Update usersRunnableApps + String user = app.getUser(); + int newUserNumRunning = usersNumRunnableApps.get(user) - 1; + if (newUserNumRunning == 0) { + usersNumRunnableApps.remove(user); + } else { + usersNumRunnableApps.put(user, newUserNumRunning); + } + + // Update runnable app bookkeeping for queues + AbstractCSQueue queue = (AbstractCSQueue) app.getQueue(); + ParentQueue parent = (ParentQueue) queue.getParent(); + while (parent != null) { + parent.decrementRunnableApps(); + parent = (ParentQueue) parent.getParent(); + } + } + + /** + * Stops tracking the given non-runnable app. + */ + private void untrackNonRunnableApp(FiCaSchedulerApp app) { + usersNonRunnableApps.remove(app.getUser(), app); + } + + /** + * Traverses the queue hierarchy under the given queue to gather all lists + * of non-runnable applications. + */ + private void gatherPossiblyRunnableAppLists(AbstractCSQueue queue, + List> appLists) { + if (queue.getNumRunnableApps() < queue.getMaxParallelApps()) { + if (queue instanceof LeafQueue) { + appLists.add( + ((LeafQueue)queue).getCopyOfNonRunnableAppSchedulables()); + } else { + for (CSQueue child : queue.getChildQueues()) { + gatherPossiblyRunnableAppLists((AbstractCSQueue) child, appLists); + } + } + } + } + + private int getUserMaxParallelApps(String user) { + CapacitySchedulerConfiguration conf = scheduler.getConfiguration(); + if (conf == null) { + return Integer.MAX_VALUE; + } + + int userMaxParallelApps = conf.getMaxParallelAppsForUser(user); + + return userMaxParallelApps; + } + + /** + * Takes a list of lists, each of which is ordered by start time, and returns + * their elements in order of start time. + * + * We maintain positions in each of the lists. Each next() call advances + * the position in one of the lists. We maintain a heap that orders lists + * by the start time of the app in the current position in that list. + * This allows us to pick which list to advance in O(log(num lists)) instead + * of O(num lists) time. + */ + static class MultiListStartTimeIterator implements + Iterator { + + private List[] appLists; + private int[] curPositionsInAppLists; + private PriorityQueue appListsByCurStartTime; + + @SuppressWarnings("unchecked") + MultiListStartTimeIterator(List> appListList) { + appLists = appListList.toArray(new List[appListList.size()]); + curPositionsInAppLists = new int[appLists.length]; + appListsByCurStartTime = new PriorityQueue(); + for (int i = 0; i < appLists.length; i++) { + long time = appLists[i].isEmpty() ? 
Long.MAX_VALUE : appLists[i].get(0) + .getStartTime(); + appListsByCurStartTime.add(new IndexAndTime(i, time)); + } + } + + @Override + public boolean hasNext() { + return !appListsByCurStartTime.isEmpty() + && appListsByCurStartTime.peek().time != Long.MAX_VALUE; + } + + @Override + public FiCaSchedulerApp next() { + IndexAndTime indexAndTime = appListsByCurStartTime.remove(); + int nextListIndex = indexAndTime.index; + FiCaSchedulerApp next = appLists[nextListIndex] + .get(curPositionsInAppLists[nextListIndex]); + curPositionsInAppLists[nextListIndex]++; + + if (curPositionsInAppLists[nextListIndex] < + appLists[nextListIndex].size()) { + indexAndTime.time = appLists[nextListIndex] + .get(curPositionsInAppLists[nextListIndex]).getStartTime(); + } else { + indexAndTime.time = Long.MAX_VALUE; + } + appListsByCurStartTime.add(indexAndTime); + + return next; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Remove not supported"); + } + + private static class IndexAndTime implements Comparable { + private int index; + private long time; + + IndexAndTime(int index, long time) { + this.index = index; + this.time = time; + } + + @Override + public int compareTo(IndexAndTime o) { + return time < o.time ? -1 : (time > o.time ? 1 : 0); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof IndexAndTime)) { + return false; + } + IndexAndTime other = (IndexAndTime)o; + return other.time == time; + } + + @Override + public int hashCode() { + return (int)time; + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index a6aa82443cc11..bd2acd7611536 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -242,8 +242,11 @@ public Configuration getConf() { private static final long DEFAULT_ASYNC_SCHEDULER_INTERVAL = 5; private long asyncMaxPendingBacklogs; + private CSMaxRunningAppsEnforcer maxRunningEnforcer; + public CapacityScheduler() { super(CapacityScheduler.class.getName()); + this.maxRunningEnforcer = new CSMaxRunningAppsEnforcer(this); } @Override @@ -483,6 +486,7 @@ public void reinitialize(Configuration newConf, RMContext rmContext, super.reinitialize(newConf, rmContext); } + maxRunningEnforcer.updateRunnabilityOnReload(); } finally { writeLock.unlock(); } @@ -1083,6 +1087,9 @@ private void addApplicationAttempt( // SchedulerApplication#setCurrentAppAttempt. 
attempt.setPriority(application.getPriority()); + maxRunningEnforcer.checkRunnabilityWithUpdate(attempt); + maxRunningEnforcer.trackApp(attempt); + queue.submitApplicationAttempt(attempt, application.getUser()); LOG.info("Added Application Attempt " + applicationAttemptId + " to scheduler from user " + application.getUser() + " in queue " @@ -1176,8 +1183,13 @@ private void doneApplicationAttempt( LOG.error( "Cannot finish application " + "from non-leaf queue: " + csQueue.getQueuePath()); - } else{ + } else { csQueue.finishApplicationAttempt(attempt, csQueue.getQueuePath()); + + maxRunningEnforcer.untrackApp(attempt); + if (attempt.isRunnable()) { + maxRunningEnforcer.updateRunnabilityOnAppRemoval(attempt); + } } } finally { writeLock.unlock(); @@ -3253,4 +3265,9 @@ public boolean isMultiNodePlacementEnabled() { public int getNumAsyncSchedulerThreads() { return asyncSchedulerThreads == null ? 0 : asyncSchedulerThreads.size(); } + + @VisibleForTesting + public void setMaxRunningAppsEnforcer(CSMaxRunningAppsEnforcer enforcer) { + this.maxRunningEnforcer = enforcer; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 496dd0b290d49..3bebb44a6f64c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -378,6 +378,10 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur public static final Pattern RESOURCE_PATTERN = Pattern.compile(PATTERN_FOR_ABSOLUTE_RESOURCE); + public static final String MAX_PARALLEL_APPLICATIONS = "max-parallel-apps"; + + public static final int DEFAULT_MAX_PARALLEL_APPLICATIONS = Integer.MAX_VALUE; + /** * Different resource types supported. */ @@ -412,7 +416,11 @@ static String getQueueOrderingPolicyPrefix(String queue) { String queueName = PREFIX + queue + DOT + ORDERING_POLICY + DOT; return queueName; } - + + static String getUserPrefix(String user) { + return PREFIX + "user." + user + DOT; + } + private String getNodeLabelPrefix(String queue, String label) { if (label.equals(CommonNodeLabelsManager.NO_LABEL)) { return getQueuePrefix(queue); @@ -1392,6 +1400,31 @@ public boolean shouldAppFailFast(Configuration conf) { return conf.getBoolean(APP_FAIL_FAST, DEFAULT_APP_FAIL_FAST); } + public Integer getMaxParallelAppsForQueue(String queue) { + int defaultMaxParallelAppsForQueue = + getInt(PREFIX + MAX_PARALLEL_APPLICATIONS, + DEFAULT_MAX_PARALLEL_APPLICATIONS); + + String maxParallelAppsForQueue = get(getQueuePrefix(queue) + + MAX_PARALLEL_APPLICATIONS); + + return (maxParallelAppsForQueue != null) ? + Integer.parseInt(maxParallelAppsForQueue) + : defaultMaxParallelAppsForQueue; + } + + public Integer getMaxParallelAppsForUser(String user) { + int defaultMaxParallelAppsForUser = + getInt(PREFIX + "user." 
+ MAX_PARALLEL_APPLICATIONS, + DEFAULT_MAX_PARALLEL_APPLICATIONS); + String maxParallelAppsForUser = get(getUserPrefix(user) + + MAX_PARALLEL_APPLICATIONS); + + return (maxParallelAppsForUser != null) ? + Integer.parseInt(maxParallelAppsForUser) + : defaultMaxParallelAppsForUser; + } + private static final String PREEMPTION_CONFIG_PREFIX = "yarn.resourcemanager.monitor.capacity.preemption."; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 9f0caf291ea2f..4d83538c981f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -129,6 +129,9 @@ public class LeafQueue extends AbstractCSQueue { List priorityAcls = new ArrayList(); + private final List runnableApps = new ArrayList<>(); + private final List nonRunnableApps = new ArrayList<>(); + @SuppressWarnings({ "unchecked", "rawtypes" }) public LeafQueue(CapacitySchedulerContext cs, String queueName, CSQueue parent, CSQueue old) throws IOException { @@ -159,6 +162,7 @@ protected void setupQueueConfigs(Resource clusterResource) setupQueueConfigs(clusterResource, csContext.getConfiguration()); } + @SuppressWarnings("checkstyle:nowhitespaceafter") protected void setupQueueConfigs(Resource clusterResource, CapacitySchedulerConfiguration conf) throws IOException { @@ -289,7 +293,9 @@ protected void setupQueueConfigs(Resource clusterResource, + " (int)(configuredMaximumSystemApplications * absoluteCapacity)]" + "\n" + "maxApplicationsPerUser = " + maxApplicationsPerUser + " [= (int)(maxApplications * (userLimit / 100.0f) * " - + "userLimitFactor) ]" + "\n" + "usedCapacity = " + + "userLimitFactor) ]" + "\n" + + "maxParallelApps = " + getMaxParallelApps() + "\n" + + "usedCapacity = " + + queueCapacities.getUsedCapacity() + " [= usedResourcesMemory / " + "(clusterResourceMemory * absoluteCapacity)]" + "\n" + "absoluteUsedCapacity = " + absoluteUsedCapacity @@ -386,7 +392,8 @@ void setUserLimitFactor(float userLimitFactor) { public int getNumApplications() { readLock.lock(); try { - return getNumPendingApplications() + getNumActiveApplications(); + return getNumPendingApplications() + getNumActiveApplications() + + getNumNonRunnableApps(); } finally { readLock.unlock(); } @@ -887,16 +894,28 @@ protected void activateApplications() { writeLock.unlock(); } } - + private void addApplicationAttempt(FiCaSchedulerApp application, User user) { writeLock.lock(); try { + applicationAttemptMap.put(application.getApplicationAttemptId(), + application); + + if (application.isRunnable()) { + runnableApps.add(application); + LOG.debug("Adding runnable application: {}", + application.getApplicationAttemptId()); + } else { + nonRunnableApps.add(application); + LOG.info("Application attempt {} is not runnable," + + " parallel limit reached", application.getApplicationAttemptId()); + return; + } + // Accept user.submitApplication(); getPendingAppsOrderingPolicy().addSchedulableEntity(application); - 
applicationAttemptMap.put(application.getApplicationAttemptId(), - application); // Activate applications if (Resources.greaterThan(resourceCalculator, lastClusterResource, @@ -917,7 +936,9 @@ private void addApplicationAttempt(FiCaSchedulerApp application, .getPendingApplications() + " #user-active-applications: " + user .getActiveApplications() + " #queue-pending-applications: " + getNumPendingApplications() + " #queue-active-applications: " - + getNumActiveApplications()); + + getNumActiveApplications() + + " #queue-nonrunnable-applications: " + + getNumNonRunnableApps()); } finally { writeLock.unlock(); } @@ -950,6 +971,15 @@ private void removeApplicationAttempt( // which is caused by wrong invoking order, will fix UT separately User user = usersManager.getUserAndAddIfAbsent(userName); + boolean runnable = runnableApps.remove(application); + if (!runnable) { + // removeNonRunnableApp acquires the write lock again, which is fine + if (!removeNonRunnableApp(application)) { + LOG.error("Given app to remove " + application + + " does not exist in queue " + getQueuePath()); + } + } + String partitionName = application.getAppAMNodePartitionName(); boolean wasActive = orderingPolicy.removeSchedulableEntity(application); if (!wasActive) { @@ -2229,4 +2259,43 @@ private void updateQueuePreemptionMetrics(RMContainer rmc) { usedSeconds); metrics.updatePreemptedForCustomResources(containerResource); } + + @Override + int getNumRunnableApps() { + readLock.lock(); + try { + return runnableApps.size(); + } finally { + readLock.unlock(); + } + } + + int getNumNonRunnableApps() { + readLock.lock(); + try { + return nonRunnableApps.size(); + } finally { + readLock.unlock(); + } + } + + boolean removeNonRunnableApp(FiCaSchedulerApp app) { + writeLock.lock(); + try { + return nonRunnableApps.remove(app); + } finally { + writeLock.unlock(); + } + } + + List getCopyOfNonRunnableAppSchedulables() { + List appsToReturn = new ArrayList<>(); + readLock.lock(); + try { + appsToReturn.addAll(nonRunnableApps); + } finally { + readLock.unlock(); + } + return appsToReturn; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 95f5468ebafaf..bbb80ba73361c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -93,6 +93,8 @@ public class ParentQueue extends AbstractCSQueue { private long lastSkipQueueDebugLoggingTimestamp = -1; + private int runnableApps; + public ParentQueue(CapacitySchedulerContext cs, String queueName, CSQueue parent, CSQueue old) throws IOException { super(cs, queueName, parent, old); @@ -1383,4 +1385,32 @@ public void stopQueue() { public QueueOrderingPolicy getQueueOrderingPolicy() { return queueOrderingPolicy; } + + @Override + int getNumRunnableApps() { + readLock.lock(); + try { + return runnableApps; + } finally { + readLock.unlock(); + } + } + + void incrementRunnableApps() { + writeLock.lock(); + try { + runnableApps++; + } finally { 
+ writeLock.unlock(); + } + } + + void decrementRunnableApps() { + writeLock.lock(); + try { + runnableApps--; + } finally { + writeLock.unlock(); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index 8f6fb6388724d..cf6ffd9823525 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java @@ -112,6 +112,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt { private AbstractContainerAllocator containerAllocator; + private boolean runnable; + /** * to hold the message if its app doesn't not get container from a node */ @@ -139,6 +141,7 @@ public FiCaSchedulerApp(ApplicationAttemptId applicationAttemptId, RMContext rmContext, Priority appPriority, boolean isAttemptRecovering, ActivitiesManager activitiesManager) { super(applicationAttemptId, user, queue, abstractUsersManager, rmContext); + this.runnable = true; RMApp rmApp = rmContext.getRMApps().get(getApplicationId()); @@ -1219,4 +1222,22 @@ public boolean moveReservation(RMContainer reservedContainer, writeLock.unlock(); } } + + public void setRunnable(boolean runnable) { + writeLock.lock(); + try { + this.runnable = runnable; + } finally { + writeLock.unlock(); + } + } + + public boolean isRunnable() { + readLock.lock(); + try { + return runnable; + } finally { + readLock.unlock(); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java index ff5738c03aab6..389dd62e4afbd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java @@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSMaxRunningAppsEnforcer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; @@ -175,6 +176,9 @@ private CapacityScheduler initializeCapacityScheduler() { CapacityScheduler cs = Mockito.spy(new 
CapacityScheduler()); cs.setConf(conf); + CSMaxRunningAppsEnforcer enforcer = + Mockito.mock(CSMaxRunningAppsEnforcer.class); + cs.setMaxRunningAppsEnforcer(enforcer); mockRMContext = ReservationSystemTestUtil.createRMContext(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java index bad943c0b585d..93d8d5a7ceac5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java @@ -184,6 +184,7 @@ private FiCaSchedulerApp getMockApplication(int appId, String user, doReturn(amResource).when(application).getAMResource( CommonNodeLabelsManager.NO_LABEL); when(application.compareInputOrderTo(any(FiCaSchedulerApp.class))).thenCallRealMethod(); + when(application.isRunnable()).thenReturn(true); return application; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSMaxRunningAppsEnforcer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSMaxRunningAppsEnforcer.java new file mode 100644 index 0000000000000..e3c05a1b7cca5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSMaxRunningAppsEnforcer.java @@ -0,0 +1,278 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX; +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager; +import org.apache.hadoop.yarn.util.ControlledClock; +import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; +import org.junit.Before; +import org.junit.Test; + +public class TestCSMaxRunningAppsEnforcer { + private CapacitySchedulerQueueManager queueManager; + private CSMaxRunningAppsEnforcer maxAppsEnforcer; + private int appNum; + private ControlledClock clock; + private RMContext rmContext; + private CapacityScheduler scheduler; + private ActivitiesManager activitiesManager; + private CapacitySchedulerConfiguration csConfig; + + @Before + public void setup() throws IOException { + csConfig = new CapacitySchedulerConfiguration(); + rmContext = mock(RMContext.class); + when(rmContext.getYarnConfiguration()).thenReturn(csConfig); + when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap<>()); + clock = new ControlledClock(); + scheduler = mock(CapacityScheduler.class); + when(rmContext.getScheduler()).thenReturn(scheduler); + when(scheduler.getConf()).thenReturn(csConfig); + when(scheduler.getConfig()).thenReturn(csConfig); + when(scheduler.getConfiguration()).thenReturn(csConfig); + when(scheduler.getResourceCalculator()).thenReturn( + new DefaultResourceCalculator()); + when(scheduler.getRMContext()).thenReturn(rmContext); + when(scheduler.getClusterResource()) + .thenReturn(Resource.newInstance(16384, 8)); + when(scheduler.getMinimumAllocation()) + .thenReturn(Resource.newInstance(1024, 1)); + when(scheduler.getMinimumResourceCapability()) + .thenReturn(Resource.newInstance(1024, 1)); + activitiesManager = mock(ActivitiesManager.class); + maxAppsEnforcer = new CSMaxRunningAppsEnforcer(scheduler); + appNum = 0; + setupQueues(csConfig); + RMNodeLabelsManager labelManager = mock(RMNodeLabelsManager.class); + AppPriorityACLsManager appPriorityACLManager = + mock(AppPriorityACLsManager.class); + when(rmContext.getNodeLabelManager()).thenReturn(labelManager); + when(labelManager.getResourceByLabel(anyString(), any(Resource.class))) + .thenReturn(Resource.newInstance(16384, 8)); + queueManager = new CapacitySchedulerQueueManager(csConfig, labelManager, + appPriorityACLManager); + queueManager.setCapacitySchedulerContext(scheduler); + queueManager.initializeQueues(csConfig); + } + + private void 
setupQueues(CapacitySchedulerConfiguration config) { + config.setQueues(CapacitySchedulerConfiguration.ROOT, + new String[] {"queue1", "queue2"}); + config.setQueues("root.queue1", new String[] {"subqueue1", "subqueue2"}); + config.setQueues("root.queue1.subqueue1", new String[] {"leaf1"}); + config.setQueues("root.queue1.subqueue2", new String[] {"leaf2"}); + config.setFloat(PREFIX + "root.capacity", 100.0f); + config.setFloat(PREFIX + "root.queue1.capacity", 50.0f); + config.setFloat(PREFIX + "root.queue2.capacity", 50.0f); + config.setFloat(PREFIX + "root.queue1.subqueue1.capacity", 50.0f); + config.setFloat(PREFIX + "root.queue1.subqueue2.capacity", 50.0f); + config.setFloat(PREFIX + "root.queue1.subqueue1.leaf1.capacity", 100.0f); + config.setFloat(PREFIX + "root.queue1.subqueue2.leaf2.capacity", 100.0f); + } + + private FiCaSchedulerApp addApp(LeafQueue queue, String user) { + ApplicationId appId = ApplicationId.newInstance(0, appNum++); + ApplicationAttemptId attId = ApplicationAttemptId.newInstance(appId, 0); + + FiCaSchedulerApp attempt = new FiCaSchedulerApp(attId, + user, queue, queue.getAbstractUsersManager(), + rmContext, Priority.newInstance(0), false, + activitiesManager) { + + private final long startTime = clock.getTime(); + + @Override + public long getStartTime() { + return startTime; + } + }; + + maxAppsEnforcer.checkRunnabilityWithUpdate(attempt); + maxAppsEnforcer.trackApp(attempt); + + queue.submitApplicationAttempt(attempt, attempt.getUser()); + + return attempt; + } + + private void removeApp(FiCaSchedulerApp attempt) { + LeafQueue queue = attempt.getCSLeafQueue(); + queue.finishApplicationAttempt(attempt, queue.getQueuePath()); + maxAppsEnforcer.untrackApp(attempt); + maxAppsEnforcer.updateRunnabilityOnAppRemoval(attempt); + } + + @Test + public void testRemoveDoesNotEnableAnyApp() { + ParentQueue root = + (ParentQueue) queueManager.getRootQueue(); + LeafQueue leaf1 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue1.leaf1"); + LeafQueue leaf2 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue2.leaf2"); + root.setMaxParallelApps(2); + leaf1.setMaxParallelApps(1); + leaf2.setMaxParallelApps(1); + + FiCaSchedulerApp app1 = addApp(leaf1, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + assertEquals(1, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + + removeApp(app1); + assertEquals(0, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + } + + @Test + public void testRemoveEnablesAppOnCousinQueue() { + LeafQueue leaf1 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue1.leaf1"); + LeafQueue leaf2 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue2.leaf2"); + ParentQueue queue1 = (ParentQueue) queueManager + .getQueueByFullName("root.queue1"); + queue1.setMaxParallelApps(2); + + FiCaSchedulerApp app1 = addApp(leaf1, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + assertEquals(1, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + + removeApp(app1); + assertEquals(0, leaf1.getNumRunnableApps()); + assertEquals(2, leaf2.getNumRunnableApps()); + assertEquals(0, leaf2.getNumNonRunnableApps()); + } + + @Test + public void testRemoveEnablesOneByQueueOneByUser() { + LeafQueue leaf1 = (LeafQueue) queueManager + 
.getQueueByFullName("root.queue1.subqueue1.leaf1"); + LeafQueue leaf2 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue2.leaf2"); + leaf1.setMaxParallelApps(2); + //userMaxApps.put("user1", 1); + csConfig.setInt(PREFIX + "user.user1.max-parallel-apps", 1); + + FiCaSchedulerApp app1 = addApp(leaf1, "user1"); + addApp(leaf1, "user2"); + addApp(leaf1, "user3"); + addApp(leaf2, "user1"); + assertEquals(2, leaf1.getNumRunnableApps()); + assertEquals(1, leaf1.getNumNonRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + + removeApp(app1); + assertEquals(2, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(0, leaf1.getNumNonRunnableApps()); + assertEquals(0, leaf2.getNumNonRunnableApps()); + } + + @Test + public void testRemoveEnablingOrderedByStartTime() { + LeafQueue leaf1 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue1.leaf1"); + LeafQueue leaf2 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue2.leaf2"); + ParentQueue queue1 = (ParentQueue) queueManager + .getQueueByFullName("root.queue1"); + queue1.setMaxParallelApps(2); + FiCaSchedulerApp app1 = addApp(leaf1, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + clock.tickSec(20); + addApp(leaf1, "user"); + assertEquals(1, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(1, leaf1.getNumNonRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + removeApp(app1); + assertEquals(0, leaf1.getNumRunnableApps()); + assertEquals(2, leaf2.getNumRunnableApps()); + assertEquals(0, leaf2.getNumNonRunnableApps()); + } + + @Test + public void testMultipleAppsWaitingOnCousinQueue() { + LeafQueue leaf1 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue1.leaf1"); + LeafQueue leaf2 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue2.leaf2"); + ParentQueue queue1 = (ParentQueue) queueManager + .getQueueByFullName("root.queue1"); + queue1.setMaxParallelApps(2); + FiCaSchedulerApp app1 = addApp(leaf1, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + assertEquals(1, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(2, leaf2.getNumNonRunnableApps()); + removeApp(app1); + assertEquals(0, leaf1.getNumRunnableApps()); + assertEquals(2, leaf2.getNumRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + } + + @Test + public void testMultiListStartTimeIteratorEmptyAppLists() { + List<List<FiCaSchedulerApp>> lists = + new ArrayList<List<FiCaSchedulerApp>>(); + lists.add(Arrays.asList(mockAppAttempt(1))); + lists.add(Arrays.asList(mockAppAttempt(2))); + Iterator<FiCaSchedulerApp> iter = + new CSMaxRunningAppsEnforcer.MultiListStartTimeIterator(lists); + assertEquals(1, iter.next().getStartTime()); + assertEquals(2, iter.next().getStartTime()); + } + + private FiCaSchedulerApp mockAppAttempt(long startTime) { + FiCaSchedulerApp schedApp = mock(FiCaSchedulerApp.class); + when(schedApp.getStartTime()).thenReturn(startTime); + return schedApp; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMaxParallelApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMaxParallelApps.java new file mode 100644 index 0000000000000..d2e3278b0dc6c 
--- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMaxParallelApps.java @@ -0,0 +1,312 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.MockAM; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmissionData; +import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmitter; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; +import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.Sets; + +public class TestCapacitySchedulerMaxParallelApps { + private CapacitySchedulerConfiguration conf; + private MockRM rm; + private MockNM nm1; + + private RMApp app1; + private MockAM am1; + private RMApp app2; + private MockAM am2; + private RMApp app3; + private RMAppAttempt attempt3; + private RMApp app4; + private RMAppAttempt attempt4; + + private ParentQueue rootQueue; + private LeafQueue defaultQueue; + + @Before + public void setUp() { + CapacitySchedulerConfiguration config = + new CapacitySchedulerConfiguration(); + config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, + DominantResourceCalculator.class.getName()); + + conf = new 
CapacitySchedulerConfiguration(config); + } + + @After + public void after() { + if (rm != null) { + rm.stop(); + } + } + + @Test(timeout = 30000) + public void testMaxParallelAppsExceedsQueueSetting() throws Exception { + conf.setInt("yarn.scheduler.capacity.root.default.max-parallel-apps", 2); + executeCommonStepsAndChecks(); + testWhenSettingsExceeded(); + } + + @Test(timeout = 30000) + public void testMaxParallelAppsExceedsDefaultQueueSetting() + throws Exception { + conf.setInt("yarn.scheduler.capacity.max-parallel-apps", 2); + executeCommonStepsAndChecks(); + testWhenSettingsExceeded(); + } + + @Test(timeout = 30000) + public void testMaxParallelAppsExceedsUserSetting() throws Exception { + conf.setInt("yarn.scheduler.capacity.user.testuser.max-parallel-apps", 2); + executeCommonStepsAndChecks(); + testWhenSettingsExceeded(); + } + + @Test(timeout = 30000) + public void testMaxParallelAppsExceedsDefaultUserSetting() throws Exception { + conf.setInt("yarn.scheduler.capacity.user.max-parallel-apps", 2); + executeCommonStepsAndChecks(); + testWhenSettingsExceeded(); + } + + @Test(timeout = 30000) + public void testMaxParallelAppsWhenReloadingConfig() throws Exception { + conf.setInt("yarn.scheduler.capacity.root.default.max-parallel-apps", 2); + + executeCommonStepsAndChecks(); + + RMContext rmContext = rm.getRMContext(); + // Disable parallel apps setting + max out AM percent + conf.unset("yarn.scheduler.capacity.root.default.max-parallel-apps"); + conf.setFloat(PREFIX + "maximum-am-resource-percent", 1.0f); + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + cs.reinitialize(conf, rmContext); + + // Both app #3 and app #4 should transition to RUNNABLE + launchAMandWaitForRunning(app3, attempt3, nm1); + launchAMandWaitForRunning(app4, attempt4, nm1); + verifyRunningAndAcceptedApps(4, 0); + } + + @Test(timeout = 30000) + public void testMaxAppsReachedWithNonRunnableApps() throws Exception { + conf.setInt("yarn.scheduler.capacity.root.default.max-parallel-apps", 2); + conf.setInt("yarn.scheduler.capacity.root.default.maximum-applications", 4); + executeCommonStepsAndChecks(); + + RMApp app5 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(512, rm) + .withAppName("app5") + .withUser("testuser") + .withQueue("default") + .withWaitForAppAcceptedState(false) + .build()); + + rm.waitForState(app5.getApplicationId(), RMAppState.FAILED); + } + + private void executeCommonStepsAndChecks() throws Exception { + rm = new MockRM(conf); + rm.start(); + + nm1 = rm.registerNode("h1:1234", 4096, 8); + rm.registerNode("h2:1234", 4096, 8); + rm.registerNode("h3:1234", 4096, 8); + + rm.drainEvents(); + + app1 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(512, rm) + .withAppName("app1") + .withUser("testuser") + .withQueue("default") + .build()); + + am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); + + app2 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(512, rm) + .withAppName("app2") + .withUser("testuser") + .withQueue("default") + .build()); + am2 = MockRM.launchAndRegisterAM(app2, rm, nm1); + + app3 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(512, rm) + .withAppName("app3") + .withUser("testuser") + .withQueue("default") + .build()); + attempt3 = MockRM.waitForAttemptScheduled(app3, rm); + + app4 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(512, rm) + .withAppName("app4") + .withUser("testuser") + 
.withQueue("default") + .build()); + attempt4 = MockRM.waitForAttemptScheduled(app4, rm); + + // Check that app attempt #3 and #4 are non-runnable + rootQueue = getRootQueue(); + defaultQueue = getDefaultQueue(); + Set nonRunnables = + Sets.newHashSet( + attempt3.getAppAttemptId(), + attempt4.getAppAttemptId()); + verifyRunnableAppsInParent(rootQueue, 2); + verifyRunnableAppsInLeaf(defaultQueue, 2, nonRunnables); + verifyRunningAndAcceptedApps(2, 2); + } + + private void testWhenSettingsExceeded() throws Exception { + // Stop app #1 + unregisterAMandWaitForFinish(app1, am1, nm1); + + // Launch app #3 + launchAMandWaitForRunning(app3, attempt3, nm1); + + // Check that attempt #4 is still non-runnable + verifyRunnableAppsInParent(rootQueue, 2); + verifyRunnableAppsInLeaf(defaultQueue, 2, + Collections.singleton(attempt4.getAppAttemptId())); + verifyRunningAndAcceptedApps(2, 1); + + // Stop app #2 + unregisterAMandWaitForFinish(app2, am2, nm1); + + // Launch app #4 + launchAMandWaitForRunning(app4, attempt4, nm1); + verifyRunnableAppsInParent(rootQueue, 2); + verifyRunnableAppsInLeaf(defaultQueue, 2, + Collections.emptySet()); + verifyRunningAndAcceptedApps(2, 0); + } + + @SuppressWarnings("checkstyle:hiddenfield") + private LeafQueue getDefaultQueue() { + CSQueue defaultQueue = + ((CapacityScheduler) rm.getResourceScheduler()).getQueue("default"); + + return (LeafQueue) defaultQueue; + } + + private ParentQueue getRootQueue() { + CSQueue root = + ((CapacityScheduler) rm.getResourceScheduler()).getQueue("root"); + + return (ParentQueue) root; + } + + private void verifyRunnableAppsInParent(ParentQueue queue, + int expectedRunnable) { + assertEquals("Num of runnable apps", expectedRunnable, + queue.getNumRunnableApps()); + } + + private void verifyRunnableAppsInLeaf(LeafQueue queue, int expectedRunnable, + Set nonRunnableIds) { + assertEquals("Num of runnable apps", expectedRunnable, + queue.getNumRunnableApps()); + + queue.getCopyOfNonRunnableAppSchedulables() + .stream() + .map(fca -> fca.getApplicationAttemptId()) + .forEach(id -> assertTrue(id + " not found as non-runnable", + nonRunnableIds.contains(id))); + } + + private void verifyRunningAndAcceptedApps(int expectedRunning, + int expectedAccepted) throws YarnException { + GetApplicationsRequest request = GetApplicationsRequest.newInstance(); + + GetApplicationsResponse resp = + rm.getClientRMService().getApplications(request); + + List apps = resp.getApplicationList(); + + long runningCount = apps + .stream() + .filter(report -> + report.getYarnApplicationState() == YarnApplicationState.RUNNING) + .count(); + + long acceptedCount = apps + .stream() + .filter(report -> + report.getYarnApplicationState() == YarnApplicationState.ACCEPTED) + .count(); + + assertEquals("Running apps count", expectedRunning, runningCount); + assertEquals("Accepted apps count", expectedAccepted, acceptedCount); + } + + private void unregisterAMandWaitForFinish(RMApp app, MockAM am, MockNM nm) + throws Exception { + am.unregisterAppAttempt(); + nm.nodeHeartbeat(app.getCurrentAppAttempt().getAppAttemptId(), 1, + ContainerState.COMPLETE); + rm.waitForState(app.getCurrentAppAttempt().getAppAttemptId(), + RMAppAttemptState.FINISHED); + } + + @SuppressWarnings("rawtypes") + private MockAM launchAMandWaitForRunning(RMApp app, RMAppAttempt attempt, + MockNM nm) throws Exception { + nm.nodeHeartbeat(true); + ((AbstractYarnScheduler)rm.getResourceScheduler()).update(); + rm.drainEvents(); + nm.nodeHeartbeat(true); + MockAM am = 
rm.sendAMLaunched(attempt.getAppAttemptId()); + am.registerAppAttempt(); + rm.waitForState(app.getApplicationId(), RMAppState.RUNNING); + + return am; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 3353eacbd704f..f664e038f164d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -456,6 +456,8 @@ public void testPolicyConfiguration() throws Exception { @Test public void testAppAttemptMetrics() throws Exception { + CSMaxRunningAppsEnforcer enforcer = mock(CSMaxRunningAppsEnforcer.class); + cs.setMaxRunningAppsEnforcer(enforcer); // Manipulate queue 'a' LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java index 13957e9b417b7..aa3b5919fcc60 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java @@ -218,6 +218,7 @@ private FiCaSchedulerApp getMockApplication(ApplicationId appId, String user, CommonNodeLabelsManager.NO_LABEL); when(application.compareInputOrderTo(any(FiCaSchedulerApp.class))) .thenCallRealMethod(); + when(application.isRunnable()).thenReturn(true); return application; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueStateManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueStateManager.java index a4c1300df37f6..e893717a8dd0f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueStateManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueStateManager.java @@ -157,6 +157,7 @@ private FiCaSchedulerApp getMockApplication(ApplicationId appId, String user, CommonNodeLabelsManager.NO_LABEL); when(application.compareInputOrderTo(any(FiCaSchedulerApp.class))) .thenCallRealMethod(); + when(application.isRunnable()).thenReturn(true); return application; } } From 
3472c3efc0014237d0cc4d9a989393b8513d2ab6 Mon Sep 17 00:00:00 2001 From: Mehakmeet Singh Date: Fri, 19 Jun 2020 18:33:49 +0530 Subject: [PATCH 040/131] HADOOP-17065. Add Network Counters to ABFS (#2056) Contributed by Mehakmeet Singh. --- ...rumentation.java => AbfsCountersImpl.java} | 13 +- .../hadoop/fs/azurebfs/AbfsStatistic.java | 20 +- .../fs/azurebfs/AzureBlobFileSystem.java | 15 +- .../fs/azurebfs/AzureBlobFileSystemStore.java | 15 +- .../fs/azurebfs/services/AbfsClient.java | 25 +- .../AbfsClientThrottlingAnalyzer.java | 7 +- .../AbfsClientThrottlingIntercept.java | 14 +- .../azurebfs/services/AbfsRestOperation.java | 24 +- .../azurebfs/AbstractAbfsIntegrationTest.java | 3 +- .../azurebfs/ITestAbfsNetworkStatistics.java | 253 ++++++++++++++++++ .../fs/azurebfs/ITestAbfsStatistics.java | 2 +- .../azurebfs/TestAbfsNetworkStatistics.java | 67 +++++ .../fs/azurebfs/TestAbfsStatistics.java | 2 +- .../fs/azurebfs/services/TestAbfsClient.java | 4 +- 14 files changed, 430 insertions(+), 34 deletions(-) rename hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/{AbfsInstrumentation.java => AbfsCountersImpl.java} (96%) create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsInstrumentation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java similarity index 96% rename from hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsInstrumentation.java rename to hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java index 9094c4065de0c..57cc3eada4847 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsInstrumentation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java @@ -41,7 +41,7 @@ /** * Instrumentation of Abfs counters. */ -public class AbfsInstrumentation implements AbfsCounters { +public class AbfsCountersImpl implements AbfsCounters { /** * Single context for all the Abfs counters to separate them from other @@ -78,10 +78,17 @@ public class AbfsInstrumentation implements AbfsCounters { DIRECTORIES_DELETED, FILES_CREATED, FILES_DELETED, - ERROR_IGNORED + ERROR_IGNORED, + CONNECTIONS_MADE, + SEND_REQUESTS, + GET_RESPONSES, + BYTES_SENT, + BYTES_RECEIVED, + READ_THROTTLES, + WRITE_THROTTLES }; - public AbfsInstrumentation(URI uri) { + public AbfsCountersImpl(URI uri) { UUID fileSystemInstanceId = UUID.randomUUID(); registry.tag(REGISTRY_ID, "A unique identifier for the instance", diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsStatistic.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsStatistic.java index a9867aa12b85e..2935cd754315d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsStatistic.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsStatistic.java @@ -22,7 +22,7 @@ /** * Statistic which are collected in Abfs. - * Available as metrics in {@link AbfsInstrumentation}. + * Available as metrics in {@link AbfsCountersImpl}. 
*/ public enum AbfsStatistic { @@ -57,7 +57,23 @@ public enum AbfsStatistic { FILES_DELETED("files_deleted", "Total number of files deleted from the object store."), ERROR_IGNORED("error_ignored", - "Errors caught and ignored."); + "Errors caught and ignored."), + + //Network statistics. + CONNECTIONS_MADE("connections_made", + "Total number of times a connection was made with the data store."), + SEND_REQUESTS("send_requests", + "Total number of times http requests were sent to the data store."), + GET_RESPONSES("get_responses", + "Total number of times a response was received."), + BYTES_SENT("bytes_sent", + "Total bytes uploaded."), + BYTES_RECEIVED("bytes_received", + "Total bytes received."), + READ_THROTTLES("read_throttles", + "Total number of times a read operation is throttled."), + WRITE_THROTTLES("write_throttles", + "Total number of times a write operation is throttled."); private String statName; private String statDescription; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java index 6694c134b4187..daa1905366543 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java @@ -97,7 +97,7 @@ public class AzureBlobFileSystem extends FileSystem { private boolean delegationTokenEnabled = false; private AbfsDelegationTokenManager delegationTokenManager; - private AbfsCounters instrumentation; + private AbfsCounters abfsCounters; @Override public void initialize(URI uri, Configuration configuration) @@ -109,11 +109,12 @@ public void initialize(URI uri, Configuration configuration) LOG.debug("Initializing AzureBlobFileSystem for {}", uri); this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority()); - this.abfsStore = new AzureBlobFileSystemStore(uri, this.isSecureScheme(), configuration); + abfsCounters = new AbfsCountersImpl(uri); + this.abfsStore = new AzureBlobFileSystemStore(uri, this.isSecureScheme(), + configuration, abfsCounters); LOG.trace("AzureBlobFileSystemStore init complete"); final AbfsConfiguration abfsConfiguration = abfsStore.getAbfsConfiguration(); - instrumentation = new AbfsInstrumentation(uri); this.setWorkingDirectory(this.getHomeDirectory()); if (abfsConfiguration.getCreateRemoteFileSystemDuringInitialization()) { @@ -150,8 +151,8 @@ public String toString() { sb.append("uri=").append(uri); sb.append(", user='").append(abfsStore.getUser()).append('\''); sb.append(", primaryUserGroup='").append(abfsStore.getPrimaryGroup()).append('\''); - if (instrumentation != null) { - sb.append(", Statistics: {").append(instrumentation.formString("{", "=", + if (abfsCounters != null) { + sb.append(", Statistics: {").append(abfsCounters.formString("{", "=", "}", true)); sb.append("}"); } @@ -392,7 +393,7 @@ private void statIncrement(AbfsStatistic statistic) { * @param statistic the Statistic to be incremented. 
*/ private void incrementStatistic(AbfsStatistic statistic) { - instrumentation.incrementCounter(statistic, 1); + abfsCounters.incrementCounter(statistic, 1); } /** @@ -1241,7 +1242,7 @@ boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { @VisibleForTesting Map getInstrumentationMap() { - return instrumentation.toMap(); + return abfsCounters.toMap(); } @Override diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index a01d31a182757..397afc8efbb18 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -82,6 +82,7 @@ import org.apache.hadoop.fs.azurebfs.oauth2.IdentityTransformerInterface; import org.apache.hadoop.fs.azurebfs.services.AbfsAclHelper; import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; import org.apache.hadoop.fs.azurebfs.services.AbfsInputStream; import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamContext; @@ -143,8 +144,9 @@ public class AzureBlobFileSystemStore implements Closeable { private final IdentityTransformerInterface identityTransformer; private final AbfsPerfTracker abfsPerfTracker; - public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration configuration) - throws IOException { + public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, + Configuration configuration, + AbfsCounters abfsCounters) throws IOException { this.uri = uri; String[] authorityParts = authorityParts(uri); final String fileSystemName = authorityParts[0]; @@ -182,7 +184,7 @@ public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration c boolean usingOauth = (authType == AuthType.OAuth); boolean useHttps = (usingOauth || abfsConfiguration.isHttpsAlwaysUsed()) ? 
true : isSecureScheme; this.abfsPerfTracker = new AbfsPerfTracker(fileSystemName, accountName, this.abfsConfiguration); - initializeClient(uri, fileSystemName, accountName, useHttps); + initializeClient(uri, fileSystemName, accountName, useHttps, abfsCounters); final Class identityTransformerClass = configuration.getClass(FS_AZURE_IDENTITY_TRANSFORM_CLASS, IdentityTransformer.class, IdentityTransformerInterface.class); @@ -1170,7 +1172,8 @@ public boolean isAtomicRenameKey(String key) { return isKeyForDirectorySet(key, azureAtomicRenameDirSet); } - private void initializeClient(URI uri, String fileSystemName, String accountName, boolean isSecure) + private void initializeClient(URI uri, String fileSystemName, + String accountName, boolean isSecure, AbfsCounters abfsCounters) throws IOException { if (this.client != null) { return; @@ -1214,11 +1217,11 @@ private void initializeClient(URI uri, String fileSystemName, String accountName if (tokenProvider != null) { this.client = new AbfsClient(baseUrl, creds, abfsConfiguration, new ExponentialRetryPolicy(abfsConfiguration.getMaxIoRetries()), - tokenProvider, abfsPerfTracker); + tokenProvider, abfsPerfTracker, abfsCounters); } else { this.client = new AbfsClient(baseUrl, creds, abfsConfiguration, new ExponentialRetryPolicy(abfsConfiguration.getMaxIoRetries()), - sasTokenProvider, abfsPerfTracker); + sasTokenProvider, abfsPerfTracker, abfsCounters); } LOG.trace("AbfsClient init complete"); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index f104e7b9c4d39..f614bbd41d2ac 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -73,11 +73,13 @@ public class AbfsClient implements Closeable { private final AuthType authType; private AccessTokenProvider tokenProvider; private SASTokenProvider sasTokenProvider; + private final AbfsCounters abfsCounters; private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials, final AbfsConfiguration abfsConfiguration, final ExponentialRetryPolicy exponentialRetryPolicy, - final AbfsPerfTracker abfsPerfTracker) { + final AbfsPerfTracker abfsPerfTracker, + final AbfsCounters abfsCounters) { this.baseUrl = baseUrl; this.sharedKeyCredentials = sharedKeyCredentials; String baseUrlString = baseUrl.toString(); @@ -104,14 +106,17 @@ private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCreden this.userAgent = initializeUserAgent(abfsConfiguration, sslProviderName); this.abfsPerfTracker = abfsPerfTracker; + this.abfsCounters = abfsCounters; } public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials, final AbfsConfiguration abfsConfiguration, final ExponentialRetryPolicy exponentialRetryPolicy, final AccessTokenProvider tokenProvider, - final AbfsPerfTracker abfsPerfTracker) { - this(baseUrl, sharedKeyCredentials, abfsConfiguration, exponentialRetryPolicy, abfsPerfTracker); + final AbfsPerfTracker abfsPerfTracker, + final AbfsCounters abfsCounters) { + this(baseUrl, sharedKeyCredentials, abfsConfiguration, + exponentialRetryPolicy, abfsPerfTracker, abfsCounters); this.tokenProvider = tokenProvider; } @@ -119,8 +124,10 @@ public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredent final AbfsConfiguration abfsConfiguration, 
final ExponentialRetryPolicy exponentialRetryPolicy, final SASTokenProvider sasTokenProvider, - final AbfsPerfTracker abfsPerfTracker) { - this(baseUrl, sharedKeyCredentials, abfsConfiguration, exponentialRetryPolicy, abfsPerfTracker); + final AbfsPerfTracker abfsPerfTracker, + final AbfsCounters abfsCounters) { + this(baseUrl, sharedKeyCredentials, abfsConfiguration, + exponentialRetryPolicy, abfsPerfTracker, abfsCounters); this.sasTokenProvider = sasTokenProvider; } @@ -892,4 +899,12 @@ URL getBaseUrl() { public SASTokenProvider getSasTokenProvider() { return this.sasTokenProvider; } + + /** + * Getter for abfsCounters from AbfsClient. + * @return AbfsCounters instance. + */ + protected AbfsCounters getAbfsCounters() { + return abfsCounters; + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingAnalyzer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingAnalyzer.java index f1e5aaae6835c..e1a799b7a2648 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingAnalyzer.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingAnalyzer.java @@ -114,16 +114,19 @@ public void addBytesTransferred(long count, boolean isFailedOperation) { /** * Suspends the current storage operation, as necessary, to reduce throughput. + * @return true if Thread sleeps(Throttling occurs) else false. */ - public void suspendIfNecessary() { + public boolean suspendIfNecessary() { int duration = sleepDuration; if (duration > 0) { try { Thread.sleep(duration); + return true; } catch (InterruptedException ie) { Thread.currentThread().interrupt(); } } + return false; } @VisibleForTesting @@ -269,4 +272,4 @@ static class AbfsOperationMetrics { this.operationsSuccessful = new AtomicLong(); } } -} \ No newline at end of file +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java index 1c6ce17a38c3c..7303e833418db 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java @@ -23,6 +23,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.azurebfs.AbfsStatistic; import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; /** @@ -103,17 +104,24 @@ static void updateMetrics(AbfsRestOperationType operationType, * uses this to suspend the request, if necessary, to minimize errors and * maximize throughput. 
*/ - static void sendingRequest(AbfsRestOperationType operationType) { + static void sendingRequest(AbfsRestOperationType operationType, + AbfsCounters abfsCounters) { if (!isAutoThrottlingEnabled) { return; } switch (operationType) { case ReadFile: - singleton.readThrottler.suspendIfNecessary(); + if (singleton.readThrottler.suspendIfNecessary() + && abfsCounters != null) { + abfsCounters.incrementCounter(AbfsStatistic.READ_THROTTLES, 1); + } break; case Append: - singleton.writeThrottler.suspendIfNecessary(); + if (singleton.writeThrottler.suspendIfNecessary() + && abfsCounters != null) { + abfsCounters.incrementCounter(AbfsStatistic.WRITE_THROTTLES, 1); + } break; default: break; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java index 521da96e9603e..f3986d4b1f35d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java @@ -27,6 +27,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.azurebfs.AbfsStatistic; import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; @@ -66,6 +67,7 @@ public class AbfsRestOperation { private int retryCount = 0; private AbfsHttpOperation result; + private AbfsCounters abfsCounters; public AbfsHttpOperation getResult() { return result; @@ -131,6 +133,7 @@ String getSasToken() { this.hasRequestBody = (AbfsHttpConstants.HTTP_METHOD_PUT.equals(method) || AbfsHttpConstants.HTTP_METHOD_PATCH.equals(method)); this.sasToken = sasToken; + this.abfsCounters = client.getAbfsCounters(); } /** @@ -160,6 +163,7 @@ String getSasToken() { this.buffer = buffer; this.bufferOffset = bufferOffset; this.bufferLength = bufferLength; + this.abfsCounters = client.getAbfsCounters(); } /** @@ -205,6 +209,7 @@ private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileS try { // initialize the HTTP request and open the connection httpOperation = new AbfsHttpOperation(url, method, requestHeaders); + incrementCounter(AbfsStatistic.CONNECTIONS_MADE, 1); switch(client.getAuthType()) { case Custom: @@ -229,14 +234,19 @@ private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileS // dump the headers AbfsIoUtils.dumpHeadersToDebugLog("Request Headers", httpOperation.getConnection().getRequestProperties()); - AbfsClientThrottlingIntercept.sendingRequest(operationType); + AbfsClientThrottlingIntercept.sendingRequest(operationType, abfsCounters); if (hasRequestBody) { // HttpUrlConnection requires httpOperation.sendRequest(buffer, bufferOffset, bufferLength); + incrementCounter(AbfsStatistic.SEND_REQUESTS, 1); + incrementCounter(AbfsStatistic.BYTES_SENT, bufferLength); } httpOperation.processResponse(buffer, bufferOffset, bufferLength); + incrementCounter(AbfsStatistic.GET_RESPONSES, 1); + incrementCounter(AbfsStatistic.BYTES_RECEIVED, + httpOperation.getBytesReceived()); } catch (IOException ex) { if (ex instanceof UnknownHostException) { LOG.warn(String.format("Unknown host name: %s. 
Retrying to resolve the host name...", httpOperation.getUrl().getHost())); @@ -276,4 +286,16 @@ private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileS return true; } + + /** + * Incrementing Abfs counters with a long value. + * + * @param statistic the Abfs statistic that needs to be incremented. + * @param value the value to be incremented by. + */ + private void incrementCounter(AbfsStatistic statistic, long value) { + if (abfsCounters != null) { + abfsCounters.incrementCounter(statistic, value); + } + } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java index f41cbd63186af..a80bee65bf4f3 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java @@ -436,9 +436,10 @@ protected AbfsOutputStream createAbfsOutputStreamWithFlushEnabled( * @param metricMap map of (String, Long) with statistics name as key and * statistics value as map value. */ - protected void assertAbfsStatistics(AbfsStatistic statistic, + protected long assertAbfsStatistics(AbfsStatistic statistic, long expectedValue, Map metricMap) { assertEquals("Mismatch in " + statistic.getStatName(), expectedValue, (long) metricMap.get(statistic.getStatName())); + return expectedValue; } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java new file mode 100644 index 0000000000000..904fdf3f7c16e --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java @@ -0,0 +1,253 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.junit.Test; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; + +public class ITestAbfsNetworkStatistics extends AbstractAbfsIntegrationTest { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestAbfsNetworkStatistics.class); + private static final int LARGE_OPERATIONS = 10; + + public ITestAbfsNetworkStatistics() throws Exception { + } + + /** + * Testing connections_made, send_request and bytes_send statistics in + * {@link AbfsRestOperation}. + */ + @Test + public void testAbfsHttpSendStatistics() throws IOException { + describe("Test to check correct values of statistics after Abfs http send " + + "request is done."); + + AzureBlobFileSystem fs = getFileSystem(); + Map metricMap; + Path sendRequestPath = path(getMethodName()); + String testNetworkStatsString = "http_send"; + long connectionsMade, requestsSent, bytesSent; + + /* + * Creating AbfsOutputStream will result in 1 connection made and 1 send + * request. + */ + try (AbfsOutputStream out = createAbfsOutputStreamWithFlushEnabled(fs, + sendRequestPath)) { + out.write(testNetworkStatsString.getBytes()); + + /* + * Flushes all outstanding data (i.e. the current unfinished packet) + * from the client into the service on all DataNode replicas. + */ + out.hflush(); + + metricMap = fs.getInstrumentationMap(); + + /* + * Testing the network stats with 1 write operation. + * + * connections_made : 3(getFileSystem()) + 1(AbfsOutputStream) + 2(flush). + * + * send_requests : 1(getFileSystem()) + 1(AbfsOutputStream) + 2(flush). + * + * bytes_sent : bytes wrote in AbfsOutputStream. + */ + connectionsMade = assertAbfsStatistics(AbfsStatistic.CONNECTIONS_MADE, + 6, metricMap); + requestsSent = assertAbfsStatistics(AbfsStatistic.SEND_REQUESTS, 4, + metricMap); + bytesSent = assertAbfsStatistics(AbfsStatistic.BYTES_SENT, + testNetworkStatsString.getBytes().length, metricMap); + + } + + // To close the AbfsOutputStream 1 connection is made and 1 request is sent. + connectionsMade++; + requestsSent++; + + try (AbfsOutputStream out = createAbfsOutputStreamWithFlushEnabled(fs, + sendRequestPath)) { + + for (int i = 0; i < LARGE_OPERATIONS; i++) { + out.write(testNetworkStatsString.getBytes()); + + /* + * 1 flush call would create 2 connections and 2 send requests. + * when hflush() is called it will essentially trigger append() and + * flush() inside AbfsRestOperation. Both of which calls + * executeHttpOperation() method which creates a connection and sends + * requests. + */ + out.hflush(); + } + + metricMap = fs.getInstrumentationMap(); + + /* + * Testing the network stats with Large amount of bytes sent. + * + * connections made : connections_made(Last assertion) + 1 + * (AbfsOutputStream) + LARGE_OPERATIONS * 2(flush). + * + * send requests : requests_sent(Last assertion) + 1(AbfsOutputStream) + + * LARGE_OPERATIONS * 2(flush). + * + * bytes sent : bytes_sent(Last assertion) + LARGE_OPERATIONS * (bytes + * wrote each time). 
+ * + */ + assertAbfsStatistics(AbfsStatistic.CONNECTIONS_MADE, + connectionsMade + 1 + LARGE_OPERATIONS * 2, metricMap); + assertAbfsStatistics(AbfsStatistic.SEND_REQUESTS, + requestsSent + 1 + LARGE_OPERATIONS * 2, metricMap); + assertAbfsStatistics(AbfsStatistic.BYTES_SENT, + bytesSent + LARGE_OPERATIONS * (testNetworkStatsString.getBytes().length), + metricMap); + + } + + } + + /** + * Testing get_response and bytes_received in {@link AbfsRestOperation}. + */ + @Test + public void testAbfsHttpResponseStatistics() throws IOException { + describe("Test to check correct values of statistics after Http " + + "Response is processed."); + + AzureBlobFileSystem fs = getFileSystem(); + Path getResponsePath = path(getMethodName()); + Map metricMap; + String testResponseString = "some response"; + long getResponses, bytesReceived; + + FSDataOutputStream out = null; + FSDataInputStream in = null; + try { + + /* + * Creating a File and writing some bytes in it. + * + * get_response : 3(getFileSystem) + 1(OutputStream creation) + 2 + * (Writing data in Data store). + * + */ + out = fs.create(getResponsePath); + out.write(testResponseString.getBytes()); + out.hflush(); + + // open would require 1 get response. + in = fs.open(getResponsePath); + // read would require 1 get response and also get the bytes received. + int result = in.read(); + + // Confirming read isn't -1. + LOG.info("Result of read operation : {}", result); + + metricMap = fs.getInstrumentationMap(); + + /* + * Testing values of statistics after writing and reading a buffer. + * + * get_responses - 6(above operations) + 1(open()) + 1 (read()). + * + * bytes_received - This should be equal to bytes sent earlier. + */ + getResponses = assertAbfsStatistics(AbfsStatistic.GET_RESPONSES, 8, + metricMap); + // Testing that bytes received is equal to bytes sent. + long bytesSend = metricMap.get(AbfsStatistic.BYTES_SENT.getStatName()); + bytesReceived = assertAbfsStatistics(AbfsStatistic.BYTES_RECEIVED, + bytesSend, + metricMap); + + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + + // To close the streams 1 response is received. + getResponses++; + + try { + + /* + * Creating a file and writing buffer into it. Also recording the + * buffer for future read() call. + * This creating outputStream and writing requires 2 * + * (LARGE_OPERATIONS) get requests. + */ + StringBuilder largeBuffer = new StringBuilder(); + out = fs.create(getResponsePath); + for (int i = 0; i < LARGE_OPERATIONS; i++) { + out.write(testResponseString.getBytes()); + out.hflush(); + largeBuffer.append(testResponseString); + } + + // Open requires 1 get_response. + in = fs.open(getResponsePath); + + /* + * Reading the file which was written above. This read() call would + * read bytes equal to the bytes that was written above. + * Get response would be 1 only. + */ + in.read(0, largeBuffer.toString().getBytes(), 0, + largeBuffer.toString().getBytes().length); + + metricMap = fs.getInstrumentationMap(); + + /* + * Testing the statistics values after writing and reading a large buffer. + * + * get_response : get_responses(Last assertion) + 1 + * (OutputStream) + 2 * LARGE_OPERATIONS(Writing and flushing + * LARGE_OPERATIONS times) + 1(open()) + 1(read()). + * + * bytes_received : bytes_received(Last assertion) + LARGE_OPERATIONS * + * bytes wrote each time (bytes_received is equal to bytes wrote in the + * File). 
+ * + */ + assertAbfsStatistics(AbfsStatistic.BYTES_RECEIVED, + bytesReceived + LARGE_OPERATIONS * (testResponseString.getBytes().length), + metricMap); + assertAbfsStatistics(AbfsStatistic.GET_RESPONSES, + getResponses + 3 + 2 * LARGE_OPERATIONS, metricMap); + + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + } + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java index c88dc847a3f9a..42205807c1b3e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java @@ -45,7 +45,7 @@ public void testInitialStatsValues() throws IOException { describe("Testing the initial values of Abfs counters"); AbfsCounters abfsCounters = - new AbfsInstrumentation(getFileSystem().getUri()); + new AbfsCountersImpl(getFileSystem().getUri()); Map metricMap = abfsCounters.toMap(); for (Map.Entry entry : metricMap.entrySet()) { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java new file mode 100644 index 0000000000000..0639cf2f82b9a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; +import java.util.Map; + +import org.junit.Test; + +import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; + +public class TestAbfsNetworkStatistics extends AbstractAbfsIntegrationTest { + + private static final int LARGE_OPERATIONS = 1000; + + public TestAbfsNetworkStatistics() throws Exception { + } + + /** + * Test to check correct values of read and write throttling statistics in + * {@code AbfsClientThrottlingAnalyzer}. + */ + @Test + public void testAbfsThrottlingStatistics() throws IOException { + describe("Test to check correct values of read throttle and write " + + "throttle statistics in Abfs"); + + AbfsCounters statistics = + new AbfsCountersImpl(getFileSystem().getUri()); + + /* + * Calling the throttle methods to check correct summation and values of + * the counters. + */ + for (int i = 0; i < LARGE_OPERATIONS; i++) { + statistics.incrementCounter(AbfsStatistic.READ_THROTTLES, 1); + statistics.incrementCounter(AbfsStatistic.WRITE_THROTTLES, 1); + } + + Map metricMap = statistics.toMap(); + + /* + * Test to check read and write throttle statistics gave correct values for + * 1000 calls. 
+ */ + assertAbfsStatistics(AbfsStatistic.READ_THROTTLES, LARGE_OPERATIONS, + metricMap); + assertAbfsStatistics(AbfsStatistic.WRITE_THROTTLES, LARGE_OPERATIONS, + metricMap); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsStatistics.java index 20d96fadef6e7..f831d2d4cd26b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsStatistics.java @@ -43,7 +43,7 @@ public void testInitializeStats() throws IOException { describe("Testing the counter values after Abfs is initialised"); AbfsCounters instrumentation = - new AbfsInstrumentation(getFileSystem().getUri()); + new AbfsCountersImpl(getFileSystem().getUri()); //Testing summation of the counter values. for (int i = 0; i < LARGE_OPS; i++) { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java index 0fd65fb0a60c8..8197e7e20209e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java @@ -100,7 +100,7 @@ public TestAbfsClient(){ private String getUserAgentString(AbfsConfiguration config, boolean includeSSLProvider) throws MalformedURLException { AbfsClient client = new AbfsClient(new URL("https://azure.com"), null, - config, null, (AccessTokenProvider) null, null); + config, null, (AccessTokenProvider) null, null, null); String sslProviderName = null; if (includeSSLProvider) { sslProviderName = DelegatingSSLSocketFactory.getDefaultFactory() @@ -267,7 +267,7 @@ public static AbfsClient createTestClientFromCurrentContext( (currentAuthType == AuthType.OAuth ? 
abfsConfig.getTokenProvider() : null), - tracker); + tracker, null); return testClient; } From 100ec8e8709e79a6729aab0dac15e080dd747ee5 Mon Sep 17 00:00:00 2001 From: belugabehr <12578579+belugabehr@users.noreply.github.com> Date: Fri, 19 Jun 2020 13:23:43 -0400 Subject: [PATCH 041/131] HADOOP-17009: Embrace Immutability of Java Collections --- .../org/apache/hadoop/fs/AbstractFileSystem.java | 3 +-- .../src/main/java/org/apache/hadoop/fs/Stat.java | 7 ++----- .../java/org/apache/hadoop/fs/shell/Delete.java | 3 ++- .../main/java/org/apache/hadoop/ha/HAAdmin.java | 11 ++++++----- .../hadoop/http/lib/StaticUserWebFilter.java | 10 +++------- .../org/apache/hadoop/metrics2/util/MBeans.java | 5 ++--- .../hadoop/net/CachedDNSToSwitchMapping.java | 3 +-- .../org/apache/hadoop/net/NetworkTopology.java | 6 ++---- .../java/org/apache/hadoop/net/TableMapping.java | 3 ++- .../hadoop/security/CompositeGroupsMapping.java | 11 +++++------ .../JniBasedUnixGroupsNetgroupMapping.java | 3 ++- .../apache/hadoop/security/LdapGroupsMapping.java | 6 +++--- .../org/apache/hadoop/security/NetgroupCache.java | 4 ++-- .../hadoop/security/ShellBasedIdMapping.java | 3 ++- .../web/DelegationTokenAuthenticationHandler.java | 14 +++++--------- .../org/apache/hadoop/service/AbstractService.java | 6 +++--- .../apache/hadoop/service/CompositeService.java | 3 ++- .../hadoop/service/launcher/ServiceLauncher.java | 3 ++- .../org/apache/hadoop/util/HttpExceptionUtils.java | 8 ++++---- .../main/java/org/apache/hadoop/util/Shell.java | 8 ++++---- 20 files changed, 55 insertions(+), 65 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 1df68b647c99a..32926d55de874 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -23,7 +23,6 @@ import java.lang.reflect.InvocationTargetException; import java.net.URI; import java.net.URISyntaxException; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; @@ -1032,7 +1031,7 @@ public String getCanonicalServiceName() { */ @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" }) public List> getDelegationTokens(String renewer) throws IOException { - return new ArrayList>(0); + return Collections.emptyList(); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java index 5e80a140175e6..f6c2f2af1c9ba 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java @@ -20,8 +20,7 @@ import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; +import java.util.Collections; import java.util.NoSuchElementException; import java.util.StringTokenizer; @@ -65,9 +64,7 @@ public Stat(Path path, long blockSize, boolean deref, FileSystem fs) this.blockSize = blockSize; this.dereference = deref; // LANG = C setting - Map env = new HashMap(); - env.put("LANG", "C"); - setEnvironment(env); + setEnvironment(Collections.singletonMap("LANG", "C")); } public FileStatus getFileStatus() throws IOException { 
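
The hunks in this patch all apply the same idiom: return the shared, unmodifiable instances from java.util.Collections instead of allocating a fresh mutable collection that the caller is not expected to modify. A minimal standalone sketch of that idiom follows; it is not part of the patch, and the class and method names (ImmutableCollectionsDemo, noGroups, cLocaleEnv) are illustrative only.

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    public class ImmutableCollectionsDemo {

      // Mirrors the pattern above: hand back a shared immutable instance
      // instead of building a new ArrayList/HashMap on every call.
      static List<String> noGroups() {
        return Collections.emptyList();
      }

      static Map<String, String> cLocaleEnv() {
        return Collections.singletonMap("LANG", "C");
      }

      public static void main(String[] args) {
        List<String> groups = noGroups();
        System.out.println(groups.isEmpty());   // true; no per-call allocation
        try {
          groups.add("hadoop");                 // the returned list rejects mutation
        } catch (UnsupportedOperationException expected) {
          System.out.println("emptyList() is unmodifiable");
        }
        System.out.println(cLocaleEnv());       // prints {LANG=C}
      }
    }

Collections.singleton and Collections.emptyMap carry the same contract, which is why the Delete, HAAdmin and MBeans hunks that follow can drop their temporary LinkedList/HashMap allocations.
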
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java index 3c9368ca2ed9b..184b674adcc27 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java @@ -20,6 +20,7 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.util.Collections; import java.util.LinkedList; import java.util.List; @@ -97,7 +98,7 @@ protected List expandArgument(String arg) throws IOException { throw e; } // prevent -f on a non-existent glob from failing - return new LinkedList(); + return Collections.emptyList(); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java index 0950ea7e01c57..34e37650ade1c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java @@ -19,9 +19,9 @@ import java.io.IOException; import java.io.PrintStream; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Map; import org.apache.commons.cli.Options; @@ -107,8 +107,7 @@ protected HAAdmin(Configuration conf) { protected abstract HAServiceTarget resolveTarget(String string); protected Collection getTargetIds(String targetNodeToActivate) { - return new ArrayList( - Arrays.asList(new String[]{targetNodeToActivate})); + return Collections.singleton(targetNodeToActivate); } protected String getUsageString() { @@ -188,8 +187,10 @@ private int transitionToActive(final CommandLine cmd) private boolean isOtherTargetNodeActive(String targetNodeToActivate, boolean forceActive) throws IOException { Collection targetIds = getTargetIds(targetNodeToActivate); - targetIds.remove(targetNodeToActivate); - for(String targetId : targetIds) { + for (String targetId : targetIds) { + if (targetNodeToActivate.equals(targetId)) { + continue; + } HAServiceTarget target = resolveTarget(targetId); if (!checkManualStateManagementOK(target)) { return true; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java index fc64697bb8c75..915427f8e1845 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.security.Principal; -import java.util.HashMap; +import java.util.Collections; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -121,14 +121,10 @@ public void init(FilterConfig conf) throws ServletException { @Override public void initFilter(FilterContainer container, Configuration conf) { - HashMap options = new HashMap(); - String username = getUsernameFromConf(conf); - options.put(HADOOP_HTTP_STATIC_USER, username); - container.addFilter("static_user_filter", - StaticUserFilter.class.getName(), - options); + container.addFilter("static_user_filter", StaticUserFilter.class.getName(), + Collections.singletonMap(HADOOP_HTTP_STATIC_USER, 
username)); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java index 1b50498bbaf5a..4aef03a5e645f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.util; import java.lang.management.ManagementFactory; -import java.util.HashMap; +import java.util.Collections; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -70,8 +70,7 @@ private MBeans() { */ static public ObjectName register(String serviceName, String nameName, Object theMbean) { - return register(serviceName, nameName, new HashMap(), - theMbean); + return register(serviceName, nameName, Collections.emptyMap(), theMbean); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java index af487ed5c61a5..803fcec8d6c77 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java @@ -130,8 +130,7 @@ public List resolve(List names) { */ @Override public Map getSwitchMap() { - Map switchMap = new HashMap(cache); - return switchMap; + return new HashMap<>(cache); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java index 9f52fed9678b9..893012befcf44 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java @@ -196,10 +196,8 @@ public List getDatanodesInRack(String loc) { loc = loc.substring(1); } InnerNode rack = (InnerNode) clusterMap.getLoc(loc); - if (rack == null) { - return null; - } - return new ArrayList(rack.getChildren()); + return (rack == null) ? new ArrayList<>(0) + : new ArrayList<>(rack.getChildren()); } finally { netlock.readLock().unlock(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java index cd3514c4bce16..2beda8401f8d1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java @@ -25,6 +25,7 @@ import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -130,7 +131,7 @@ public synchronized List resolve(List names) { if (map == null) { LOG.warn("Failed to read topology table. 
" + NetworkTopology.DEFAULT_RACK + " will be used for all nodes."); - map = new HashMap(); + map = Collections.emptyMap(); } } List results = new ArrayList(names.size()); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java index b762df2acc022..5040de1e65056 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -69,8 +70,8 @@ public class CompositeGroupsMapping public synchronized List getGroups(String user) throws IOException { Set groupSet = new TreeSet(); - List groups = null; for (GroupMappingServiceProvider provider : providersList) { + List groups = Collections.emptyList(); try { groups = provider.getGroups(user); } catch (Exception e) { @@ -78,17 +79,15 @@ public synchronized List getGroups(String user) throws IOException { user, provider.getClass().getSimpleName(), e.toString()); LOG.debug("Stacktrace: ", e); } - if (groups != null && ! groups.isEmpty()) { + if (!groups.isEmpty()) { groupSet.addAll(groups); if (!combined) break; } } - List results = new ArrayList(groupSet.size()); - results.addAll(groupSet); - return results; + return new ArrayList<>(groupSet); } - + /** * Caches groups, no need to do that for this provider */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java index 9ba55e436f3f8..65bd1c00333a9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.LinkedList; @@ -125,6 +126,6 @@ protected synchronized List getUsersForNetgroup(String netgroup) { if (users != null && users.length != 0) { return Arrays.asList(users); } - return new LinkedList(); + return Collections.emptyList(); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java index 8e71f69c858d1..7c53948cc1f98 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java @@ -518,11 +518,11 @@ List doGetGroups(String user, int goUpHierarchy) if (!results.hasMoreElements()) { LOG.debug("doGetGroups({}) returned no groups because the " + "user is not found.", user); - return new ArrayList<>(); + return Collections.emptyList(); } SearchResult result = results.nextElement(); - List groups = null; + List groups = Collections.emptyList(); if (useOneQuery) { try { /** @@ -548,7 +548,7 @@ List doGetGroups(String user, int goUpHierarchy) "the second LDAP query 
using the user's DN.", e); } } - if (groups == null || groups.isEmpty() || goUpHierarchy > 0) { + if (groups.isEmpty() || goUpHierarchy > 0) { groups = lookupGroup(result, c, goUpHierarchy); } LOG.debug("doGetGroups({}) returned {}", user, groups); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java index 4495a66c4322f..aa06c59a64814 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.security; +import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -61,7 +61,7 @@ public static void getNetgroups(final String user, * @return list of cached groups */ public static List getNetgroupNames() { - return new LinkedList(getGroups()); + return new ArrayList<>(getGroups()); } private static Set getGroups() { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java index 92ea83d8f1da5..93231075282da 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java @@ -24,6 +24,7 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.regex.Matcher; @@ -534,7 +535,7 @@ synchronized private void updateMapIncr(final int id, static final class PassThroughMap extends HashMap { public PassThroughMap() { - this(new HashMap()); + this(Collections.emptyMap()); } public PassThroughMap(Map mapping) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java index 284044fd938a8..1de534f36ba4f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java @@ -20,9 +20,8 @@ import java.io.IOException; import java.io.Writer; import java.text.MessageFormat; -import java.util.HashMap; +import java.util.Collections; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.Map; import java.util.Properties; import java.util.Set; @@ -301,8 +300,7 @@ public boolean managementOperation(AuthenticationToken token, dt.decodeFromUrlString(tokenToRenew); long expirationTime = tokenManager.renewToken(dt, requestUgi.getShortUserName()); - map = new HashMap(); - map.put("long", expirationTime); + map = Collections.singletonMap("long", expirationTime); } catch (IOException ex) { throw new AuthenticationException(ex.toString(), ex); } @@ -358,13 +356,11 @@ public boolean 
managementOperation(AuthenticationToken token, @SuppressWarnings("unchecked") private static Map delegationTokenToJSON(Token token) throws IOException { - Map json = new LinkedHashMap(); - json.put( + Map json = Collections.singletonMap( KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON, token.encodeToUrlString()); - Map response = new LinkedHashMap(); - response.put(KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_JSON, - json); + Map response = Collections.singletonMap( + KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_JSON, json); return response; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java index c9fec435bfa24..0aa5bafe88423 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -426,7 +427,7 @@ private void recordLifecycleEvent() { @Override public synchronized List getLifecycleHistory() { - return new ArrayList(lifecycleHistory); + return Collections.unmodifiableList(new ArrayList<>(lifecycleHistory)); } /** @@ -483,8 +484,7 @@ public void removeBlocker(String name) { @Override public Map getBlockers() { synchronized (blockerMap) { - Map map = new HashMap(blockerMap); - return map; + return Collections.unmodifiableMap(new HashMap<>(blockerMap)); } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java index 4aa2f23fad730..ee66e90f7c4ee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java @@ -19,6 +19,7 @@ package org.apache.hadoop.service; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience.Public; @@ -60,7 +61,7 @@ public CompositeService(String name) { */ public List getServices() { synchronized (serviceList) { - return new ArrayList(serviceList); + return Collections.unmodifiableList(new ArrayList<>(serviceList)); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java index 5e8a1f4eb21fb..6d161bf8b613c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java @@ -23,6 +23,7 @@ import java.net.URL; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import com.google.common.annotations.VisibleForTesting; @@ -894,7 +895,7 @@ public List extractCommandOptions(Configuration conf, List args) { int size = args.size(); if (size <= 1) { - return new ArrayList<>(0); + return Collections.emptyList(); } List coreArgs = args.subList(1, size); diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java index 12d1ef01201a2..3cc7a4bb4ea5b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java @@ -28,6 +28,7 @@ import java.io.Writer; import java.lang.reflect.Constructor; import java.net.HttpURLConnection; +import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; @@ -71,8 +72,8 @@ public static void createServletExceptionResponse( json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex)); json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName()); json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName()); - Map jsonResponse = new LinkedHashMap(); - jsonResponse.put(ERROR_JSON, json); + Map jsonResponse = + Collections.singletonMap(ERROR_JSON, json); Writer writer = response.getWriter(); JsonSerialization.writer().writeValue(writer, jsonResponse); writer.flush(); @@ -91,8 +92,7 @@ public static Response createJerseyExceptionResponse(Response.Status status, json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex)); json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName()); json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName()); - Map response = new LinkedHashMap(); - response.put(ERROR_JSON, json); + Map response = Collections.singletonMap(ERROR_JSON, json); return Response.status(status).type(MediaType.APPLICATION_JSON). entity(response).build(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index e66c81b4b8df6..0dc49739c4b5a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -28,6 +28,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.Timer; import java.util.TimerTask; @@ -871,6 +872,7 @@ protected Shell(long interval, boolean redirectErrorStream) { this.interval = interval; this.lastTime = (interval < 0) ? 0 : -interval; this.redirectErrorStream = redirectErrorStream; + this.environment = Collections.emptyMap(); } /** @@ -878,7 +880,7 @@ protected Shell(long interval, boolean redirectErrorStream) { * @param env Mapping of environment variables */ protected void setEnvironment(Map env) { - this.environment = env; + this.environment = Objects.requireNonNull(env); } /** @@ -915,9 +917,7 @@ private void runCommand() throws IOException { builder.environment().clear(); } - if (environment != null) { - builder.environment().putAll(this.environment); - } + builder.environment().putAll(this.environment); if (dir != null) { builder.directory(this.dir); From 8fd0fdf8890b4c0cf3ea977be8fae8fa17e6599b Mon Sep 17 00:00:00 2001 From: Masatake Iwasaki Date: Sat, 20 Jun 2020 07:37:55 +0900 Subject: [PATCH 042/131] MAPREDUCE-7281. Fix NoClassDefFoundError on 'mapred minicluster'. 
(#2077) --- hadoop-mapreduce-project/bin/mapred | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred index 9773ec89ded6c..3e52556a08f0b 100755 --- a/hadoop-mapreduce-project/bin/mapred +++ b/hadoop-mapreduce-project/bin/mapred @@ -105,6 +105,8 @@ function mapredcmd_case minicluster) hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}/timelineservice"'/*' hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}/test"'/*' + junitjar=$(echo "${HADOOP_TOOLS_LIB_JARS_DIR}"/junit-[0-9]*.jar) + hadoop_add_classpath "${junitjar}" HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.MiniHadoopClusterManager ;; *) From b27810aa6015253866ccc0ccc7247ad7024c0730 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Sat, 20 Jun 2020 00:32:02 -0700 Subject: [PATCH 043/131] HDFS-15418. ViewFileSystemOverloadScheme should represent mount links as non symlinks. Contributed by Uma Maheswara Rao G. --- .../apache/hadoop/fs/viewfs/Constants.java | 8 ++ .../hadoop/fs/viewfs/ViewFileSystem.java | 71 +++++++--- .../viewfs/ViewFileSystemOverloadScheme.java | 20 ++- .../org/apache/hadoop/fs/viewfs/ViewFs.java | 80 +++++++---- .../TestViewFsOverloadSchemeListStatus.java | 132 ++++++++++++++++++ .../fs/viewfs/TestViewfsFileStatus.java | 4 +- .../src/site/markdown/ViewFsOverloadScheme.md | 42 +++--- ...mOverloadSchemeHdfsFileSystemContract.java | 5 + ...ileSystemOverloadSchemeWithHdfsScheme.java | 9 ++ 9 files changed, 295 insertions(+), 76 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java index 0a5d4b46ce2d8..f454f63084cda 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java @@ -90,4 +90,12 @@ public interface Constants { String CONFIG_VIEWFS_ENABLE_INNER_CACHE = "fs.viewfs.enable.inner.cache"; boolean CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT = true; + + /** + * Enable ViewFileSystem to show mountlinks as symlinks. 
+ */ + String CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS = + "fs.viewfs.mount.links.as.symlinks"; + + boolean CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT = true; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 895edc01397dc..1ee06e02aab15 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -20,6 +20,8 @@ import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT; import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; import java.io.FileNotFoundException; @@ -527,10 +529,18 @@ public void access(Path path, FsAction mode) throws AccessControlException, * the target path FileStatus object. The target path will be available via * getSymlink on that children's FileStatus object. Since it represents as * symlink, isDirectory on that children's FileStatus will return false. + * This behavior can be changed by setting an advanced configuration + * fs.viewfs.mount.links.as.symlinks to false. In this case, mount points will + * be represented as non-symlinks and all the file/directory attributes like + * permissions, isDirectory etc will be assigned from it's resolved target + * directory/file. * * If you want to get the FileStatus of target path for that children, you may * want to use GetFileStatus API with that children's symlink path. Please see * {@link ViewFileSystem#getFileStatus(Path f)} + * + * Note: In ViewFileSystem, by default the mount links are represented as + * symlinks. */ @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, @@ -1114,6 +1124,7 @@ static class InternalDirOfViewFs extends FileSystem { final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final URI myUri; + private final boolean showMountLinksAsSymlinks; public InternalDirOfViewFs(final InodeTree.INodeDir dir, final long cTime, final UserGroupInformation ugi, URI uri, @@ -1127,6 +1138,9 @@ public InternalDirOfViewFs(final InodeTree.INodeDir dir, theInternalDir = dir; creationTime = cTime; this.ugi = ugi; + showMountLinksAsSymlinks = config + .getBoolean(CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT); } static private void checkPathIsSlash(final Path f) throws IOException { @@ -1216,37 +1230,50 @@ public FileStatus[] listStatus(Path f) throws AccessControlException, for (Entry> iEntry : theInternalDir.getChildren().entrySet()) { INode inode = iEntry.getValue(); + Path path = new Path(inode.fullPath).makeQualified(myUri, null); if (inode.isLink()) { INodeLink link = (INodeLink) inode; + + if (showMountLinksAsSymlinks) { + // To maintain backward compatibility, with default option(showing + // mount links as symlinks), we will represent target link as + // symlink and rest other properties are belongs to mount link only. 
+ result[i++] = + new FileStatus(0, false, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getPrimaryGroupName(), link.getTargetLink(), + path); + continue; + } + + // We will represent as non-symlinks. Here it will show target + // directory/file properties like permissions, isDirectory etc on + // mount path. The path will be a mount link path and isDirectory is + // true if target is dir, otherwise false. + String linkedPath = link.getTargetFileSystem().getUri().getPath(); + if ("".equals(linkedPath)) { + linkedPath = "/"; + } try { - String linkedPath = link.getTargetFileSystem().getUri().getPath(); - if("".equals(linkedPath)) { - linkedPath = "/"; - } FileStatus status = ((ChRootedFileSystem)link.getTargetFileSystem()) .getMyFs().getFileStatus(new Path(linkedPath)); - result[i++] = new FileStatus(status.getLen(), false, - status.getReplication(), status.getBlockSize(), - status.getModificationTime(), status.getAccessTime(), - status.getPermission(), status.getOwner(), status.getGroup(), - link.getTargetLink(), - new Path(inode.fullPath).makeQualified( - myUri, null)); + result[i++] = new FileStatus(status.getLen(), status.isDirectory(), + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), status.getGroup(), + null, path); } catch (FileNotFoundException ex) { - result[i++] = new FileStatus(0, false, 0, 0, - creationTime, creationTime, PERMISSION_555, - ugi.getShortUserName(), ugi.getPrimaryGroupName(), - link.getTargetLink(), - new Path(inode.fullPath).makeQualified( - myUri, null)); + LOG.warn("Cannot get one of the children's(" + path + + ") target path(" + link.getTargetFileSystem().getUri() + + ") file status.", ex); + throw ex; } } else { - result[i++] = new FileStatus(0, true, 0, 0, - creationTime, creationTime, PERMISSION_555, - ugi.getShortUserName(), ugi.getPrimaryGroupName(), - new Path(inode.fullPath).makeQualified( - myUri, null)); + result[i++] = + new FileStatus(0, true, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getPrimaryGroupName(), path); } } if (fallbackStatuses.length > 0) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java index 36f9cd104cb6b..672022be82409 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java @@ -59,9 +59,9 @@ * data to mount with other hdfs and object store clusters(hdfs://NN1, * o3fs://bucket1.volume1/, s3a://bucket1/) * - * fs.viewfs.mounttable.Cluster./user = hdfs://NN1/user - * fs.viewfs.mounttable.Cluster./data = o3fs://bucket1.volume1/data - * fs.viewfs.mounttable.Cluster./backup = s3a://bucket1/backup/ + * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user + * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data + * fs.viewfs.mounttable.Cluster.link./backup = s3a://bucket1/backup/ * * Op1: Create file hdfs://Cluster/user/fileA will go to hdfs://NN1/user/fileA * Op2: Create file hdfs://Cluster/data/datafile will go to @@ -75,15 +75,19 @@ * data to mount with other hdfs and object store clusters * (hdfs://NN1, o3fs://bucket1.volume1/) * - * fs.viewfs.mounttable.bucketA./user = 
hdfs://NN1/user - * fs.viewfs.mounttable.bucketA./data = o3fs://bucket1.volume1/data - * fs.viewfs.mounttable.bucketA./salesDB = s3a://bucketA/salesDB/ + * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user + * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data + * fs.viewfs.mounttable.bucketA.link./salesDB = s3a://bucketA/salesDB/ * * Op1: Create file s3a://bucketA/user/fileA will go to hdfs://NN1/user/fileA * Op2: Create file s3a://bucketA/data/datafile will go to * o3fs://bucket1.volume1/data/datafile * Op3: Create file s3a://bucketA/salesDB/dbfile will go to * s3a://bucketA/salesDB/dbfile + * + * Note: In ViewFileSystemOverloadScheme, by default the mount links will be + * represented as non-symlinks. If you want to change this behavior, please see + * {@link ViewFileSystem#listStatus(Path)} *****************************************************************************/ @InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" }) @InterfaceStability.Evolving @@ -107,6 +111,10 @@ public void initialize(URI theUri, Configuration conf) throws IOException { } String mountTableConfigPath = conf.get(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH); + /* The default value to false in ViewFSOverloadScheme */ + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + conf.getBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + false)); if (null != mountTableConfigPath) { MountTableConfigLoader loader = new HCFSMountTableConfigLoader(); loader.load(mountTableConfigPath, conf); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index 4578a4c353e40..fae5d1b5f62ab 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.fs.viewfs; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT; import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; import java.io.FileNotFoundException; @@ -67,7 +69,8 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Time; - +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * ViewFs (extends the AbstractFileSystem interface) implements a client-side @@ -154,6 +157,7 @@ @InterfaceAudience.Public @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */ public class ViewFs extends AbstractFileSystem { + static final Logger LOG = LoggerFactory.getLogger(ViewFs.class); final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final Configuration config; @@ -161,6 +165,7 @@ public class ViewFs extends AbstractFileSystem { Path homeDir = null; private ViewFileSystem.RenameStrategy renameStrategy = ViewFileSystem.RenameStrategy.SAME_MOUNTPOINT; + private static boolean showMountLinksAsSymlinks = true; static AccessControlException readOnlyMountTable(final String operation, final String p) { @@ -209,6 +214,9 @@ public ViewFs(final Configuration conf) throws IOException, creationTime = Time.now(); ugi = UserGroupInformation.getCurrentUser(); config = conf; + showMountLinksAsSymlinks = config + 
.getBoolean(CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT); // Now build client side view (i.e. client side mount table) from config. String authority = theUri.getAuthority(); fsState = new InodeTree(conf, authority) { @@ -453,12 +461,17 @@ public LocatedFileStatus getViewFsFileStatus(LocatedFileStatus stat, * the target path FileStatus object. The target path will be available via * getSymlink on that children's FileStatus object. Since it represents as * symlink, isDirectory on that children's FileStatus will return false. + * This behavior can be changed by setting an advanced configuration + * fs.viewfs.mount.links.as.symlinks to false. In this case, mount points will + * be represented as non-symlinks and all the file/directory attributes like + * permissions, isDirectory etc will be assigned from it's resolved target + * directory/file. * * If you want to get the FileStatus of target path for that children, you may * want to use GetFileStatus API with that children's symlink path. Please see * {@link ViewFs#getFileStatus(Path f)} * - * Note: In ViewFs, the mount links are represented as symlinks. + * Note: In ViewFs, by default the mount links are represented as symlinks. */ @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, @@ -999,8 +1012,7 @@ public int getUriDefaultPort() { * will be listed in the returned result. */ @Override - public FileStatus[] listStatus(final Path f) throws AccessControlException, - IOException { + public FileStatus[] listStatus(final Path f) throws IOException { checkPathIsSlash(f); FileStatus[] fallbackStatuses = listStatusForFallbackLink(); FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()]; @@ -1008,37 +1020,51 @@ public FileStatus[] listStatus(final Path f) throws AccessControlException, for (Entry> iEntry : theInternalDir.getChildren().entrySet()) { INode inode = iEntry.getValue(); - - + Path path = new Path(inode.fullPath).makeQualified(myUri, null); if (inode.isLink()) { INodeLink link = (INodeLink) inode; + if (showMountLinksAsSymlinks) { + // To maintain backward compatibility, with default option(showing + // mount links as symlinks), we will represent target link as + // symlink and rest other properties are belongs to mount link only. + result[i++] = + new FileStatus(0, false, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getPrimaryGroupName(), link.getTargetLink(), + path); + continue; + } + + // We will represent as non-symlinks. Here it will show target + // directory/file properties like permissions, isDirectory etc on + // mount path. The path will be a mount link path and isDirectory is + // true if target is dir, otherwise false. 
+ String linkedPath = link.getTargetFileSystem().getUri().getPath(); + if ("".equals(linkedPath)) { + linkedPath = "/"; + } try { - String linkedPath = link.getTargetFileSystem().getUri().getPath(); - FileStatus status = ((ChRootedFs)link.getTargetFileSystem()) - .getMyFs().getFileStatus(new Path(linkedPath)); - result[i++] = new FileStatus(status.getLen(), false, - status.getReplication(), status.getBlockSize(), - status.getModificationTime(), status.getAccessTime(), - status.getPermission(), status.getOwner(), status.getGroup(), - link.getTargetLink(), - new Path(inode.fullPath).makeQualified( - myUri, null)); + FileStatus status = + ((ChRootedFs) link.getTargetFileSystem()).getMyFs() + .getFileStatus(new Path(linkedPath)); + result[i++] = new FileStatus(status.getLen(), status.isDirectory(), + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), status.getGroup(), + null, path); } catch (FileNotFoundException ex) { - result[i++] = new FileStatus(0, false, 0, 0, - creationTime, creationTime, PERMISSION_555, - ugi.getShortUserName(), ugi.getPrimaryGroupName(), - link.getTargetLink(), - new Path(inode.fullPath).makeQualified( - myUri, null)); + LOG.warn("Cannot get one of the children's(" + path + + ") target path(" + link.getTargetFileSystem().getUri() + + ") file status.", ex); + throw ex; } } else { - result[i++] = new FileStatus(0, true, 0, 0, - creationTime, creationTime, - PERMISSION_555, ugi.getShortUserName(), ugi.getGroupNames()[0], - new Path(inode.fullPath).makeQualified( - myUri, null)); + result[i++] = + new FileStatus(0, true, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getGroupNames()[0], path); } } if (fallbackStatuses.length > 0) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java new file mode 100644 index 0000000000000..0cf691481f720 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.*; + +/** + * ViewFsOverloadScheme ListStatus. + */ +public class TestViewFsOverloadSchemeListStatus { + + private static final File TEST_DIR = + GenericTestUtils.getTestDir(TestViewfsFileStatus.class.getSimpleName()); + + @Before + public void setUp() { + FileUtil.fullyDelete(TEST_DIR); + assertTrue(TEST_DIR.mkdirs()); + } + + @After + public void tearDown() throws IOException { + FileUtil.fullyDelete(TEST_DIR); + } + + /** + * Tests the ACL and isDirectory returned from listStatus for directories and + * files. + */ + @Test + public void testListStatusACL() throws IOException, URISyntaxException { + String testfilename = "testFileACL"; + String childDirectoryName = "testDirectoryACL"; + TEST_DIR.mkdirs(); + File infile = new File(TEST_DIR, testfilename); + final byte[] content = "dingos".getBytes(); + + try (FileOutputStream fos = new FileOutputStream(infile)) { + fos.write(content); + } + assertEquals(content.length, infile.length()); + File childDir = new File(TEST_DIR, childDirectoryName); + childDir.mkdirs(); + + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/file", infile.toURI()); + ConfigUtil.addLink(conf, "/dir", childDir.toURI()); + String fileScheme = "file"; + conf.set(String.format("fs.%s.impl", fileScheme), + ViewFileSystemOverloadScheme.class.getName()); + conf.set(String + .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + fileScheme), LocalFileSystem.class.getName()); + String fileUriStr = "file:///"; + try (FileSystem vfs = FileSystem.get(new URI(fileUriStr), conf)) { + assertEquals(ViewFileSystemOverloadScheme.class, vfs.getClass()); + FileStatus[] statuses = vfs.listStatus(new Path("/")); + + FileSystem localFs = ((ViewFileSystemOverloadScheme) vfs) + .getRawFileSystem(new Path(fileUriStr), conf); + FileStatus fileStat = localFs.getFileStatus(new Path(infile.getPath())); + FileStatus dirStat = localFs.getFileStatus(new Path(childDir.getPath())); + + for (FileStatus status : statuses) { + if (status.getPath().getName().equals(fileScheme)) { + assertEquals(fileStat.getPermission(), status.getPermission()); + } else { + assertEquals(dirStat.getPermission(), status.getPermission()); + } + } + + localFs.setPermission(new Path(infile.getPath()), + FsPermission.valueOf("-rwxr--r--")); + localFs.setPermission(new Path(childDir.getPath()), + FsPermission.valueOf("-r--rwxr--")); + + statuses = vfs.listStatus(new Path("/")); + for (FileStatus status : statuses) { + if 
(status.getPath().getName().equals(fileScheme)) { + assertEquals(FsPermission.valueOf("-rwxr--r--"), + status.getPermission()); + assertFalse(status.isDirectory()); + } else { + assertEquals(FsPermission.valueOf("-r--rwxr--"), + status.getPermission()); + assertTrue(status.isDirectory()); + } + } + } + } + + @AfterClass + public static void cleanup() throws IOException { + FileUtil.fullyDelete(TEST_DIR); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java index 29fcc22db1fe6..75557456edc3d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java @@ -121,7 +121,7 @@ public void testListStatusACL() throws IOException { Configuration conf = new Configuration(); ConfigUtil.addLink(conf, "/file", infile.toURI()); ConfigUtil.addLink(conf, "/dir", childDir.toURI()); - + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); try (FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf)) { assertEquals(ViewFileSystem.class, vfs.getClass()); FileStatus[] statuses = vfs.listStatus(new Path("/")); @@ -148,9 +148,11 @@ public void testListStatusACL() throws IOException { if (status.getPath().getName().equals("file")) { assertEquals(FsPermission.valueOf("-rwxr--r--"), status.getPermission()); + assertFalse(status.isDirectory()); } else { assertEquals(FsPermission.valueOf("-r--rwxr--"), status.getPermission()); + assertTrue(status.isDirectory()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md index 5fd863325cd98..e65c5458676ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md @@ -28,7 +28,7 @@ View File System Overload Scheme ### Details -The View File System Overload Scheme is an extension to the View File System. This will allow users to continue to use their existing fs.defaultFS configured scheme or any new scheme name instead of using scheme `viewfs`. Mount link configurations key, value formats are same as in [ViewFS Guide](./ViewFs.html). If a user wants to continue use the same fs.defaultFS and wants to have more mount points, then mount link configurations should have the current fs.defaultFS authority name as mount table name. Example if fs.defaultFS is `hdfs://mycluster`, then the mount link configuration key name should be like in the following format `fs.viewfs.mounttable.*mycluster*.`. We will discuss more example configurations in following sections. +The View File System Overload Scheme is an extension to the View File System. This will allow users to continue to use their existing fs.defaultFS configured scheme or any new scheme name instead of using scheme `viewfs`. Mount link configurations key, value formats are same as in [ViewFS Guide](./ViewFs.html). If a user wants to continue use the same fs.defaultFS and wants to have more mount points, then mount link configurations should have the current fs.defaultFS authority name as mount table name. 
Example if fs.defaultFS is `hdfs://mycluster`, then the mount link configuration key name should be like in the following format `fs.viewfs.mounttable.*mycluster*.link.`. We will discuss more example configurations in following sections. Another important improvement with the ViewFileSystemOverloadScheme is, administrators need not copy the `mount-table.xml` configuration file to 1000s of client nodes. Instead they can keep the mount-table configuration file in a Hadoop compatible file system. So, keeping the configuration file in a central place makes administrators life easier as they can update mount-table in single place. @@ -55,59 +55,59 @@ Here `` should be same as the uri-scheme configured in fs.defautFS. For **Example 1:** -If users want some of their existing cluster (`hdfs://mycluster`) data to mount with hdfs(`hdfs://mycluster`) and other object store clusters(`o3fs://bucket1.volume1.omhost/`, `s3a://bucket1/`), the following example configurations can show how to add mount links. +If users want some of their existing cluster (`hdfs://cluster`) data to mount with hdfs(`hdfs://cluster`) and other object store clusters(`o3fs://bucket1.volume1.omhost/`, `s3a://bucket1/`), the following example configurations can show how to add mount links. ```xml - fs.viewfs.mounttable.Cluster./user - hdfs://mycluster/user + fs.viewfs.mounttable.cluster.link./user + hdfs://cluster/user - fs.viewfs.mounttable.Cluster./data + fs.viewfs.mounttable.cluster.link./data o3fs://bucket1.volume1/data - fs.viewfs.mounttable.Cluster./backup + fs.viewfs.mounttable.cluster.link./backup s3a://bucket1/backup/ ``` Let's consider the following operations to understand where these operations will be delegated based on mount links. - *Op1:* Create a file with the the path `hdfs://mycluster/user/fileA`, then physically this file will be created at `hdfs://mycluster/user/fileA`. This delegation happened based on the first configuration parameter in above configurations. Here `/user` mapped to `hdfs://mycluster/user/`. + *Op1:* Create a file with the the path `hdfs://cluster/user/fileA`, then physically this file will be created at `hdfs://cluster/user/fileA`. This delegation happened based on the first configuration parameter in above configurations. Here `/user` mapped to `hdfs://cluster/user/`. - *Op2:* Create a file the the path `hdfs://mycluster/data/datafile`, then this file will be created at `o3fs://bucket1.volume1.omhost/data/datafile`. This delegation happened based on second configurations parameter in above configurations. Here `/data` was mapped with `o3fs://bucket1.volume1.omhost/data/`. + *Op2:* Create a file the the path `hdfs://cluster/data/datafile`, then this file will be created at `o3fs://bucket1.volume1.omhost/data/datafile`. This delegation happened based on second configurations parameter in above configurations. Here `/data` was mapped with `o3fs://bucket1.volume1.omhost/data/`. - *Op3:* Create a file with the the path `hdfs://Cluster/backup/data.zip`, then physically this file will be created at `s3a://bucket1/backup/data.zip`. This delegation happened based on the third configuration parameter in above configurations. Here `/backup` was mapped to `s3a://bucket1/backup/`. + *Op3:* Create a file with the the path `hdfs://cluster/backup/data.zip`, then physically this file will be created at `s3a://bucket1/backup/data.zip`. This delegation happened based on the third configuration parameter in above configurations. Here `/backup` was mapped to `s3a://bucket1/backup/`. 
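To make Example 1 concrete, the sketch below sets up the same three mount links programmatically and resolves Op1 through `ViewFileSystemOverloadScheme`. It is only an illustration of the key format described above: the mount-table name (`cluster`) and the target URIs mirror the XML, the target scheme implementation is registered through `FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN` (as the new `TestViewFsOverloadSchemeListStatus` does for the `file` scheme), and the referenced clusters are assumed to exist.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class MountTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to the fs.viewfs.mounttable.cluster.link.* entries in Example 1.
    ConfigUtil.addLink(conf, "cluster", "/user", new URI("hdfs://cluster/user"));
    ConfigUtil.addLink(conf, "cluster", "/data", new URI("o3fs://bucket1.volume1/data"));
    ConfigUtil.addLink(conf, "cluster", "/backup", new URI("s3a://bucket1/backup/"));

    // Keep the existing hdfs:// scheme, but serve it through the overload scheme.
    conf.set("fs.hdfs.impl", ViewFileSystemOverloadScheme.class.getName());
    // Each target scheme still needs its real FileSystem implementation; hdfs is shown
    // here, o3fs/s3a would be registered the same way.
    conf.set(String.format(
        FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, "hdfs"),
        DistributedFileSystem.class.getName());

    try (FileSystem fs = FileSystem.get(new URI("hdfs://cluster/"), conf)) {
      // Op1 from above: this path is delegated to hdfs://cluster/user/fileA.
      fs.create(new Path("/user/fileA")).close();
    }
  }
}
```

Listing `/` on such a file system shows the three mount points; with this patch they appear as non-symlink entries carrying the target's permission bits unless `fs.viewfs.mount.links.as.symlinks` is explicitly set back to true.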
**Example 2:** -If users want some of their existing cluster (`s3a://bucketA/`) data to mount with other hdfs cluster(`hdfs://Cluster`) and object store clusters(`o3fs://bucket1.volume1.omhost/`, `s3a://bucketA/`), the following example configurations can show how to add mount links. +If users want some of their existing cluster (`s3a://bucketA/`) data to mount with other hdfs cluster(`hdfs://cluster`) and object store clusters(`o3fs://bucket1.volume1.omhost/`, `s3a://bucketA/`), the following example configurations can show how to add mount links. ```xml - fs.viewfs.mounttable.bucketA./user - hdfs://Cluster/user + fs.viewfs.mounttable.bucketA.link./user + hdfs://cluster/user - fs.viewfs.mounttable.bucketA./data + fs.viewfs.mounttable.bucketA.link./data o3fs://bucket1.volume1.omhost/data - fs.viewfs.mounttable.bucketA./salesDB + fs.viewfs.mounttable.bucketA.link./salesDB s3a://bucketA/salesDB/ ``` Let's consider the following operations to understand to where these operations will be delegated based on mount links. - *Op1:* Create a file with the the path `s3a://bucketA/user/fileA`, then this file will be created physically at `hdfs://Cluster/user/fileA`. This delegation happened based on the first configuration parameter in above configurations. Here `/user` mapped to `hdfs://Cluster/user`. + *Op1:* Create a file with the the path `s3a://bucketA/user/fileA`, then this file will be created physically at `hdfs://cluster/user/fileA`. This delegation happened based on the first configuration parameter in above configurations. Here `/user` mapped to `hdfs://cluster/user`. *Op2:* Create a file the the path `s3a://bucketA/data/datafile`, then this file will be created at `o3fs://bucket1.volume1.omhost/data/datafile`. This delegation happened based on second configurations parameter in above configurations. Here `/data` was mapped with `o3fs://bucket1.volume1.omhost/data/`. @@ -119,16 +119,18 @@ The following picture shows how the different schemes can be used in ViewFileSys +Note: In ViewFsOverloadScheme, by default the mount links will not be represented as symlinks. The permission bits and isDirectory value will be propagated from the target directory/file. + ### Central Mount Table Configurations -To enable central mount table configuration, we need to configure `fs.viewfs.mounttable.path` in `core-site.xml` with the value as the Hadoop compatible file system directory/file path, where the `mount-table-.xml` file copied. Here versionNumber is an integer number and need to increase the version number and upload new file in same directory. +To enable central mount table configuration, we need to configure `fs.viewfs.mounttable.path` in `core-site.xml` with the value as the Hadoop compatible file system directory/file path, where the `mount-table..xml` file copied. Here versionNumber is an integer number and need to increase the version number and upload new file in same directory. -The ViewFileSystemOverloadScheme always loads the highest version number `mount-table-.xml`. Please don't replace the file with same name. Always increment the version number to take new file picked by newly initializing clients. Why we don't recommend to replace the files is that, some client might have already opened the connections to old mount-table files already and in middle of loading configuration files, and replacing files can make them fail. +The ViewFileSystemOverloadScheme always loads the highest version number `mount-table..xml`. Please don't replace the file with same name. 
Always increment the version number to take new file picked by newly initializing clients. Why we don't recommend to replace the files is that, some client might have already opened the connections to old mount-table files already and in middle of loading configuration files, and replacing files can make them fail. ```xml fs.viewfs.mounttable.path - hdfs://Cluster/config/mount-table-dir + hdfs://cluster/config/mount-table-dir ``` If you are sure, you will never do updates to mount-table file, you can also configure file path directly like below. If you configure file path, it will not check any highest version number loading. Whatever file configured it will be loaded. However file name format should be same. @@ -136,12 +138,12 @@ The ViewFileSystemOverloadScheme always loads the highest version number `mount- ```xml fs.viewfs.mounttable.path - hdfs://Cluster/config/mount-table-dir/mount-table-.xml + hdfs://cluster/config/mount-table-dir/mount-table..xml ``` Note: we recommend not to configure mount-links in `core-site.xml` if you configure above valid path. Otherwise both mount links will be mixed and can lead to a confused behavior. -If you copy the `mount-table-.xml`, you may consider having big replication factor depending on your cluster size. So, that file will be available locally to majority of clients as applications(MR/YARN/HBASE..etc) use locality on HDFS when reading `mount-table-.xml`. +If you copy the `mount-table..xml`, you may consider having big replication factor depending on your cluster size. So, that file will be available locally to majority of clients as applications(MR/YARN/HBASE..etc) use locality on HDFS when reading `mount-table..xml`. DFSAdmin commands with View File System Overload Scheme ------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java index 03c29c927e5ff..e7e74d13763c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java @@ -116,6 +116,11 @@ public void testListStatusRootDir() throws Throwable { assumeTrue(rootDirTestEnabled()); Path dir = path("/"); Path child = path("/FileSystemContractBaseTest"); + try (FileSystem dfs = ((ViewFileSystemOverloadScheme) fs).getRawFileSystem( + new Path(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY), "/"), + conf)) { + dfs.mkdirs(child); + } assertListStatusFinds(dir, child); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java index b3ed85b45827a..3060bd6722e3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java @@ -191,8 +191,17 @@ public void testListStatusOnRootShouldListAllMountLinks() throws Exception { new String[] {hdfsTargetPath.toUri().toString(), 
localTargetDir.toURI().toString() }, conf); + try (DistributedFileSystem dfs = new DistributedFileSystem()) { + dfs.initialize(defaultFSURI, conf); + dfs.mkdirs(hdfsTargetPath); + } + try (RawLocalFileSystem lfs = new RawLocalFileSystem()) { + lfs.initialize(localTargetDir.toURI(), conf); + lfs.mkdirs(new Path(localTargetDir.toURI())); + } try (FileSystem fs = FileSystem.get(conf)) { + fs.mkdirs(hdfsTargetPath); FileStatus[] ls = fs.listStatus(new Path("/")); Assert.assertEquals(2, ls.length); String lsPath1 = From b2facc84a1b48b9dcbe0816e120778d2100b320e Mon Sep 17 00:00:00 2001 From: Surendra Singh Lilhore Date: Sat, 20 Jun 2020 19:55:23 +0530 Subject: [PATCH 044/131] YARN-9460. QueueACLsManager and ReservationsACLManager should not use instanceof checks. Contributed by Bilwa S T. --- .../resourcemanager/ResourceManager.java | 2 +- .../AbstractReservationSystem.java | 10 +- .../security/CapacityQueueACLsManager.java | 111 +++++++++++++++++ .../CapacityReservationsACLsManager.java | 46 +++++++ .../security/FairQueueACLsManager.java | 72 +++++++++++ .../security/FairReservationsACLsManager.java | 42 +++++++ .../security/GenericQueueACLsManager.java | 55 +++++++++ .../security/QueueACLsManager.java | 116 ++++-------------- .../security/ReservationsACLsManager.java | 44 ++----- .../security/package-info.java | 28 +++++ .../resourcemanager/TestClientRMTokens.java | 5 +- 11 files changed, 402 insertions(+), 129 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/CapacityQueueACLsManager.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/CapacityReservationsACLsManager.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/FairQueueACLsManager.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/FairReservationsACLsManager.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/GenericQueueACLsManager.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/package-info.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 48cbd8f6fc50e..836a5ece80bbc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -438,7 +438,7 @@ public String getZkRootNodePassword() { protected QueueACLsManager createQueueACLsManager(ResourceScheduler scheduler, Configuration 
conf) { - return new QueueACLsManager(scheduler, conf); + return QueueACLsManager.getQueueACLsManager(scheduler, conf); } @VisibleForTesting diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractReservationSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractReservationSystem.java index 5b8772c85419e..d9e4be9e5225b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractReservationSystem.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractReservationSystem.java @@ -50,6 +50,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.security.CapacityReservationsACLsManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.FairReservationsACLsManager; import org.apache.hadoop.yarn.server.resourcemanager.security.ReservationsACLsManager; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.UTCClock; @@ -173,7 +175,13 @@ private void initialize(Configuration conf) throws YarnException { YarnConfiguration.DEFAULT_YARN_RESERVATION_ACL_ENABLE) && conf.getBoolean(YarnConfiguration.YARN_ACL_ENABLE, YarnConfiguration.DEFAULT_YARN_ACL_ENABLE)) { - reservationsACLsManager = new ReservationsACLsManager(scheduler, conf); + if (scheduler instanceof CapacityScheduler) { + reservationsACLsManager = new CapacityReservationsACLsManager(scheduler, + conf); + } else if (scheduler instanceof FairScheduler) { + reservationsACLsManager = new FairReservationsACLsManager(scheduler, + conf); + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/CapacityQueueACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/CapacityQueueACLsManager.java new file mode 100644 index 0000000000000..68a4530d616fa --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/CapacityQueueACLsManager.java @@ -0,0 +1,111 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.resourcemanager.security; + +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.QueueACL; +import org.apache.hadoop.yarn.security.AccessRequest; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This is the implementation of {@link QueueACLsManager} based on the + * {@link CapacityScheduler}. + */ +public class CapacityQueueACLsManager extends QueueACLsManager { + private static final Logger LOG = LoggerFactory + .getLogger(CapacityQueueACLsManager.class); + + public CapacityQueueACLsManager(ResourceScheduler scheduler, + Configuration conf) { + super(scheduler, conf); + } + + @Override + public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl, + RMApp app, String remoteAddress, List forwardedAddresses) { + if (!isACLsEnable) { + return true; + } + + CSQueue queue = ((CapacityScheduler) scheduler).getQueue(app.getQueue()); + if (queue == null) { + if (((CapacityScheduler) scheduler).isAmbiguous(app.getQueue())) { + LOG.error("Queue " + app.getQueue() + " is ambiguous for " + + app.getApplicationId()); + // if we cannot decide which queue to submit we should deny access + return false; + } + + // The application exists but the associated queue does not exist. + // This may be due to a queue that is not defined when the RM restarts. + // At this point we choose to log the fact and allow users to access + // and view the apps in a removed queue. This should only happen on + // application recovery. + LOG.error("Queue " + app.getQueue() + " does not exist for " + + app.getApplicationId()); + return true; + } + return authorizer.checkPermission( + new AccessRequest(queue.getPrivilegedEntity(), callerUGI, + SchedulerUtils.toAccessType(acl), app.getApplicationId().toString(), + app.getName(), remoteAddress, forwardedAddresses)); + + } + + @Override + public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl, + RMApp app, String remoteAddress, List forwardedAddresses, + String targetQueue) { + if (!isACLsEnable) { + return true; + } + + // Based on the discussion in YARN-5554 detail on why there are two + // versions: + // The access check inside these calls is currently scheduler dependent. + // This is due to the extra parameters needed for the CS case which are not + // in the version defined in the YarnScheduler interface. The second + // version is added for the moving the application case. The check has + // extra logging to distinguish between the queue not existing in the + // application move request case and the real access denied case. + CapacityScheduler cs = ((CapacityScheduler) scheduler); + CSQueue queue = cs.getQueue(targetQueue); + if (queue == null) { + LOG.warn("Target queue " + targetQueue + + (cs.isAmbiguous(targetQueue) ? 
" is ambiguous while trying to move " + : " does not exist while trying to move ") + + app.getApplicationId()); + return false; + } + return authorizer.checkPermission( + new AccessRequest(queue.getPrivilegedEntity(), callerUGI, + SchedulerUtils.toAccessType(acl), app.getApplicationId().toString(), + app.getName(), remoteAddress, forwardedAddresses)); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/CapacityReservationsACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/CapacityReservationsACLsManager.java new file mode 100644 index 0000000000000..531d2a315331b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/CapacityReservationsACLsManager.java @@ -0,0 +1,46 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +package org.apache.hadoop.yarn.server.resourcemanager.security; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; + +/** + * This is the implementation of {@link ReservationsACLsManager} based on the + * {@link CapacityScheduler}. 
+ */ +public class CapacityReservationsACLsManager extends ReservationsACLsManager { + + public CapacityReservationsACLsManager(ResourceScheduler scheduler, + Configuration conf) throws YarnException { + super(conf); + CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration( + conf); + + for (String planQueue : scheduler.getPlanQueues()) { + CSQueue queue = ((CapacityScheduler) scheduler).getQueue(planQueue); + reservationAcls.put(planQueue, + csConf.getReservationAcls(queue.getQueuePath())); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/FairQueueACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/FairQueueACLsManager.java new file mode 100644 index 0000000000000..688d46850b223 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/FairQueueACLsManager.java @@ -0,0 +1,72 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.resourcemanager.security; + +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.QueueACL; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This is the implementation of {@link QueueACLsManager} based on the + * {@link FairScheduler}. 
+ */ +public class FairQueueACLsManager extends QueueACLsManager { + private static final Logger LOG = LoggerFactory + .getLogger(FairQueueACLsManager.class); + + public FairQueueACLsManager(ResourceScheduler scheduler, Configuration conf) { + super(scheduler, conf); + } + + @Override + public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl, + RMApp app, String remoteAddress, List forwardedAddresses) { + if (!isACLsEnable) { + return true; + } + return scheduler.checkAccess(callerUGI, acl, app.getQueue()); + } + + @Override + public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl, + RMApp app, String remoteAddress, List forwardedAddresses, + String targetQueue) { + if (!isACLsEnable) { + return true; + } + + FSQueue queue = ((FairScheduler) scheduler).getQueueManager() + .getQueue(targetQueue); + if (queue == null) { + LOG.warn("Target queue " + targetQueue + + " does not exist while trying to move " + app.getApplicationId()); + return false; + } + return scheduler.checkAccess(callerUGI, acl, targetQueue); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/FairReservationsACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/FairReservationsACLsManager.java new file mode 100644 index 0000000000000..09f147f89ea14 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/FairReservationsACLsManager.java @@ -0,0 +1,42 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +package org.apache.hadoop.yarn.server.resourcemanager.security; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; + +/** + * This is the implementation of {@link ReservationsACLsManager} based on the + * {@link FairScheduler}. 
+ */ +public class FairReservationsACLsManager extends ReservationsACLsManager { + + public FairReservationsACLsManager(ResourceScheduler scheduler, + Configuration conf) throws YarnException { + super(conf); + AllocationConfiguration aConf = ((FairScheduler) scheduler) + .getAllocationConfiguration(); + for (String planQueue : scheduler.getPlanQueues()) { + reservationAcls.put(planQueue, aConf.getReservationAcls(planQueue)); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/GenericQueueACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/GenericQueueACLsManager.java new file mode 100644 index 0000000000000..5f3559c65e8a6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/GenericQueueACLsManager.java @@ -0,0 +1,55 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +package org.apache.hadoop.yarn.server.resourcemanager.security; + +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.QueueACL; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This is the generic implementation of {@link QueueACLsManager}. 
+ */ +public class GenericQueueACLsManager extends QueueACLsManager { + + private static final Logger LOG = LoggerFactory + .getLogger(GenericQueueACLsManager.class); + + public GenericQueueACLsManager(ResourceScheduler scheduler, + Configuration conf) { + super(scheduler, conf); + } + + @Override + public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl, + RMApp app, String remoteAddress, List forwardedAddresses) { + return scheduler.checkAccess(callerUGI, acl, app.getQueue()); + } + + @Override + public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl, + RMApp app, String remoteAddress, List forwardedAddresses, + String targetQueue) { + return scheduler.checkAccess(callerUGI, acl, targetQueue); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java index f13608c0ac6b3..290ae7c5d3a09 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java @@ -19,35 +19,26 @@ package org.apache.hadoop.yarn.server.resourcemanager.security; import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.security.AccessRequest; import org.apache.hadoop.yarn.security.YarnAuthorizationProvider; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; - import java.util.List; -public class QueueACLsManager { - - private static final Logger LOG = - LoggerFactory.getLogger(QueueACLsManager.class); +@SuppressWarnings("checkstyle:visibilitymodifier") +public abstract class QueueACLsManager { - private ResourceScheduler scheduler; - private boolean isACLsEnable; - private YarnAuthorizationProvider authorizer; + ResourceScheduler scheduler; + boolean isACLsEnable; + YarnAuthorizationProvider authorizer; @VisibleForTesting - public QueueACLsManager() { + public QueueACLsManager(Configuration conf) { this(null, new Configuration()); } @@ -58,41 +49,27 @@ public QueueACLsManager(ResourceScheduler scheduler, Configuration conf) { this.authorizer = YarnAuthorizationProvider.getInstance(conf); } - public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl, - RMApp app, String remoteAddress, List forwardedAddresses) { - if (!isACLsEnable) { - return true; - } - + /** + * Get 
queue acl manager corresponding to the scheduler. + * @param scheduler the scheduler for which the queue acl manager is required + * @param conf + * @return {@link QueueACLsManager} + */ + public static QueueACLsManager getQueueACLsManager( + ResourceScheduler scheduler, Configuration conf) { if (scheduler instanceof CapacityScheduler) { - CSQueue queue = ((CapacityScheduler) scheduler).getQueue(app.getQueue()); - if (queue == null) { - if (((CapacityScheduler) scheduler).isAmbiguous(app.getQueue())) { - LOG.error("Queue " + app.getQueue() + " is ambiguous for " - + app.getApplicationId()); - //if we cannot decide which queue to submit we should deny access - return false; - } - - // The application exists but the associated queue does not exist. - // This may be due to a queue that is not defined when the RM restarts. - // At this point we choose to log the fact and allow users to access - // and view the apps in a removed queue. This should only happen on - // application recovery. - LOG.error("Queue " + app.getQueue() + " does not exist for " + app - .getApplicationId()); - return true; - } - return authorizer.checkPermission( - new AccessRequest(queue.getPrivilegedEntity(), callerUGI, - SchedulerUtils.toAccessType(acl), - app.getApplicationId().toString(), app.getName(), - remoteAddress, forwardedAddresses)); + return new CapacityQueueACLsManager(scheduler, conf); + } else if (scheduler instanceof FairScheduler) { + return new FairQueueACLsManager(scheduler, conf); } else { - return scheduler.checkAccess(callerUGI, acl, app.getQueue()); + return new GenericQueueACLsManager(scheduler, conf); } } + public abstract boolean checkAccess(UserGroupInformation callerUGI, + QueueACL acl, RMApp app, String remoteAddress, + List forwardedAddresses); + /** * Check access to a targetQueue in the case of a move of an application. * The application cannot contain the destination queue since it has not @@ -107,50 +84,7 @@ public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl, * @return true: if submission is allowed and queue exists, * false: in all other cases (also non existing target queue) */ - public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl, - RMApp app, String remoteAddress, List forwardedAddresses, - String targetQueue) { - if (!isACLsEnable) { - return true; - } - - // Based on the discussion in YARN-5554 detail on why there are two - // versions: - // The access check inside these calls is currently scheduler dependent. - // This is due to the extra parameters needed for the CS case which are not - // in the version defined in the YarnScheduler interface. The second - // version is added for the moving the application case. The check has - // extra logging to distinguish between the queue not existing in the - // application move request case and the real access denied case. - if (scheduler instanceof CapacityScheduler) { - CapacityScheduler cs = ((CapacityScheduler) scheduler); - CSQueue queue = cs.getQueue(targetQueue); - if (queue == null) { - LOG.warn("Target queue " + targetQueue - + (cs.isAmbiguous(targetQueue) ? 
- " is ambiguous while trying to move " : - " does not exist while trying to move ") - + app.getApplicationId()); - return false; - } - return authorizer.checkPermission( - new AccessRequest(queue.getPrivilegedEntity(), callerUGI, - SchedulerUtils.toAccessType(acl), - app.getApplicationId().toString(), app.getName(), - remoteAddress, forwardedAddresses)); - } else if (scheduler instanceof FairScheduler) { - FSQueue queue = ((FairScheduler) scheduler).getQueueManager(). - getQueue(targetQueue); - if (queue == null) { - LOG.warn("Target queue " + targetQueue - + " does not exist while trying to move " - + app.getApplicationId()); - return false; - } - return scheduler.checkAccess(callerUGI, acl, targetQueue); - } else { - // Any other scheduler just try - return scheduler.checkAccess(callerUGI, acl, targetQueue); - } - } + public abstract boolean checkAccess(UserGroupInformation callerUGI, + QueueACL acl, RMApp app, String remoteAddress, + List forwardedAddresses, String targetQueue); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ReservationsACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ReservationsACLsManager.java index be2be184f3673..6fc9953f79a90 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ReservationsACLsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ReservationsACLsManager.java @@ -24,50 +24,26 @@ import org.apache.hadoop.yarn.api.records.ReservationACL; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationConfiguration; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; - import java.util.HashMap; import java.util.Map; /** * The {@link ReservationsACLsManager} is used to check a specified user's * permissons to perform a reservation operation on the - * {@link CapacityScheduler} and the {@link FairScheduler}. * {@link ReservationACL}s are used to specify reservation operations. 
*/ -public class ReservationsACLsManager { +@SuppressWarnings("checkstyle:visibilitymodifier") +public abstract class ReservationsACLsManager { private boolean isReservationACLsEnable; - private Map> reservationAcls - = new HashMap<>(); - - public ReservationsACLsManager(ResourceScheduler scheduler, - Configuration conf) throws YarnException { - this.isReservationACLsEnable = - conf.getBoolean(YarnConfiguration.YARN_RESERVATION_ACL_ENABLE, - YarnConfiguration.DEFAULT_YARN_RESERVATION_ACL_ENABLE) && - conf.getBoolean(YarnConfiguration.YARN_ACL_ENABLE, - YarnConfiguration.DEFAULT_YARN_ACL_ENABLE); - if (scheduler instanceof CapacityScheduler) { - CapacitySchedulerConfiguration csConf = new - CapacitySchedulerConfiguration(conf); + Map> reservationAcls = + new HashMap<>(); - for (String planQueue : scheduler.getPlanQueues()) { - CSQueue queue = ((CapacityScheduler) scheduler).getQueue(planQueue); - reservationAcls.put(planQueue, csConf.getReservationAcls(queue - .getQueuePath())); - } - } else if (scheduler instanceof FairScheduler) { - AllocationConfiguration aConf = ((FairScheduler) scheduler) - .getAllocationConfiguration(); - for (String planQueue : scheduler.getPlanQueues()) { - reservationAcls.put(planQueue, aConf.getReservationAcls(planQueue)); - } - } + public ReservationsACLsManager(Configuration conf) throws YarnException { + this.isReservationACLsEnable = conf.getBoolean( + YarnConfiguration.YARN_RESERVATION_ACL_ENABLE, + YarnConfiguration.DEFAULT_YARN_RESERVATION_ACL_ENABLE) + && conf.getBoolean(YarnConfiguration.YARN_ACL_ENABLE, + YarnConfiguration.DEFAULT_YARN_ACL_ENABLE); } public boolean checkAccess(UserGroupInformation callerUGI, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/package-info.java new file mode 100644 index 0000000000000..dcc2d87103120 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/package-info.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Package org.apache.hadoop.yarn.server.resourcemanager.security + * contains classes related to security. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +package org.apache.hadoop.yarn.server.resourcemanager.security; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java index e700bfd1e873e..50afced670e25 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java @@ -544,8 +544,9 @@ public ClientRMServiceForTest(Configuration conf, ResourceScheduler scheduler, RMDelegationTokenSecretManager rmDTSecretManager) { super(mock(RMContext.class), scheduler, mock(RMAppManager.class), - new ApplicationACLsManager(conf), new QueueACLsManager(scheduler, - conf), rmDTSecretManager); + new ApplicationACLsManager(conf), + QueueACLsManager.getQueueACLsManager(scheduler, conf), + rmDTSecretManager); } // Use a random port unless explicitly specified.
From 17ffcab5f621400bd8bb47dc8fb29365f6e24ebd Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sun, 21 Jun 2020 17:27:28 +0530 Subject: [PATCH 045/131] HDFS-14546. Document block placement policies. Contributed by Amithsha. --- .../markdown/HdfsBlockPlacementPolicies.md | 165 ++++++++++++++++++ .../src/site/markdown/HdfsDesign.md | 3 + .../resources/images/RackFaultTolerant.jpg | Bin 0 -> 55017 bytes 3 files changed, 168 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsBlockPlacementPolicies.md create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/RackFaultTolerant.jpg
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsBlockPlacementPolicies.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsBlockPlacementPolicies.md new file mode 100644 index 0000000000000..4550f0441b62e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsBlockPlacementPolicies.md @@ -0,0 +1,165 @@ + + +BlockPlacementPolicies +====================== + +
+## Introduction +By default HDFS uses BlockPlacementPolicyDefault, which places one replica on the local node and the other two replicas on two different nodes of the same remote rack. In addition to this, HDFS supports several different pluggable block placement policies. Users can choose the policy based on their infrastructure and use case. This document describes each type of policy along with its use cases and configuration. + +
+## BlockPlacementPolicyRackFaultTolerant + +BlockPlacementPolicyRackFaultTolerant can be used to spread the placement of blocks across multiple racks. By default, with a replication factor of 3, BlockPlacementPolicyDefault will put one replica on the local machine if the writer is on a datanode, otherwise on a random datanode in the same rack as that of the writer, another replica on a node in a different (remote) rack, and the last on a different node in the same remote rack.
In total only 2 racks are used, so if both of those racks go down at the same time the data becomes unavailable, whereas BlockPlacementPolicyRackFaultTolerant places the 3 replicas on 3 different racks. + +For more details check [HDFS-7891](https://issues.apache.org/jira/browse/HDFS-7891) + +![Rack Fault Tolerant Policy](images/RackFaultTolerant.jpg) + + **Configurations :** + +- hdfs-site.xml + +```xml +<property> + <name>dfs.block.replicator.classname</name> + <value>org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant</value> +</property> +``` + +
+## BlockPlacementPolicyWithNodeGroup + +With the new 3-layer hierarchical topology, a node group level is introduced, which maps well onto an infrastructure based on a virtualized environment. In a virtualized environment multiple VMs are hosted on the same physical machine, and VMs on the same physical host are affected by the same hardware failure. By mapping each physical host to a node group, this block placement policy guarantees that it will never place more than one replica on the same node group (physical host); in case of a node group failure, at most one replica will be lost. + + **Configurations :** + +- core-site.xml + +```xml +<property> + <name>net.topology.impl</name> + <value>org.apache.hadoop.net.NetworkTopologyWithNodeGroup</value> +</property> +<property> + <name>net.topology.nodegroup.aware</name> + <value>true</value> +</property> +``` + +- hdfs-site.xml + +```xml +<property> + <name>dfs.block.replicator.classname</name> + <value> + org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup + </value> +</property> +``` + +- Topology script + +The topology script is the same as in the examples above; the only difference is that, +instead of returning only **/{rack}**, the script should return +**/{rack}/{nodegroup}**. Following is an example topology mapping table: + +``` +192.168.0.1 /rack1/nodegroup1 +192.168.0.2 /rack1/nodegroup1 +192.168.0.3 /rack1/nodegroup2 +192.168.0.4 /rack1/nodegroup2 +192.168.0.5 /rack2/nodegroup3 +192.168.0.6 /rack2/nodegroup3 +``` + +For more details check [HADOOP-8468](https://issues.apache.org/jira/browse/HADOOP-8468) +
+## BlockPlacementPolicyWithUpgradeDomain + +To address the limitation of the block placement policy on rolling upgrade, the concept of upgrade domain has been added to HDFS via a new block placement policy. The idea is to group datanodes in a new dimension called upgrade domain, in addition to the existing rack-based grouping. For example, we can assign all datanodes in the first position of any rack to upgrade domain ud_01, nodes in the second position to upgrade domain ud_02 and so on. +It will make sure replicas of any given block are distributed across machines from different upgrade domains. By default, 3 replicas of any given block are placed on 3 different upgrade domains. This means all datanodes belonging to a specific upgrade domain collectively won’t store more than one replica of any block. + +For more details check [HDFS-9006](https://issues.apache.org/jira/browse/HDFS-9006) + +Detailed info about configuration: [Upgrade Domain Policy](HdfsUpgradeDomain.html) +
+## AvailableSpaceBlockPlacementPolicy + +The AvailableSpaceBlockPlacementPolicy is a space-balanced block placement policy. It is similar to BlockPlacementPolicyDefault, but gives datanodes with a lower used-space percentage a slightly higher probability of being chosen for new blocks.
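(Editorial sketch, not part of this patch.) The policy class is picked up by the NameNode from the `dfs.block.replicator.classname` key shown in the configuration below. As a minimal, hedged illustration of the same key being set programmatically (for example when preparing a Configuration for a test cluster), assuming only the stock `org.apache.hadoop.conf.Configuration` API; the class name `PlacementPolicyConfigSketch` is hypothetical:

```java
// Editorial sketch, not part of the patch: sets the block placement policy key
// on a Hadoop Configuration object. The NameNode reads
// dfs.block.replicator.classname at startup to instantiate the policy, so on a
// real cluster this property belongs in the NameNode's hdfs-site.xml.
import org.apache.hadoop.conf.Configuration;

public class PlacementPolicyConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("dfs.block.replicator.classname",
        "org.apache.hadoop.hdfs.server.blockmanagement."
            + "AvailableSpaceBlockPlacementPolicy");
    // Print the effective value to confirm the override took effect.
    System.out.println(conf.get("dfs.block.replicator.classname"));
  }
}
```

A test might, for instance, pass such a Configuration to a MiniDFSCluster builder; the equivalent hdfs-site.xml entry follows.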
+ + **Configurations :** + +- hdfs-site.xml + +```xml +<property> + <name>dfs.block.replicator.classname</name> + <value>org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy</value> +</property> + +<property> + <name>dfs.namenode.available-space-block-placement-policy.balanced-space-preference-fraction</name> + <value>0.6</value> + <description> + Special value between 0 and 1, noninclusive. Increases chance of + placing blocks on Datanodes with less disk space used. + </description> +</property> + +<property> + <name>dfs.namenode.available-space-block-placement-policy.balance-local-node</name> + <value>false</value> + <description> + If true, balances the local node too. + </description> +</property> +``` + +For more details check [HDFS-8131](https://issues.apache.org/jira/browse/HDFS-8131) +
+## AvailableSpaceRackFaultTolerantBlockPlacementPolicy + +The AvailableSpaceRackFaultTolerantBlockPlacementPolicy is a space-balanced block placement policy similar to AvailableSpaceBlockPlacementPolicy. It extends BlockPlacementPolicyRackFaultTolerant and distributes the blocks +amongst the maximum number of racks possible, while at the same time trying to choose datanodes with a low used-space percentage with high probability. + + **Configurations :** + +- hdfs-site.xml + +```xml +<property> + <name>dfs.block.replicator.classname</name> + <value>org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceRackFaultTolerantBlockPlacementPolicy</value> +</property> + +<property> + <name>dfs.namenode.available-space-rack-fault-tolerant-block-placement-policy.balanced-space-preference-fraction</name> + <value>0.6</value> + <description> + Only used when the dfs.block.replicator.classname is set to + org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceRackFaultTolerantBlockPlacementPolicy. + Special value between 0 and 1, noninclusive. Increases the chance of + placing blocks on Datanodes with less disk space used. The closer the value is to 1, + the higher the chance of choosing a datanode with a lower percentage of stored data; + as the value moves towards 0, the chance of choosing a more heavily loaded datanode increases. + </description> +</property> +``` + +For more details check [HDFS-15288](https://issues.apache.org/jira/browse/HDFS-15288) \ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md index ff43e45191880..894ff7e78e863 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md @@ -139,6 +139,9 @@ then that replica is preferred to satisfy the read request. If HDFS cluster spans multiple data centers, then a replica that is resident in the local data center is preferred over any remote replica. +### Block Placement Policies +As mentioned above, when the replication factor is three, HDFS’s placement policy is to put one replica on the local machine if the writer is on a datanode, otherwise on a random datanode in the same rack as that of the writer, another replica on a node in a different (remote) rack, and the last on a different node in the same remote rack. If the replication factor is greater than 3, the placement of the 4th and following replicas is determined randomly while keeping the number of replicas per rack below the upper limit (which is basically (replicas - 1) / racks + 2). In addition to this, HDFS supports 4 different pluggable [Block Placement Policies](HdfsBlockPlacementPolicies.html). Users can choose the policy based on their infrastructure and use case. By default HDFS uses BlockPlacementPolicyDefault. + ### Safemode On startup, the NameNode enters a special state called Safemode.
Replication of data blocks does not occur when the NameNode is in the Safemode state. The NameNode receives Heartbeat and Blockreport messages from the DataNodes. A Blockreport contains the list of data blocks that a DataNode is hosting. Each block has a specified minimum number of replicas. A block is considered safely replicated when the minimum number of replicas of that data block has checked in with the NameNode. After a configurable percentage of safely replicated data blocks checks in with the NameNode (plus an additional 30 seconds), the NameNode exits the Safemode state. It then determines the list of data blocks (if any) that still have fewer than the specified number of replicas. The NameNode then replicates these blocks to other DataNodes. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/RackFaultTolerant.jpg b/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/RackFaultTolerant.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c05930389235bc3b3c409b6d9769ed3a27c3f70 GIT binary patch literal 55017 zcmeFZ2Urwc)-GD)43cvY5JZw9K}n4wNd!c4Rx&iuC?HW9QG(+ysZ|0lP`Olei&-w3jp65QUCx506IK^(@Q`1OL+Le8St|NxI2Xr{ycXb{Cv9U zIl?@>?Oa@VE{a{gC?*5o1Ng7VezveB0Q>p#v(;%=;C6x}+jI2vbe9zuck>XtXA85o z6SIN2iu>Gi7r!KSQ5=wm__*J*f!cZUSlc-`xhe4OG`8~cIN2)j8cAtg)N)t0b97So zdtj&Mr>$?}2er9s%L`HDk@u1HadmgK^SsC7gzUz8A+kPro1hJjjpZ{}U zb|9Po-tRxL;PlE*9{jDTr<47E(e(7npG{BilD+NZW9MRg!^zdo%>#U{0`C>+E4(~^ zT{8KHOP8fC@$#H@Agkee&%w^f$=1{H&l~m_T4*Z7$|KY&@|2XiM2WRI7x^3Q|e|9nrC<6re z_@}?%MhN~AkrELR5)zSRuxJOc0@LLxFEA~FUl3Mz*GKZro^bHJ+ zj7>~!Z0+nF9G#p!JiQ)z`}q1jc^VQL7XB|R;v^Uu~xq0~og+&#WRn;}M zb@dJH9i3g>J?}sC4v&nEjZb`@oI)-xeOX>v{kpb}-u<@seg6RSU2vcSK8 zQ1%O5v>;vhgoFfyB&T%Y;d`GFPD@C1_7XAOHC>W>_vv{h9+NU$kAGd>M#gvf4vNwG z!4NqUza;V;`joVvl>K{z1^>UI?C*sAMb{KSNq`3mkAN0{0Ea#McE_oKG#m4k-tEyk z`WW4XXtg`dsLVtU4_|L&-&OleqsnKQY7e85?fDZr6EyfI@=kyb!GJR-z#AFV32*|C z`E%o>G>!yLfDa`B>?c6^Wz`d4m<2jkK+p?ECgMnKaO(3x5L8O}39zIO?-0b&{`0+- zg)u7Dn3OI*_|uZ5hEaQ46WcRg9J-PfH^0y^>Rz+ILimyz;9tOZl1>=CADMXb!q!W1 ztb}(P%^@G0<^=ew<6j453P&|~#vT%FWMM=SvFA^KkdYktpt$)7&}N&3G&xfDmTf5v z$En(bgZlMewcnm#kbeSD-E29CjB-J7P!8WNwH zh59g02>BX~g`5EGmOUpxwC)K&nSoNqh0k}K0N8VzaLkw!cx`PM@|C#^zEGos%EmEk z$DaU7%^oKJQ35=KYnZcR{YC5-dxzh~amvD9NBcjfRIMt^{V^u8YoA6I>bbsRN!%K=NhBPDoGEhxDojy=RN+bGcb8_)iQfc;7t@RqIq)AE13 z4E_r)8D(Mb9%td0F?1d6Yy+V!>=?CQ4V^O|QEZYHV+bG)PF| z)TQ=HeRZ~2PXONhkP~3j1GR(8a4jGhEBI5~r(#FNMq1HeMbU!$(HPe}pT-V9wFuAm zP2!7kFE+mylOoP)sEkJyA^8pQf*XV0*%IHInNazC!3aKsfBA~zgn!On)HxOz5RtSgA?H3 z1@;6GaL`p@Y=3mcDBVYM|0xO<pL-h-7RK`S`gs68#P_|;mz`P_1 zZdUTo>Ae^tZXYUoYe3n1(JVo8@cBB_6Z#d-j8#}9Kr+<$=hUGU8fQ$*`rUfbPotV5 zFC7)ERX;DhIz!sl##d7&*fCEgsYB6{Ciw$qQ!`R6TzY4Ga@k1zOP!I4(K&BMQOOuQ z>**KR&U?_BoUOrO`SF9`i1Rrng8@wZ7!N)3jN)R85YA9L{v|a3u?FIr)GBC=L+5Fn z>j@nU8|6=ciJN-_mLZT3ZoVj-{IMD+ufq4fI-A9y@Rm@kxOX)tz#)JzLKx&ky6}zj{{29!XPA(xOdF7(D_x(WkbCrP`$bPvAE$^0$E z_(9;5M5fDw3*y=UH~6t*qV|{PpV;pe;*knaQzMmb1jyayHqu-ig><1{|gC3yU zf^q-8gVRD7AcQeT`IrHNVXP2r;nB_2n`5oDe5tF^tgkNhxZdaO@(Q{S<1b`z^Fwx% zy5{FFH_&Ri36-K23pKd~n_*jzny9A2{inxpjU!IZp>rT z)v%WxLR!CGT>|~ED}D!{+$NH+w?`HE%mZvH>net^%K4HHO*xWLYqfq+<9MO@zWaN; z=c8Z7>=%>TJTSUzdqHgu0mYcmgl{dQqZH4!G>w`n1nSz{_jWr=<$UjPyv=Rke95O* zQC|xA6VrhA|AKPJZ#-`{1dS7d%}1I$*fcP5QHRFoNtc>iYwp^Cp=?h$tlCEm+1yfF zVQ1Hvc0D8jwf6HA< z&;f5E&J(bA9qwRxaSY?t@uV-;#`R{ypLL%AViHFl({lGMwHC61lPx7s_K(Xj>0N=+ zV^aOv?G)3E=@6!MKU-0e_~N%QQ^Si_-Y6Ld71a{}$?{n$nA#J7zqlK|Pt<$@4AJOd zKyQk_vF!xdC$0ofkSTHG|0cB|&@fLTVTaT{ry&S67kKA~>h-GIBuVfNX&*D(lS~L0 zri3BjyRN@a0!wc+ZfP+&e#wf(d5)HCCHuAD(~?bQjgch}T#Ecukj%pn)mx~FDc^{E 
zMg7L`bGJF==A*3WY!ZH`g-*@QSdv=q;Al&)+vEu``zX`3Y%KN@Rs4ACs&b@mO&+5p z$NlRP0-b?hRs{4327;DZY#y6Se6^0FNi!~$6gEuO(xxbeJ`-Tt&&^HP>bUW-pMRk% zXx-*2e=(jv^0=(I0F$E2?3WcIInQEl?p!{vFxE8eIQ}+uelwc99~X69^(?9DZhx{w zu(vC)@oy~cQqXb#wqe9g(nKL1jvJT1en|Di*$Sqr8C%WndD*u}>FmMw3@IVU7b90C zE`xu?@l=~PA5bZa15YJgh0Q0vb5eB1St%=Km95vZ*XI1t-K^i~uLES5HF z<5@m>(^R|&!KQFOg?yg?yBxl(#8s@q0Lsm08mBFcgdGQ0&mUKSO6LN)7W>kueg23D z+InPXS*%3Fp!7%z+;;wlRO}P@zJTNWp)!?UH6T-0=HG>*u0ltbHW|^I zo0^;Ut(whFy`}5KTu^_1;hN!HWqfKsTYOm&$~%CQWLo8g%kBx!Ez;yCa13Ju$J7lw z$3MzJo#zKZYJsum;6~XAFrUtiQ$uL>vz9!1F2@PCKwx{);O%K(VEKB&;sj`whG1jW zutLRIhn!yXN8i8@>izdM-)u?$^ABmtCU7?WzxI{=%d_^HhSC(C%aKe#Pk>7+S*W}+ z_*bt9_)b5l3i-g*L+-qa3$yUXVL{(?SHaKHv7f7#^T|f_3Gm{Z4o1%ivM0OIdaweT zf%vc7B!UC|OA21kQEExH5M5Z8pXH;1fog~yb=&tDwQnpqHq=&Dh@_9_G$Wjs_oJzr z*!E`E?B~pqj~_b^6HQkOv(v70IhRzaaik}@-ST44y`}bhWVV|%X@t}2K@J>0c0B?1 z@GMLJlJ;-r@uy1r#fk!c)ql@(dzY#`|x)Hgh)~3ie?#e$=t!3DuKj@m>7`iAt zJ-KtU;u%3Vy1Ijp#DK){f-!yc4#ukWyS5?^d?E4#s6-gN36d|yG4CAf#fEcZctNxL z6@(9tP5}L-MG_oo>yhlA?tI`0Fm?s=7+gu>xKFYFO`TlsI^0K~%O+G%E9nbabdKZM z79&z}TY8o?F~jVRE{MBL?S*koF70l^{9}bZB>tmT^*tT{qZK+g?^F_94`jCxHn$aNGej6u2CMbKWxGmt(y~U zye#*|I;RYUsEU4Hn#8rF0{9wX#pe(6xz9jQVCXdo&Qpp5TwzZq4VD(Z@AZd%Z$CZ( ziteKcP5?ry;|y+8D}4QBfD(x5FrNSpASCeL(#d#W>iuG@9;O(n1;^1^Ks!un!bEp+ zprw9SUL|j@c*gL@oHtZ;FABA{_tK3&WAVc}vrh$J&^;YZbnBPJ7SIj(EDWyf=?mZg zuL@Z3j01(y$sr30Y+7cWrd`Qld3+`I>o}zK^-cEz3fg3AC2|SlIDmh#R=8Il4laOL zE3(pDe0A$Elja+4&ty9&HF*xmx0ngpQZB}+cg5{n& zc!|bin|o9O2l}IKHkBirul&=~pJ~&bAJV*hX%isK$_0FTekF(-_0P0;RYTFW?tCew}1IUcmacCG><+|ASfig=j_GPs?TWs%I1$==E4 zfrhP3H^9P`)797s>U{;M6^7>2+>L$O=u$}CP!laYU+8h}*pF*A*7?$wCEq`{8g)ZM zj%iFMa0I7zOiHIVHWfbU1xXEreDQy%P|#ED6SG4P+T8D@@CAia)2I8({r)~b!uBn6 zR;*%w{98}pnp1cIw;RRTVL;2c^?5&=Yiv=jN#_Jm9tUk_&^`nkt@_Iox0WXV{)ibr zaw4DV+GkD5$l_Vq#nnzD)2^J_x28Ho*fZkvH{*qcA|%fyz2tg8`d%S?KV;d%?7A~l z`3Y@kGH0$p#t{t|S!P4N=%ZSXV*7SrXM2Htls z*V{zB^aQx3ReW`@La_{>EAM`4D1)YGPJj!i=Je8H)}h#`u4lh?0xVmA2Apvjg36QE zI{{QWKr>6=2*DPEj!jPl{A=B*iht)yasniI;;oMwJqPjg7igVhW*X2wQ@sL(Ulxne z;}QZ>3&>O3fLFzhGZ+M;XiKm+a+@y5p82V9mws{r^fo|{w|w$(kzt@+As&SPFjC?w z{g;DWFH4kp7RVlO3pEkuWyP}2B{_suBU*U&z1YlnM^x;te*DAck~7&i-rG^KQV^4c zw%r93KtSOY&9Ba@rNI*H#}!0K>puAee80EZ@b9Aen~45*|I7a?I0}sWKD6#axrssl z<@?Q1)*w zV!Xoqt{idwWOm>9?0Aw44JpIhLg$p^OohSJ4hLo)6ch~-jPuWU&tvPeakK*<4z_n2 z#|?rGB#%yjzBJ&w^N&34&*PjDtG5!h(efHF*Ec=+7HcVf#%bM$Yi=U^{;9llh0`K_ z0lFGYi!83L??UIla-(7bV4x(!ekPbm<#ur9pesPS3I>{o=YQsWp0t27rXTR#&OhSl zFX*?4WxfX6H+EwbKRZl5Z^r65UCb>oeh;!Os<&n7+)*8F+l{+s8LM@iN|fIL9U#+K zblNJdCPVvEr&1`}x|%NArLmNzsTqF^FJ+h3p}R^ha~FJm`&D!+E`qxQG$}kgtv@`} zuzXqUC&1U&KQlYjpfOC5#QWp}-*+1exVlIs(}(sddyN!TJlGoiIu#Q);6AWl@0RlD zqcjR!FVr(w{T-zJ-b@63>uRc{WoS+zn)K6U-30Hu|2Cq zPH~kyJLGoJ6#L(?|EL<` zCse8@Bm5~)3KQ9-$TiX=1x}c-c!6T?k{m7QgQ7dT47HiwL6?p~uaVvcMQcnSN2*|i z74WkHLZh>C<>qpNZ;&Mm1&1{NwsR$ce=ou-=fHdKd)wPcLcYSax!v}_nIzj&^5Sz#&e zsrdQ6M)I5^1XjS*&?CJm=JGxnNJWGW=9c73JX})9Rqn6!DELl9nPaN|g=EuAb~N*n zTDT-4BDsc%xi;+v?}%!KBztwaVV%i$_5!iVY->0Edz9qw-^vi;zuRux1f5`T{WipQ zs#o_##?r1S3XCjXlxm(!5KC0|)ochGoISpi(Rj=w6RO%mY0ivrdxo=dy@ z!1k9pmSux+xZ}VNMtSj5yfO^Fx8Z783MS%I;qMEP@FNba+zF6x=DaSi_fHv~-7Go~ z2O7f4f>~&_&a6X|H>ZIjCIzP*i<~%)Pz2MN!~JxpAQ@;FuZl;)cW*X<+`4Nve|%;S zG)UaLnoXeYx(kIIpGksur*UKopaL7SyLdaqUfU?VQC~n(##e-1*(B@t6$}c5V2s)W ziBQ|oi^ks(R2zqZ88D|ud6r*OO7_30(G?eXJrf%z(F=RYlUB#fH6zu8ra!v5m1JTf@;+rqE86 zBz|`G)^jFd6G;*>g+`Yei|mL7!77e?J4`0@?$5|B=MR6M#+*r*n+i zXTa;gavc|n2iSP!eD$8=tRM9{H@F3JD|1;5pY6YFHYNAol|1kLiZ4(~%}?FLF+3zpIqAv&_oOk?F|lL-!~0nJ6dH2EMv zr;U}FF*~w8xDU`DhrwUM=VLuIL5?V(X6RJQ{WDjOKzV{(x=lRLCQk2cDGuiCoEu7y 
zGYh>XYtO<&aze(94Hs2HUbgSBr1TnNyHqc5`>+rYv>-q~AoMlnK8S#)?_lYy!1)>h zK^6pZzz6L?wN01;s&s1*1N{|@0>RIJ76)T2bTJ(g#m&-0U+CjDtyeb`a1~ZyKIc5) z=h~Iqnl5s`gMqecC_inJOHEFOkoHWMX2D$=w^N|Bw+X~C1Q}7axHi)r+@w*_#IG3F z-|^SK;jsU^|7nDd6X9RR6{)~rGc_B&Mkxh4dcS3^z}a6kkrL;$KRjdJ&sjw)de^cJmt$U?%b_RnL$^{Xj) z^SkrEQiDGOzSE%ZzuNzAMqnA2=9#J*#hI|?7%ODI{VSsgWTijvlNqHwic|3SWa*LIXEWpi&Z?;{jL4iL#BIxR_d)?`tTO z*VNP`4#Y_7$B#J5C7BG;N#NO$5Jqelf0?HY>}`=kF-$Q8a*yv}s*1I!)y1m5BDP)m znZM1h4bO#(@m(KMmNilDdQM_uXAk^%`if-G8$yGnf-02gVPGAoK992zgbCe55_PH% zipa73)vRx?wwHgUQd~j~1ee}T@NE)*)kW!S@#T>sqds6;e$E}>SZTU!hM_^*7&H&% z=`c>0uB3_CSB)-@BRD=+r5P;exR*BGxPwf9zW8#a|MX^W4&xHkQxoN}NL`4^GDMYS zFAlXDJ5<&OZ*OZ|bKT6%2sloWP_0y7p^-8$56V-Lxu*j75h`+EoZJu1?>lAECw*6M z8M^xn=F+__PdtBy;2^U4@k~7n&p{~+W2gx7^R=Ebh{t(Cuv$L$PmZK9**C`Igd6fb19P>np}n~KtH9+OCD{?5)g`~E9e++lXkI@@nvsoBO^*;-Pe#f9e zpW_1PQq*Cug4G40t+*ht-s4n{{M^A{{1s$r8VrDrWBiP8(axt!Z;Ak?&;m}h&#Src zuj8+13{P9<$dj9@9_6noaj867+M$OU9X=iwlkAt%scB3bhEz={d`ytk9(vzdQosPa zgTL^STIb3c&`H%%&SM0@m7M_PZl6c*6v02k48Xwu>Gw)Y=*u1dEDK5nMuvK$ z89^*R4)l9ot$-m|urK6?KrBWB3r6?X`oWNea2LL-CF;)2f_+pHpfFq|*UYSL@zB52 zKs3Li&$ra=ZNFsq8O6QVXI+bY?Ay^VAK%m|78=TC^kzKZJouk>?9&Pw3a42T-9h`Y z?ZsqQG2!&)!8y|T@uCy~5$~8jOp&&^9%-7GLQTsuN*vj$Gd{}3 zR|gNDw@z>@9UpHuOmnTMh+{kUx!zyPEw=3Jo%K*njO)WLqHg~;dbm-Em<`;MG#MP( z7cjk&h8kcC@E!Ba;t*kzG&~68tgRV#sQb28oXrqxEXI5~0W3$p_X)Gh%__K-*&Gq54>esbmT+|-=qJ}r`E;v$+05yOI$8ZW|JEjk z^D=gf2V7u=fD94?W|6bu3z<4NjcgnpoD?|x5~heOC_Q&1l|8M&nlvtX*VKqA^j^|CelBWNnzE<`|A}BsK2Cmb{)o~k z>(GM8zc(skJzHR;QQo}!xlmnQ4&w8p->aPdut@*xEL#Z1{XHA0D%A&SY7g?J0klYl?MP_RLOfhrpS-1Uf#4DG; z+{G77ER^q7bVul1i_!5#!*Ci#Q6T(4;Sa|1Tv=eDqlWhr@R-U0Jf;h}s)Mz{ z#^Pyz>4Z*m$%`8NLe*(X`ZMQ=p5rsV?Qtq72^7UF>5XhlH7X;yH)n5tyFKD*A+hh| zT9%zmkyjhN9M?#8;NswD6U|AcPc$tMBnv+Jf5NKTKx)kT#-dt~$oIw#gUS{enPiXl zaP;GI%^zzvAF31d)O9u5ME5m9A5eM!tWJD}1!L|vNl0!iFDQ@Y41vdsFgA?lB5^b_ zmd%+>FkIAY8d@kKrNFJ7qMg9HK-y3Fwz(78k=-Z~(O26yrJqa%#$^U*Zk%RpJNMy6 z5m;Qn`ijN?)OiFS0{mso0texg_HkP=3VfFKE)L{G@q!VtZLQTh9XgzIPSOpQm4!E^ zx{BwV9yq5FXXmRFbjGu0l;+geMhkQ*weZ=?UE{Gyi(CwF73>iY3y-jvzK$j@D)_)( znhY@RI<#dGVS=?TFWg{~hA?7Q zjB6q6E8& zSp!IxMAepvx?-dFMytPyh=tU=hDumOl8zhdh;6aSR2$!YTB-Wp$D)LVU<(pH&nPd0 zLa2>=*P)%YREENRIYZtUJ59)Z8*pF$RGuS^Zi>2@&Be^&VWP-!{Hlp$vCxHQV`IQt=2x>=usi z^SWtRr>LN&IOp<{Df1U%XX*Lhb|xcRJ`JAfW4+yJf46@vQyjzXk2>h$0w$_HG%2GA zI=urWWy`9I=Su{^*&Ib=GEX*4fme;DGbzSdipRvA8kBDoI?aU%JoB8bH zr$gi;Zh=~D`!cbeH|m+;(QcZY?IhCOP894l^d^BKw`T5~`Nhdqnv1U~S%LzCb*)~? 
zrfeQB!p{WAqqXL$b1;>LXzE1NhBg;lQ$p|h_&P;-;d+JY+fIQOr5`V{tB?=N7nQtz zYw>sv(}?mgLur>HzACZ|in}N>s4dP)AL*9Mp4+dqlcdUNXv+IEXT01Hxur>Qv;680 z)OHEuW8fGlR4d6ztbeJ|yc*UOegq0uHu$wuRZS%@__%gjxCYO)0O}X$|KjGRY z86rNAI{>c%y@avSL6=3AK?7NywBrPld*}6rx2wLo&S~A8EP1obwdfF4qN*A!lq11= zA*1L+$g&wy@L4s4e0JbI5=Lu`Iq2kK#K0t{MomR3EXWo{@8X6W;ume{+wYL^$LQ9H zn5B{zpTRp2>;NNcOGZqFccIBwnUt(4S?BHpzlZsG7EZP9{OO%;gXTRPkBzf~rrjUD zjEdH76I>2ndZ8|SM?4F!=xhjA`krYocM2CA3pEy5hwg{r-chz z>DbUYM}w*j7inS^9RQ(MHa#cr+aK(pf?>mq6oe5KN{N=D9V+w}5}O~!z90vVJ`Jyl zm)@%jZ%w*DM(-yfP5Qn!j`WHGDWP`>cQ~9@k~<`jDXY`+TtnJ`MyRpql&1gHvM){o zsb`-to!9;H(1~9KPygjxL_z=sYP`<~y)~Dz6iet+dk5XWpU-B72^Q9TQOYwouXW>M z^`zCgdj{XE{axFp3`J`N-52+^__{5HF!vA}Q!7a&8>V4F-(#sTT%=V;S7C1AHp5 zG+D)Cmyc0v<0i62pQ|qBrZIPJq1R#m$}DaAz(?#rxuR-!+@Ps0ba;Ekcg&c@(1g|I z`g>!yb7?`4Yr-^LisHioj7|<-C9}s76n#e)5+u8`2QxFb-nPHEtv<{5=F0-*mpDAB z9|uq*f(;sYWx~xR;9QAgLsobdjB(8`sfmki6nWoUUVHzUZ{f!+&Pn@NPZ*tuE`Cwz zxASft`stkzMl2M$aSUm*6n~;x{^Z^Gh$7|~-m+gdWpC0Xj+y{KZjC4l(ql!TIE z$Eczp#!3@KUAp8{?f3=Nl1Me%wVmzC{;B+2rR)r6dGTcz#~i}*f){)W-e9VcLM=R> zF=wUfth!{Tu1#$1p<+`-ZUjTFD9*l^c}FB#w-85qM=d$94??-u%)KDcxKNx$HSkJB zth#J+U7DFKAo-1L@~(SS`Y4$V)ZwYfvwC%aAc-fS!xc5Xxcv-u{h((J>3U~%F+Xg3 z0Tq*Fo1F0I&CabST8TGJYPM#Eq{c>T>j~KPr5^^5;vHCZSVaY%!L&L_4LtRNZ#kX~ zokM;(k{pZ`yW3AebBXzEa51CB{bn*BRv9YRI2o5v9MMz(D0gNN)gYKpZaiz(03 z9Av7g5Y*ol{?=%QIa`cTLmsrH`}Y&vv7*sg=?ColULzE^S_-U2JPTALKf0Bzd z&Cn^}$^GWr@72ZdT)qT7qDQteuPI)vX-!veI5L08Ji+GL;p>(z#`7Q{|0D6kw|sm& zOFZ%Yt`i*y)MewL?*i!jQ2DEclL1N%nADON)iR;(Lu(Jxq82k*65G%bS|>e^O-+&-MkhrO#|Wry6# z3?nfn=#&WR33(@)f9MVB=<1+*Nq61f(Pk=1fqcbM zJaSliWGT*uG3pBB#nd0?woG&-OM6q;9j70sMLzbfdUtl_oN5hKbMB+I2g=mN`K)%-S4Ja<`-F!pQg}AMV42Ni*NlTqBWG!<3J7v~g`pirGWo z8!hgOWeyJ1Os$w{$R9cJDB8Lh)Pxy`)N{x#8$Za2foiwPwMh6do7bh>$pqQau%GXq zwi;nSBAGrMt_kmo@|t-o1bZB!p& z!jEY7B=+a6=ra$iL#!vc3)>FzDWMoTDbMJ)a=M(34Tmw34-12Eg(Rh&49iZe#eb5 znuqU}{SgIi{Y7*^5(4OSS; zTNqg+qd5F_|ss;D-&-~_16bX*;U>;li zM;Y92V~Z@ox_VSMaWAkuCiH#MTQ-Cmh6Le)Dq=l>=B^YX7Gb+wI z>g9iQRBCj)p*FmWe#ZRPa7Fji1X=gVc8zd_?7Oh z2-h15Te-B*Ve{)tr(7a@s)dzV+4*8gWxVScdzQyG*7QicS+AIeN_5< zX#WcLZiEC|X9k}K(O>c}p4=XDp$?1;FTK9Y8{z0V2|!2!h9l|$VRL76g{5Y!a$+kFfS@Zh;?11zfD3FzQo>ujXWNT&4s`9oIfNn1&dKn>45yxQDD2p z2i#CTHY*lOuo``nCM@k!J#jUhITTi%?sX$fx8BTGj8EU*&LF;`mofDP;3LxcK+Aa0 zO`ZyATc^clMYBfcj&td=9=y&E+mW7eQ(S2V-=8DV0s$&7#PJU+RQ3_UKOB74tb;r0 zHUEHcV@M+4?KZDKGzC<@@9MzRXb}W?X%O=DD#)V+b2!$(-(jCP9ek(a$m(df6{)sQ z{nvr!XT@c>(+Extoa680d5?@on$*0r*QQ**x4iZjD|C?PfL`crSK8@>Fe33AL`6k)?RuQOEt>|q>O&Ms)> z_I4|bd^MPLeE!>+q#<}aGjit;Sj)nYfN&a3 zjev%%NO_`y(!EP&vlPv$y}I`qvSLBwz3oVNn5{2rlr(11;Rb5s)-MheW2MqW zRTnHQb5czGML+hMJWar#m&4!mme#h2^~llzT@>+-`8~W52#Okl17XbTSOrkroFF;` zES&(0js&0f;G9@;6d24F=VBs}+ZlzAQeAM94saWWmg_|qx|)qSAKOF`eF9WC8LX>X zE*xjAw2UFFOfatJR!X9|vfU?4eG|SL2OHWnO5p3;N_$lKk?+m__MBe^5_DyjeTli9 zn|dwIY04-@o-uC?g@3lXYOItu;-ZG2b8(P_hCFpn8kkE6`GM-eMaVpDMfpNcfF9vD zzjXC4Xv+BguE<7oept)-0524|kEZ!s^o6g|he_M{_j6`)N12@Xg(x*e;>-2pr*0L| zC+o)twI^DZf;A6TVXa>Y^56>`aIBV9`N)=M<9(uxD#JJ-eieK|B^|vW;1z}EXshLjYDwg)p(1M(HFW!Z{LgIdqT|R%g@%nkh%7t5oOg9_NK<>d4}(by*{VwM-87cR!iL&G9N#ur^M7-nRRpP)|CDMd4>vnUfDC}^@iHH*uF z=b3FBc73efT69(VAeIGp^>r&I&iZnkJgm;wXr3f8B0)DSw$!i_4grC(2unGP*&82k zOGWyT9eF;kxP~i3UhMDL??8o;GIASZ&aYIM(4M)_vBEOHIy0>g&^R_6=i%5)Fxn%X z378jD((A{2@3WbV1a`L6b2FK$X@9J*zp51Gxlm^lbgdXdt$4*TQx+pi)7e)qx(B1K zGV;D-F4dDxXVxZJxC)!9C$gCz!ITf^EoL$vzii?0p}0MSb$z|o!kyBEc0LT59e-Bj zlds?Wcx$`VwX{Pq`$1x#XgkG~K$wt`jl_j0H@EqYd7^;(NW`R23zrF$%^)R)sJdO? 
zwY{a>CH@qhOQhWrL%}in7q8!b88oe`B-@H{Y7Jd#pI|%k30B`+Xw?$k${BGR#>_TZ z-w1K}cpaIMEjMO(x&GR{I5H}$sW7C?x~XdzR^Z|F)!W0LYVltzlZzM)%)4*uxj5!c z*;y|?tO#*ox*Y%}GAdR(2S_nrF^la39mCGaezPsr4ms;BqNCMkB{F0e+ZS1P%0%ZZ zkaik(Zs!m*C`4p?Jd|zS>;d7)wN_faPMJd@aI!}RP|<`1%nFdGmm*Z=a^1JyzS(}t z{vMBiJ1#rffZvv2s0u#y1ew{0?AJ4|LbJ1TN|h!rUadMRaDxxC+m&m2KeKy&YerR< zX@D_fbVA~m_)_rI(maQklDERU#f2W2 z`P*cnNjAICdbd$EA#Afw-gD<_BK1S3P2r5|Obs#Nwe-U8vfN^3YI5m{3KHbPQ~WO^ zfI0a?)CdS~GP%I^et*22M_ zAuDQRST$}_Q(K*&v$-X+6)mR>rC^xjd3rOPdIqUMPRszfthH9d+oR2tC!``_EN#ab z*=jJU(P7gWRa3c8VK-EEP&bCeE|a?B{6IESjP^w515AorhF1WXb7W`F zs(Dn%Pzge+R+(i*6cQEq#i3US*}lQU=PMl|su6TP@J>I5DZX_@j$u#r5*Ng3@)a?$ zO}8&e397~~O0+mi7~feVs%rw8#%eLlXmK@k37kPh1bfM2DZTW;A?{m83R`ohpNruH zlXS9k;RDe~$%2|QM=|#5AZRXW*9RSIa6F;Aj-`v=5DX?= z@^Mn5u?+CWry-Pf?gfqIjZqUCNSbF>6anmu>@=ZV9L-fDZrMq%YTi~>JvU9Ay6lOj z2+Gvce!pUWeMObh_(=kECHeSS3y+hZeFCcg&4z47PiQV`JD-s2{Meq{d6s!g^PJwA zds?F^Fjmt!tsq7`V`3Z2c%;>2N4FvkW(cf`xj5`8PGxF&O=@;OwPX&lpIh>fclKVE zX1pcA6+0!dr?2;US3PWRlfaj_&!7A#5uUc7Y76zsf;}H0B#OG$7epr9hIm;mKFPjF6B0mH5&%JY z^@MssXrK*>VRqid(1Ti9^U41FHcG1H_P6R~2_mFMt5%|^(^T(dWd%2%ASMI!8~wr8 zhAc6%FeWRX0%y_1EjxQX_ubyJS`spv#>*!5d|P-gl2|9}pMeF@N6G;XSqrd^La8He z8qLM62mS?{Rr55{GvWNLsTCiKmW{gsH{qjvEnz*t6J%%EFAR) zq%ydO?@58}aSy@!!r68(n2R6F3scP&X#o_}B}^{A8zzPbLy9DCEmrvb}HC zBYA1lvG)u2yzQ6Zl8}c9KCrG^U*uevXQETI&KCIct(wA@TKB0s6S07TMG4giR?TFK z@@>2q_Vt`*3l5#l@Gb=U`btL!Q>J{)xaah=6}4LNjAp(T)XB-$<&IglOUXId7JHcC znT%Xs!ncGIG~%0&(HG{;K&Pub{YDF#YVUA$+Oz+1S3^w*6n}m$choIK`hHF#;oV-e zY(XOV^bF3-f zn$Ni2)dlH7B#h?6vQODkyStB@cr{cud?+a9d^WqQrAdO+@nDCG(?xBQXV$(e$61OO zOrU{0Xrrf|u+VvmfSWk3tQ#Yx^5T;HM5C2DZeXoI(~C)-k7Nv)H^aH9gxy?og7LQI zDHKI5FgsRgnJ_N|wI(u_-pA8a;rok5&G$FWv*phTDB)dErOXAtEoRxoxMF@k2IJ*b z3{89B6R+|q66WjmaKOnu-q$e9by`IziXW%vz9{e@KNzn}9I5uS=|0N8BYthjhq=SB za(?5X{r-kFnrge$J0PLltZ%yU+16S6R9b`QjTi5d2RG`PeaJsf47{*D^>M;4V>{B{ zbIKuHGF3{rm45wl>HF4Tl~Bi$DH`7X+dpm&wv*lc*!8)98gGv{_DCNqJiaG>2FjEv zUMp)K0wbFJ-Z}oQy_oPz30*fQUMl}x12&$vbUY1y@>&#hDK8e?a?suS`O;;lOwKWz z;iabgyb68na{ghWl;N@TeA+_{ceQQ!m#%O*pvCu{(K_MZDL9R8G!8Ab4DD&*vkOD( zbB)EsJ!jP|eLY;G7D;s9UL<;+F&(Izd9WZ8)MSNpwOW*JUAp3_si4`_@Wrp{p)BKQ z?IqLh8tL(N=>+vRB0p;R-`=M7=ynGb{QlvEaQwf&82)b-6#n_H2~w;@5_7Y!I~Hm3 zo#I8cZDXX)Ls#jI%|Z_m4H0*#iIgv7to+fz%ro8v-Rg|96VX12`<`KzXn}e}*P`%- z0vPjm8p`eg^8HXCcxs%0e(Fa%t3~o;s-11`<=4QA=W#SP_T*&pz||r)u45pKXvsDX>bT`&n%?R=F3`yc>N&~(vKqiR~+G@uy&-i3%jAhq0GZ(|e!y?oO zo>!XxhyzpgM0ySDS`*U@fyyCl40lk5%dwycG))mD%dVL!t49Uu;_bDmy%V zhvdHv4*T1SK_Agft@#+eYopbrhF?sb(p;-*Q@kll+RxkE+5U2#A9?Kxka>kM-@K_quR-^Q zw#9Hmci-0G> zG(G?EhU)`;f(dyqw0CDSc?_SjZQLh)wK66hs zLQ?f%{ByvfJo(@XV0%Okr2TD)fdyz_QB1sE`)OGz#`^@&t{FQm)%~UX^UvzBKkLm1 z{snSm`7g>p|Nb3Ur(b&X4xJLUmjyUVKxIcqI;<0wVd+qfvRtQgN!HKk&d29Gbnv{* zsL`hjdKYX?Tnb>JZ?&gpN_`c*28<_dPSe*qS^5I{149vly%?XoB3R*S5jTAk0dpsU zvLjo|Uonf$*^uptmdg&uQ!|UM`{kB0s@LZXXM5k3ZGMa5<{pLHrstuVQXl1EK9sub z9ow$&Z+KfDc$X(P#;n|6hC$N0_9b0D8M|C)bbk}1u&ed|u=k!}O>XPDa1a!wgLI@w zm5x-U1woofm)?{PAtKUiPz0nm0Yw3k-lcbtjvyjkItd~IA_-Cggm?yX&AHavbDr<) zbDh2R{`OaXcu6kEn>X(m<0<$3Joj^dko@7adh&zj)XCb02c5O6sHj{smTT|2wW&I< zA2xoZxV}vtWR9*3Z#9G=w{#7XSq%_eU*bRd+eVHg_7Z1{Uyf%NSGCu@n(jd)qEX2+ z)?uGEF51c$Ag}4-4voy5x%_sFjDC7GW5_GrgXy#ORSRjm%Um?A4?~&h7Ut)098+Gp z7j3MavxLRg14?Jx^XW_N_K#=Ch#{&Eb|BFMXA&{*<8N)*fySm=f1D%mzkC?#<}h)8 zb|g)oPqTCLF@N6!rD~4pk)`tpSIVc%4P70Pvc55O8tMJBX@$tr8L9K-m*b@37z{Jy z2a4YcRHr4M2KDyFEJ3}MIYEds3n8DNWXtX?A_)7x8g~5d$6R*d7{`-j~KKY*U!tUg~L$`h)!Q~lhD0V zZMAKnp(Xp+(sF}y?1IRdw));3bHAJ<)Zvnx68y;{Q`xmG1S%n7Q5*xB>!{_gvWJ(K zdBGtr0ADbzgZYb!R*KsG%sNaW(noRAFbuwKNGx)Dj1Ob@?u|4nuh`ABu%6!ACV8o# z)~}6T{ho#z*NomX+*DO!U 
zWum2ic6&oFEuyDy&X}>Ve$?V0M8uuRtJt_z>v4azRGYC%=dA7Ojc3Bo_}-|R+?&HfH3J8JkATl(ZP6hg)79XUCR!pP@^vR9g{dbC<*_g#( z5h!&|>`GfcW9UQ^UOo)5tpb*^{F=IQl@O_Lq z6~e5V{qX_o)%pse9b8V)EyYj#B*fOT8udIVrr2MOlg9yrp5naqxnFy`l-s4RyXNS+e($;;`6Q?WpgMNJ^EK$+&V*;h$SCzW7q60)Z3VDLM{{%P*Odd) z^l$nCsWQ5Ue4^|YnU@|%GIwBr<@E|XZYy*R8{FpN>;K_4%t~?2s3jK7y*#OV&AKQ~ z{@8BCQ4P!SOd%Edt1_HHzs=8-lcE3xq7vYUceWk5R|a^RSH zx1y!CtybY0Ei|X&Jne!LP(olnHbRe1J@JLRQvKX5f$5)ZbuC6^ABlAfkF>qlgWpb8 zxkAmhHRqa7r`Q49LoStlI|+W{jo!>f8DjTxncxuzrF?2qBcvb5g7ro#Np$s+*}AAH zl;GI@wbIuYR}Ctb{5&R)CB1!O;8gqiCau#F9t(1!oT>;06^8O&9kt7ev4GwFPkq-f z12E117q0?WBkiSuN}serqW9c|-=6-Ch?e50V4) Date: Mon, 22 Jun 2020 12:47:46 +0530 Subject: [PATCH 046/131] YARN-10321. Break down TestUserGroupMappingPlacementRule#testMapping into test scenarios Contributed by Szilard Nemeth. Reviewed by Gergely Pollak. --- .../UserGroupMappingPlacementRule.java | 4 +- .../TestUserGroupMappingPlacementRule.java | 476 ++++++++++-------- 2 files changed, 275 insertions(+), 205 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java index 0e8cb9cc047e8..a68f3c61772d8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java @@ -141,7 +141,7 @@ private ApplicationPlacementContext getPlacementForUser(String user) } else { if (LOG.isDebugEnabled()) { LOG.debug("Creating placement context for user {} using " + - "static user static mapping", user); + "current user static mapping", user); } return getPlacementContext(mapping); } @@ -173,7 +173,7 @@ private ApplicationPlacementContext getPlacementForUser(String user) } else { if (LOG.isDebugEnabled()) { LOG.debug("Creating placement context for user {} using " + - "static user static mapping", user); + "current user static mapping", user); } return getPlacementContext(mapping); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java index 5d2aaa17a5759..432c009fbe926 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java @@ -169,7 +169,7 @@ public void testSecondaryGroupMapping() throws YarnException { } @Test - public void testNullGroupMapping() throws YarnException { + public void testNullGroupMapping() { conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, 
NullGroupsMapping.class, GroupMappingServiceProvider.class); try { @@ -190,246 +190,316 @@ public void testNullGroupMapping() throws YarnException { } @Test - public void testMapping() throws YarnException { - //if a mapping rule defines no parent, we cannot expect auto creation, - // so we must provide already existing queues + public void testSimpleUserMappingToSpecificQueue() throws YarnException { verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("a") - .queue("a") - .build()) - .inputUser("a") - .expectedQueue("a") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("a") + .queue("a") + .build()) + .inputUser("a") + .expectedQueue("a") + .build()); + } + + @Test + public void testSimpleGroupMappingToSpecificQueue() throws YarnException { verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.GROUP) - .source("agroup") - .queue("a") - .build()) - .inputUser("a") - .expectedQueue("a") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.GROUP) + .source("agroup") + .queue("a") + .build()) + .inputUser("a") + .expectedQueue("a") + .build()); + } + + @Test + public void testUserMappingToSpecificQueueForEachUser() throws YarnException { verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("%user") - .queue("b") - .build()) - .inputUser("a") - .expectedQueue("b") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("b") + .build()) + .inputUser("a") + .expectedQueue("b") + .build()); + } + + @Test + public void testUserMappingToQueueNamedAsUsername() throws YarnException { verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("%user") - .queue("%user") - .build()) - .inputUser("a") - .expectedQueue("a") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%user") + .build()) + .inputUser("a") + .expectedQueue("a") + .build()); + } + + @Test + public void testUserMappingToQueueNamedGroupOfTheUser() throws YarnException { verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("%user") - .queue("%primary_group") - .build()) - .inputUser("a") - .expectedQueue("agroup") - .expectedParentQueue("root") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%primary_group") + .build()) + .inputUser("a") + .expectedQueue("agroup") + .expectedParentQueue("root") + .build()); + } + + @Test + public void testUserMappingToQueueNamedAsUsernameWithPrimaryGroupAsParentQueue() + throws YarnException { verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("%user") - .queue("%user") - .parentQueue("%primary_group") - .build()) - .inputUser("a") - .expectedQueue("a") - .expectedParentQueue("root.agroup") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%user") + .parentQueue("%primary_group") + .build()) + .inputUser("a") + .expectedQueue("a") + .expectedParentQueue("root.agroup") + .build()); + } + + @Test + public void 
testUserMappingToQueueNamedAsUsernameWithSecondaryGroupAsParentQueue() + throws YarnException { verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("%user") - .queue("%user") - .parentQueue("%secondary_group") - .build()) - .inputUser("b") - .expectedQueue("b") - .expectedParentQueue("root.bsubgroup2") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%user") + .parentQueue("%secondary_group") + .build()) + .inputUser("b") + .expectedQueue("b") + .expectedParentQueue("root.bsubgroup2") + .build()); + } + + @Test + public void testGroupMappingToStaticQueue() throws YarnException { verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.GROUP) - .source("asubgroup1") - .queue("a") - .build()) - .inputUser("a") - .expectedQueue("a") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.GROUP) + .source("asubgroup1") + .queue("a") + .build()) + .inputUser("a") + .expectedQueue("a") + .build()); + } - // "agroup" queue exists + @Test + public void testUserMappingToQueueNamedAsGroupNameWithRootAsParentQueue() + throws YarnException { verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("%user") - .queue("%primary_group") - .parentQueue("root") - .build()) - .inputUser("a") - .expectedQueue("agroup") - .expectedParentQueue("root") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%primary_group") + .parentQueue("root") + .build()) + .inputUser("a") + .expectedQueue("agroup") + .expectedParentQueue("root") + .build()); + } + @Test + public void testUserMappingToPrimaryGroupQueueDoesNotExistUnmanagedParent() + throws YarnException { // "abcgroup" queue doesn't exist, %primary_group queue, not managed parent verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("%user") - .queue("%primary_group") - .parentQueue("bsubgroup2") - .build()) - .inputUser("abc") - .expectedQueue("default") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%primary_group") + .parentQueue("bsubgroup2") + .build()) + .inputUser("abc") + .expectedQueue("default") + .build()); + } + @Test + public void testUserMappingToPrimaryGroupQueueDoesNotExistManagedParent() + throws YarnException { // "abcgroup" queue doesn't exist, %primary_group queue, managed parent verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("%user") - .queue("%primary_group") - .parentQueue("managedParent") - .build()) - .inputUser("abc") - .expectedQueue("abcgroup") - .expectedParentQueue("root.managedParent") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%primary_group") + .parentQueue("managedParent") + .build()) + .inputUser("abc") + .expectedQueue("abcgroup") + .expectedParentQueue("root.managedParent") + .build()); + } + @Test + public void testUserMappingToSecondaryGroupQueueDoesNotExist() + throws YarnException { // "abcgroup" queue doesn't exist, %secondary_group queue verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - 
.type(MappingType.USER) - .source("%user") - .queue("%secondary_group") - .parentQueue("bsubgroup2") - .build()) - .inputUser("abc") - .expectedQueue("default") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%secondary_group") + .parentQueue("bsubgroup2") + .build()) + .inputUser("abc") + .expectedQueue("default") + .build()); + } + @Test + public void testUserMappingToSecondaryGroupQueueUnderParent() + throws YarnException { // "asubgroup2" queue exists, %secondary_group queue verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("%user") - .queue("%secondary_group") - .parentQueue("root") - .build()) - .inputUser("a") - .expectedQueue("asubgroup2") - .expectedParentQueue("root") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%secondary_group") + .parentQueue("root") + .build()) + .inputUser("a") + .expectedQueue("asubgroup2") + .expectedParentQueue("root") + .build()); + } + @Test + public void testUserMappingToSpecifiedQueueOverwritesInputQueueFromMapping() + throws YarnException { // specify overwritten, and see if user specified a queue, and it will be // overridden - verifyQueueMapping( - QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("user") - .queue("a") - .build()) - .inputUser("user") - .inputQueue("b") - .expectedQueue("a") - .overwrite(true) - .build()); - - // if overwritten not specified, it should be which user specified - verifyQueueMapping( - QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("user") - .queue("a") - .build()) - .inputUser("user") - .inputQueue("b") - .expectedQueue("b") - .build()); - - // if overwritten not specified, it should be which user specified - verifyQueueMapping( - QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.GROUP) - .source("usergroup") - .queue("%user") - .parentQueue("usergroup") - .build()) - .inputUser("user") - .inputQueue("a") - .expectedQueue("a") - .build()); - - // if overwritten not specified, it should be which user specified - verifyQueueMapping( - QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.GROUP) - .source("usergroup") - .queue("b") - .parentQueue("root.bsubgroup2") - .build()) - .inputUser("user") - .inputQueue("a") - .expectedQueue("b") - .overwrite(true) - .build()); - - // If user specific queue is enabled for a specified group under a given - // parent queue - verifyQueueMapping( - QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.GROUP) - .source("agroup") - .queue("%user") - .parentQueue("root.agroup") - .build()) - .inputUser("a") - .expectedQueue("a") - .build()); - - // If user specific queue is enabled for a specified group without parent - // queue - verifyQueueMapping( - QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.GROUP) - .source("agroup") - .queue("%user") - .build()) - .inputUser("a") - .expectedQueue("a") - .build()); + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("user") + .queue("a") + .build()) + .inputUser("user") + .inputQueue("b") + 
.expectedQueue("a") + .overwrite(true) + .build()); + } + + @Test + public void testUserMappingToExplicitlySpecifiedQueue() throws YarnException { + // if overwritten not specified, it should be which user specified + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("user") + .queue("a") + .build()) + .inputUser("user") + .inputQueue("b") + .expectedQueue("b") + .build()); + } + + @Test + public void testGroupMappingToExplicitlySpecifiedQueue() + throws YarnException { + // if overwritten not specified, it should be which user specified + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.GROUP) + .source("usergroup") + .queue("%user") + .parentQueue("usergroup") + .build()) + .inputUser("user") + .inputQueue("a") + .expectedQueue("a") + .build()); + } + + @Test + public void testGroupMappingToSpecifiedQueueOverwritesInputQueueFromMapping() + throws YarnException { + // if overwritten not specified, it should be which user specified + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.GROUP) + .source("usergroup") + .queue("b") + .parentQueue("root.bsubgroup2") + .build()) + .inputUser("user") + .inputQueue("a") + .expectedQueue("b") + .overwrite(true) + .build()); + } + + @Test + public void testGroupMappingToSpecifiedQueueUnderAGivenParentQueue() + throws YarnException { + // If user specific queue is enabled for a specified group under a given + // parent queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.GROUP) + .source("agroup") + .queue("%user") + .parentQueue("root.agroup") + .build()) + .inputUser("a") + .expectedQueue("a") + .build()); + } + + @Test + public void testGroupMappingToSpecifiedQueueWithoutParentQueue() + throws YarnException { + // If user specific queue is enabled for a specified group without parent + // queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.GROUP) + .source("agroup") + .queue("%user") + .build()) + .inputUser("a") + .expectedQueue("a") + .build()); } /** From 201d734af3992df13bc5f4d47b8869da4fb2b2c5 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 22 Jun 2020 19:43:19 +0900 Subject: [PATCH 047/131] HDFS-15428. Javadocs fails for hadoop-federation-balance. Contributed by Xieming Li. --- .../apache/hadoop/tools/fedbalance/MountTableProcedure.java | 4 ++-- .../tools/fedbalance/procedure/BalanceProcedureScheduler.java | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/MountTableProcedure.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/MountTableProcedure.java index 8f789831d394f..a960062ef1ef8 100644 --- a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/MountTableProcedure.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/MountTableProcedure.java @@ -43,9 +43,9 @@ /** * Update mount table. 
* Old mount table: - * /a/b/c -> {ns:src path:/a/b/c} + * /a/b/c -&gt; {ns:src path:/a/b/c} * New mount table: - * /a/b/c -> {ns:dst path:/a/b/c} + * /a/b/c -&gt; {ns:dst path:/a/b/c} */ public class MountTableProcedure extends BalanceProcedure { diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java index 0f82b88f0a937..a27db10673396 100644 --- a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java @@ -44,6 +44,7 @@ import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.WORK_THREAD_NUM_DEFAULT; import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.JOURNAL_CLASS; /** + * <pre>

  * The state machine framework consist of:
  *   Job:                The state machine. It implements the basic logic of the
  *                       state machine.
@@ -54,7 +55,7 @@
  *   Journal:            It handles the job persistence and recover.
  *
  * Example:
- *   Job.Builder builder = new Job.Builder<>();
+ *   Job.Builder builder = new Job.Builder&lt;&gt;();
  *   builder.nextProcedure(new WaitProcedure("wait", 1000, 30 * 1000));
  *   Job job = builder.build();
  *
@@ -62,6 +63,7 @@
  *   scheduler.init();
  *   scheduler.submit(job);
  *   scheduler.waitUntilDone(job);
+ * </pre>
*/ public class BalanceProcedureScheduler { public static final Logger LOG = From 7c02d1889bbeabc73c95a4c83f0cd204365ff410 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Tue, 23 Jun 2020 01:42:25 -0700 Subject: [PATCH 048/131] HDFS-15427. Merged ListStatus with Fallback target filesystem and InternalDirViewFS. Contributed by Uma Maheswara Rao G. --- .../apache/hadoop/fs/viewfs/InodeTree.java | 4 +- .../hadoop/fs/viewfs/ViewFileSystem.java | 89 ++++--- .../org/apache/hadoop/fs/viewfs/ViewFs.java | 94 ++++--- .../TestViewFileSystemLinkFallback.java | 251 +++++++++++++++++- 4 files changed, 360 insertions(+), 78 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java index 50c839b52b654..d1e5d3a4e5fba 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java @@ -374,7 +374,7 @@ protected abstract T getTargetFileSystem(URI uri) throws UnsupportedFileSystemException, URISyntaxException, IOException; protected abstract T getTargetFileSystem(INodeDir dir) - throws URISyntaxException; + throws URISyntaxException, IOException; protected abstract T getTargetFileSystem(String settings, URI[] mergeFsURIs) throws UnsupportedFileSystemException, URISyntaxException, IOException; @@ -393,7 +393,7 @@ private boolean hasFallbackLink() { return rootFallbackLink != null; } - private INodeLink getRootFallbackLink() { + protected INodeLink getRootFallbackLink() { Preconditions.checkState(root.isInternalDir()); return rootFallbackLink; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 1ee06e02aab15..06052b80d9219 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -290,8 +290,9 @@ protected FileSystem getTargetFileSystem(final URI uri) @Override protected FileSystem getTargetFileSystem(final INodeDir dir) - throws URISyntaxException { - return new InternalDirOfViewFs(dir, creationTime, ugi, myUri, config); + throws URISyntaxException { + return new InternalDirOfViewFs(dir, creationTime, ugi, myUri, config, + this); } @Override @@ -518,10 +519,10 @@ public void access(Path path, FsAction mode) throws AccessControlException, /** * {@inheritDoc} * - * Note: listStatus on root("/") considers listing from fallbackLink if - * available. If the same directory name is present in configured mount path - * as well as in fallback link, then only the configured mount path will be - * listed in the returned result. + * Note: listStatus considers listing from fallbackLink if available. If the + * same directory path is present in configured mount path as well as in + * fallback fs, then only the fallback path will be listed in the returned + * result except for link. 
* * If any of the the immediate children of the given path f is a symlink(mount * link), the returned FileStatus object of that children would be represented @@ -1125,11 +1126,13 @@ static class InternalDirOfViewFs extends FileSystem { final UserGroupInformation ugi; // the user/group of user who created mtable final URI myUri; private final boolean showMountLinksAsSymlinks; + private InodeTree fsState; public InternalDirOfViewFs(final InodeTree.INodeDir dir, final long cTime, final UserGroupInformation ugi, URI uri, - Configuration config) throws URISyntaxException { + Configuration config, InodeTree fsState) throws URISyntaxException { myUri = uri; + this.fsState = fsState; try { initialize(myUri, config); } catch (IOException e) { @@ -1225,7 +1228,8 @@ public FileStatus[] listStatus(Path f) throws AccessControlException, FileNotFoundException, IOException { checkPathIsSlash(f); FileStatus[] fallbackStatuses = listStatusForFallbackLink(); - FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()]; + Set linkStatuses = new HashSet<>(); + Set internalDirStatuses = new HashSet<>(); int i = 0; for (Entry> iEntry : theInternalDir.getChildren().entrySet()) { @@ -1238,11 +1242,10 @@ public FileStatus[] listStatus(Path f) throws AccessControlException, // To maintain backward compatibility, with default option(showing // mount links as symlinks), we will represent target link as // symlink and rest other properties are belongs to mount link only. - result[i++] = + linkStatuses.add( new FileStatus(0, false, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), - ugi.getPrimaryGroupName(), link.getTargetLink(), - path); + ugi.getPrimaryGroupName(), link.getTargetLink(), path)); continue; } @@ -1258,11 +1261,12 @@ public FileStatus[] listStatus(Path f) throws AccessControlException, FileStatus status = ((ChRootedFileSystem)link.getTargetFileSystem()) .getMyFs().getFileStatus(new Path(linkedPath)); - result[i++] = new FileStatus(status.getLen(), status.isDirectory(), - status.getReplication(), status.getBlockSize(), - status.getModificationTime(), status.getAccessTime(), - status.getPermission(), status.getOwner(), status.getGroup(), - null, path); + linkStatuses.add( + new FileStatus(status.getLen(), status.isDirectory(), + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), + status.getGroup(), null, path)); } catch (FileNotFoundException ex) { LOG.warn("Cannot get one of the children's(" + path + ") target path(" + link.getTargetFileSystem().getUri() @@ -1270,51 +1274,58 @@ public FileStatus[] listStatus(Path f) throws AccessControlException, throw ex; } } else { - result[i++] = + internalDirStatuses.add( new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), - ugi.getPrimaryGroupName(), path); + ugi.getPrimaryGroupName(), path)); } } + FileStatus[] internalDirStatusesMergedWithFallBack = internalDirStatuses + .toArray(new FileStatus[internalDirStatuses.size()]); if (fallbackStatuses.length > 0) { - return consolidateFileStatuses(fallbackStatuses, result); - } else { - return result; + internalDirStatusesMergedWithFallBack = + merge(fallbackStatuses, internalDirStatusesMergedWithFallBack); } + // Links will always have precedence than internalDir or fallback paths. 
+ return merge(linkStatuses.toArray(new FileStatus[linkStatuses.size()]), + internalDirStatusesMergedWithFallBack); } - private FileStatus[] consolidateFileStatuses(FileStatus[] fallbackStatuses, - FileStatus[] mountPointStatuses) { + private FileStatus[] merge(FileStatus[] toStatuses, + FileStatus[] fromStatuses) { ArrayList result = new ArrayList<>(); Set pathSet = new HashSet<>(); - for (FileStatus status : mountPointStatuses) { + for (FileStatus status : toStatuses) { result.add(status); pathSet.add(status.getPath().getName()); } - for (FileStatus status : fallbackStatuses) { + for (FileStatus status : fromStatuses) { if (!pathSet.contains(status.getPath().getName())) { result.add(status); } } - return result.toArray(new FileStatus[0]); + return result.toArray(new FileStatus[result.size()]); } private FileStatus[] listStatusForFallbackLink() throws IOException { - if (theInternalDir.isRoot() && - theInternalDir.getFallbackLink() != null) { - FileSystem linkedFs = - theInternalDir.getFallbackLink().getTargetFileSystem(); - // Fallback link is only applicable for root - FileStatus[] statuses = linkedFs.listStatus(new Path("/")); - for (FileStatus status : statuses) { - // Fix the path back to viewfs scheme - status.setPath( - new Path(myUri.toString(), status.getPath().getName())); + if (this.fsState.getRootFallbackLink() != null) { + FileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path p = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + if (theInternalDir.isRoot() || linkedFallbackFs.exists(p)) { + FileStatus[] statuses = linkedFallbackFs.listStatus(p); + for (FileStatus status : statuses) { + // Fix the path back to viewfs scheme + Path pathFromConfiguredFallbackRoot = + new Path(p, status.getPath().getName()); + status.setPath( + new Path(myUri.toString(), pathFromConfiguredFallbackRoot)); + } + return statuses; } - return statuses; - } else { - return new FileStatus[0]; } + return new FileStatus[0]; } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index fae5d1b5f62ab..d18233a8e9741 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsServerDefaults; @@ -236,7 +237,8 @@ protected AbstractFileSystem getTargetFileSystem(final URI uri) @Override protected AbstractFileSystem getTargetFileSystem( final INodeDir dir) throws URISyntaxException { - return new InternalDirOfViewFs(dir, creationTime, ugi, getUri()); + return new InternalDirOfViewFs(dir, creationTime, ugi, getUri(), this, + config); } @Override @@ -455,6 +457,11 @@ public LocatedFileStatus getViewFsFileStatus(LocatedFileStatus stat, /** * {@inheritDoc} * + * Note: listStatus considers listing from fallbackLink if available. If the + * same directory path is present in configured mount path as well as in + * fallback fs, then only the fallback path will be listed in the returned + * result except for link. 
+ * * If any of the the immediate children of the given path f is a symlink(mount * link), the returned FileStatus object of that children would be represented * as a symlink. It will not be resolved to the target path and will not get @@ -880,15 +887,20 @@ static class InternalDirOfViewFs extends AbstractFileSystem { final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final URI myUri; // the URI of the outer ViewFs - + private InodeTree fsState; + private Configuration conf; + public InternalDirOfViewFs(final InodeTree.INodeDir dir, - final long cTime, final UserGroupInformation ugi, final URI uri) + final long cTime, final UserGroupInformation ugi, final URI uri, + InodeTree fsState, Configuration conf) throws URISyntaxException { super(FsConstants.VIEWFS_URI, FsConstants.VIEWFS_SCHEME, false, -1); theInternalDir = dir; creationTime = cTime; this.ugi = ugi; myUri = uri; + this.fsState = fsState; + this.conf = conf; } static private void checkPathIsSlash(final Path f) throws IOException { @@ -1015,7 +1027,8 @@ public int getUriDefaultPort() { public FileStatus[] listStatus(final Path f) throws IOException { checkPathIsSlash(f); FileStatus[] fallbackStatuses = listStatusForFallbackLink(); - FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()]; + Set linkStatuses = new HashSet<>(); + Set internalDirStatuses = new HashSet<>(); int i = 0; for (Entry> iEntry : theInternalDir.getChildren().entrySet()) { @@ -1029,11 +1042,10 @@ public FileStatus[] listStatus(final Path f) throws IOException { // To maintain backward compatibility, with default option(showing // mount links as symlinks), we will represent target link as // symlink and rest other properties are belongs to mount link only. 
- result[i++] = + linkStatuses.add( new FileStatus(0, false, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), - ugi.getPrimaryGroupName(), link.getTargetLink(), - path); + ugi.getPrimaryGroupName(), link.getTargetLink(), path)); continue; } @@ -1049,11 +1061,12 @@ public FileStatus[] listStatus(final Path f) throws IOException { FileStatus status = ((ChRootedFs) link.getTargetFileSystem()).getMyFs() .getFileStatus(new Path(linkedPath)); - result[i++] = new FileStatus(status.getLen(), status.isDirectory(), - status.getReplication(), status.getBlockSize(), - status.getModificationTime(), status.getAccessTime(), - status.getPermission(), status.getOwner(), status.getGroup(), - null, path); + linkStatuses.add( + new FileStatus(status.getLen(), status.isDirectory(), + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), + status.getGroup(), null, path)); } catch (FileNotFoundException ex) { LOG.warn("Cannot get one of the children's(" + path + ") target path(" + link.getTargetFileSystem().getUri() @@ -1061,51 +1074,62 @@ public FileStatus[] listStatus(final Path f) throws IOException { throw ex; } } else { - result[i++] = + internalDirStatuses.add( new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), - ugi.getGroupNames()[0], path); + ugi.getPrimaryGroupName(), path)); } } + + FileStatus[] internalDirStatusesMergedWithFallBack = internalDirStatuses + .toArray(new FileStatus[internalDirStatuses.size()]); if (fallbackStatuses.length > 0) { - return consolidateFileStatuses(fallbackStatuses, result); - } else { - return result; + internalDirStatusesMergedWithFallBack = + merge(fallbackStatuses, internalDirStatusesMergedWithFallBack); } + + // Links will always have precedence than internalDir or fallback paths. 
+ return merge(linkStatuses.toArray(new FileStatus[linkStatuses.size()]), + internalDirStatusesMergedWithFallBack); } - private FileStatus[] consolidateFileStatuses(FileStatus[] fallbackStatuses, - FileStatus[] mountPointStatuses) { + private FileStatus[] merge(FileStatus[] toStatuses, + FileStatus[] fromStatuses) { ArrayList result = new ArrayList<>(); Set pathSet = new HashSet<>(); - for (FileStatus status : mountPointStatuses) { + for (FileStatus status : toStatuses) { result.add(status); pathSet.add(status.getPath().getName()); } - for (FileStatus status : fallbackStatuses) { + for (FileStatus status : fromStatuses) { if (!pathSet.contains(status.getPath().getName())) { result.add(status); } } - return result.toArray(new FileStatus[0]); + return result.toArray(new FileStatus[result.size()]); } private FileStatus[] listStatusForFallbackLink() throws IOException { - if (theInternalDir.isRoot() && - theInternalDir.getFallbackLink() != null) { - AbstractFileSystem linkedFs = - theInternalDir.getFallbackLink().getTargetFileSystem(); - // Fallback link is only applicable for root - FileStatus[] statuses = linkedFs.listStatus(new Path("/")); - for (FileStatus status : statuses) { - // Fix the path back to viewfs scheme - status.setPath( - new Path(myUri.toString(), status.getPath().getName())); + if (fsState.getRootFallbackLink() != null) { + AbstractFileSystem linkedFallbackFs = + fsState.getRootFallbackLink().getTargetFileSystem(); + Path p = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + if (theInternalDir.isRoot() || FileContext + .getFileContext(linkedFallbackFs, conf).util().exists(p)) { + // Fallback link is only applicable for root + FileStatus[] statuses = linkedFallbackFs.listStatus(p); + for (FileStatus status : statuses) { + // Fix the path back to viewfs scheme + Path pathFromConfiguredFallbackRoot = + new Path(p, status.getPath().getName()); + status.setPath( + new Path(myUri.toString(), pathFromConfiguredFallbackRoot)); + } + return statuses; } - return statuses; - } else { - return new FileStatus[0]; } + return new FileStatus[0]; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java index 7266ad7b52768..f7f5453cce05d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.viewfs; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -36,6 +37,7 @@ import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; @@ -343,8 +345,8 @@ public void testListingWithFallbackLinkWithSameMountDirectories() afterFallback.add(stat.getPath()); } afterFallback.removeAll(beforeFallback); - assertTrue("The same directory name in fallback link should be shaded", - afterFallback.size() == 1); + assertEquals("The same directory name in fallback link should be shaded", + 1, 
afterFallback.size()); Path[] fallbackArray = new Path[afterFallback.size()]; // Only user1 should be listed as fallback link Path expected = new Path(viewFsUri.toString(), "user1"); @@ -359,4 +361,249 @@ public void testListingWithFallbackLinkWithSameMountDirectories() assertTrue(vfs.getFileStatus(childDir).isDirectory()); } } + + /** + * Tests ListStatus on non-link parent with fallback configured. + * =============================Example.====================================== + * ===== Fallback path tree =============== Mount Path Tree ================== + * =========================================================================== + * * / ***** / ***************** + * * / ***** / ***************** + * * user1 ***** user1 ***************** + * * / ***** / ***************** + * * hive ***** hive ***************** + * * / \ ***** / ***************** + * * warehouse warehouse1 ***** warehouse ***************** + * * (-rwxr--r--) ***** (-r-xr--r--) ***************** + * * / ***** / ***************** + * * partition-0 ***** partition-0 ***************** + * =========================================================================== + * =========================================================================== + * *** ls /user1/hive ********* + * *** viewfs://default/user1/hive/warehouse (-rwxr--r--) ********* + * *** viewfs://default/user1/hive/warehouse1 ********* + * =========================================================================== + */ + @Test + public void testListingWithFallbackLinkWithSameMountDirectoryTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + // Creating multiple directories path under the fallback directory. + // "/user1/hive/warehouse/partition-0" directory already exists as + // configured mount point. + Path dir1 = new Path(targetTestRoot, + "fallbackDir/user1/hive/warehouse/partition-0"); + Path dir2 = new Path(targetTestRoot, "fallbackDir/user1/hive/warehouse1"); + fsTarget.mkdirs(dir1); + fsTarget.mkdirs(dir2); + fsTarget.setPermission(new Path(targetTestRoot, "fallbackDir/user1/hive/"), + FsPermission.valueOf("-rwxr--r--")); + URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); + + HashSet beforeFallback = new HashSet<>(); + try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + for (FileStatus stat : vfs + .listStatus(new Path(viewFsUri.toString(), "/user1/hive/"))) { + beforeFallback.add(stat.getPath()); + } + } + ConfigUtil + .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + HashSet afterFallback = new HashSet<>(); + for (FileStatus stat : vfs + .listStatus(new Path(viewFsUri.toString(), "/user1/hive/"))) { + afterFallback.add(stat.getPath()); + if (dir1.getName().equals(stat.getPath().getName())) { + // make sure fallback dir listed out with correct permissions, but not + // with link permissions. + assertEquals(FsPermission.valueOf("-rwxr--r--"), + stat.getPermission()); + } + } + // + //viewfs://default/user1/hive/warehouse + afterFallback.removeAll(beforeFallback); + assertEquals("The same directory name in fallback link should be shaded", + 1, afterFallback.size()); + } + } + + /** + * Tests ListStatus on link parent with fallback configured. 
+ * =============================Example.====================================== + * ===== Fallback path tree =============== Mount Path Tree ================== + * =========================================================================== + * * / ***** / ********** + * * / ***** / ********** + * * user1 ***** user1 ********** + * * / ***** / ********** + * * hive ***** hive ********** + * * / \ ***** / ********** + * * warehouse warehouse1 ***** warehouse ********** + * * (-rwxr--r--) ***** (-r-xr--r--) ********** + * * / ***** / ********** + * * partition-0 ***** partition-0 ---> targetTestRoot ********** + * * ***** (-r-xr--r--) (-rwxr--rw-) ********** + * =========================================================================== + * =========================================================================== + * *** ls /user1/hive/warehouse ** + * *** viewfs://default/user1/hive/warehouse/partition-0 (-rwxr--rw-) ** + * =========================================================================== + */ + @Test + public void testLSOnLinkParentWithFallbackLinkWithSameMountDirectoryTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + // Creating multiple directories path under the fallback directory. + // "/user1/hive/warehouse/partition-0" directory already exists as + // configured mount point. + Path dir1 = new Path(targetTestRoot, + "fallbackDir/user1/hive/warehouse/partition-0"); + Path dir2 = new Path(targetTestRoot, "fallbackDir/user1/hive/warehouse1"); + fsTarget.mkdirs(dir1); + fsTarget.mkdirs(dir2); + fsTarget.setPermission(new Path(targetTestRoot, + "fallbackDir/user1/hive/warehouse/partition-0"), + FsPermission.valueOf("-rwxr--r--")); + fsTarget.setPermission(targetTestRoot, FsPermission.valueOf("-rwxr--rw-")); + URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); + + HashSet beforeFallback = new HashSet<>(); + try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + for (FileStatus stat : vfs.listStatus( + new Path(viewFsUri.toString(), "/user1/hive/warehouse/"))) { + beforeFallback.add(stat.getPath()); + } + } + ConfigUtil + .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + HashSet afterFallback = new HashSet<>(); + for (FileStatus stat : vfs.listStatus( + new Path(viewFsUri.toString(), "/user1/hive/warehouse/"))) { + afterFallback.add(stat.getPath()); + if (dir1.getName().equals(stat.getPath().getName())) { + // make sure fallback dir listed out with correct permissions, but not + // with link permissions. + assertEquals(FsPermission.valueOf("-rwxr--rw-"), + stat.getPermission()); + } + } + afterFallback.removeAll(beforeFallback); + assertEquals("Just to make sure paths are same.", 0, + afterFallback.size()); + } + } + + /** + * Tests ListStatus on root with fallback configured. 
+ * =============================Example.======================================= + * ===== Fallback path tree =============== Mount Path Tree ================== + * =========================================================================== + * * / / ***** / *** + * * / / ***** / *** + * * user1 user2 ***** user1 ---> targetTestRoot *** + * *(-r-xr--r--) (-r-xr--r--) ***** (-rwxr--rw-) *** + * =========================================================================== + * =========================================================================== + * *** ls /user1/hive/warehouse ** + * *** viewfs://default/user1(-rwxr--rw-) ** + * *** viewfs://default/user2(-r-xr--r--) ** + * =========================================================================== + */ + @Test + public void testLSOnRootWithFallbackLinkWithSameMountDirectories() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil + .addLink(conf, "/user1", new Path(targetTestRoot.toString()).toUri()); + // Creating multiple directories path under the fallback directory. + // "/user1" directory already exists as configured mount point. + Path dir1 = new Path(targetTestRoot, "fallbackDir/user1"); + Path dir2 = new Path(targetTestRoot, "fallbackDir/user2"); + fsTarget.mkdirs(dir1); + fsTarget.mkdirs(dir2, FsPermission.valueOf("-rwxr--r--")); + fsTarget.setPermission(targetTestRoot, FsPermission.valueOf("-rwxr--rw-")); + URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); + + HashSet beforeFallback = new HashSet<>(); + try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + for (FileStatus stat : vfs + .listStatus(new Path(viewFsUri.toString(), "/"))) { + beforeFallback.add(stat.getPath()); + } + } + ConfigUtil + .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + HashSet afterFallback = new HashSet<>(); + for (FileStatus stat : vfs + .listStatus(new Path(viewFsUri.toString(), "/"))) { + afterFallback.add(stat.getPath()); + if (dir1.getName().equals(stat.getPath().getName())) { + // make sure fallback dir listed out with correct permissions, but not + // with link permissions. + assertEquals(FsPermission.valueOf("-rwxr--rw-"), + stat.getPermission()); + } else { + assertEquals("Path is: " + stat.getPath(), + FsPermission.valueOf("-rwxr--r--"), stat.getPermission()); + } + } + afterFallback.removeAll(beforeFallback); + assertEquals(1, afterFallback.size()); + assertEquals("/user2 dir from fallback should be listed.", "user2", + afterFallback.iterator().next().getName()); + } + } + + @Test + public void testLSOnLinkParentWhereMountLinkMatchesWithAFileUnderFallback() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, true); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/part-0", + new Path(targetTestRoot.toString()).toUri()); + // Create a file path in fallback matching to the path of mount link. 
+ Path file1 = + new Path(targetTestRoot, "fallbackDir/user1/hive/warehouse/part-0"); + fsTarget.createNewFile(file1); + Path dir2 = new Path(targetTestRoot, "fallbackDir/user1/hive/warehouse1"); + fsTarget.mkdirs(dir2); + URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); + + ConfigUtil + .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + for (FileStatus stat : vfs.listStatus( + new Path(viewFsUri.toString(), "/user1/hive/warehouse/"))) { + if (file1.getName().equals(stat.getPath().getName())) { + // Link represents as symlink. + assertFalse(stat.isFile()); + assertFalse(stat.isDirectory()); + assertTrue(stat.isSymlink()); + Path fileUnderDir = new Path(stat.getPath(), "check"); + assertTrue(vfs.mkdirs(fileUnderDir)); // Creating dir under target + assertTrue(fsTarget + .exists(new Path(targetTestRoot, fileUnderDir.getName()))); + } + } + } + } } From fa14e4bc001e28d9912e8d985d09bab75aedb87c Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Tue, 23 Jun 2020 16:13:43 +0800 Subject: [PATCH 049/131] HADOOP-17068. Client fails forever when namenode ipaddr changed. Contributed by Sean Chow. --- .../src/main/java/org/apache/hadoop/ipc/Client.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 688eed647c209..6240f859cf786 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -649,6 +649,7 @@ private synchronized boolean updateAddress() throws IOException { private synchronized void setupConnection( UserGroupInformation ticket) throws IOException { + LOG.debug("Setup connection to " + server.toString()); short ioFailures = 0; short timeoutFailures = 0; while (true) { @@ -711,8 +712,16 @@ private synchronized void setupConnection( } catch (IOException ie) { if (updateAddress()) { timeoutFailures = ioFailures = 0; + try { + // HADOOP-17068: when server changed, ignore the exception. + handleConnectionFailure(ioFailures++, ie); + } catch (IOException ioe) { + LOG.warn("Exception when handle ConnectionFailure: " + + ioe.getMessage()); + } + } else { + handleConnectionFailure(ioFailures++, ie); } - handleConnectionFailure(ioFailures++, ie); } } } @@ -1277,7 +1286,7 @@ private synchronized void close() { cleanupCalls(); } } else { - // log the info + // Log the newest server information if update address. if (LOG.isDebugEnabled()) { LOG.debug("closing ipc connection to " + server + ": " + closeException.getMessage(),closeException); From 03f855e3e7a4505362e221c8a07ae9317af773d0 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Tue, 23 Jun 2020 12:13:04 +0200 Subject: [PATCH 050/131] YARN-10316. FS-CS converter: convert maxAppsDefault, maxRunningApps settings. 
Contributed by Peter Bacsko --- .../FSConfigToCSConfigConverter.java | 47 +++++++----- .../FSConfigToCSConfigRuleHandler.java | 8 -- .../fair/converter/FSQueueConverter.java | 10 +-- .../TestFSConfigToCSConfigConverter.java | 74 ++++++++++--------- .../TestFSConfigToCSConfigRuleHandler.java | 6 -- .../fair/converter/TestFSQueueConverter.java | 6 +- 6 files changed, 76 insertions(+), 75 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java index 5acf356725211..a8862e9af3609 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java @@ -83,6 +83,9 @@ public class FSConfigToCSConfigConverter { private boolean preemptionEnabled = false; private int queueMaxAppsDefault; private float queueMaxAMShareDefault; + private Map userMaxApps; + private int userMaxAppsDefault; + private boolean autoCreateChildQueues = false; private boolean sizeBasedWeight = false; private boolean userAsDefaultQueue = false; @@ -99,6 +102,8 @@ public class FSConfigToCSConfigConverter { private boolean consoleMode = false; private boolean convertPlacementRules = false; + + public FSConfigToCSConfigConverter(FSConfigToCSConfigRuleHandler ruleHandler, ConversionOptions conversionOptions) { this.ruleHandler = ruleHandler; @@ -242,14 +247,13 @@ void convert(Configuration inputYarnSiteConfig) throws Exception { AllocationConfiguration allocConf = fs.getAllocationConfiguration(); queueMaxAppsDefault = allocConf.getQueueMaxAppsDefault(); + userMaxAppsDefault = allocConf.getUserMaxAppsDefault(); + userMaxApps = allocConf.getUserMaxApps(); queueMaxAMShareDefault = allocConf.getQueueMaxAMShareDefault(); convertedYarnSiteConfig = new Configuration(false); capacitySchedulerConfig = new Configuration(false); - checkUserMaxApps(allocConf); - checkUserMaxAppsDefault(allocConf); - convertYarnSiteXml(inputYarnSiteConfig, havePlacementPolicies); convertCapacitySchedulerXml(fs); @@ -287,7 +291,9 @@ private void convertYarnSiteXml(Configuration inputYarnSiteConfig, private void convertCapacitySchedulerXml(FairScheduler fs) { FSParentQueue rootQueue = fs.getQueueManager().getRootQueue(); - emitDefaultMaxApplications(); + emitDefaultQueueMaxParallelApplications(); + emitDefaultUserMaxParallelApplications(); + emitUserMaxParallelApplications(); emitDefaultMaxAMShare(); FSQueueConverter queueConverter = FSQueueConverterBuilder.create() @@ -322,14 +328,30 @@ private void convertCapacitySchedulerXml(FairScheduler fs) { } } - private void emitDefaultMaxApplications() { + private void emitDefaultQueueMaxParallelApplications() { if (queueMaxAppsDefault != Integer.MAX_VALUE) { capacitySchedulerConfig.set( - CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS, + PREFIX + "max-parallel-apps", String.valueOf(queueMaxAppsDefault)); } } + private void emitDefaultUserMaxParallelApplications() { + if 
(userMaxAppsDefault != Integer.MAX_VALUE) { + capacitySchedulerConfig.set( + PREFIX + "user.max-parallel-apps", + String.valueOf(userMaxAppsDefault)); + } + } + + private void emitUserMaxParallelApplications() { + userMaxApps + .forEach((user, apps) -> { + capacitySchedulerConfig.setInt( + PREFIX + "user." + user + ".max-parallel-apps", apps); + }); + } + private void emitDefaultMaxAMShare() { if (queueMaxAMShareDefault == QUEUE_MAX_AM_SHARE_DISABLED) { capacitySchedulerConfig.setFloat( @@ -374,19 +396,6 @@ private void checkReservationSystem(Configuration conf) { } } - private void checkUserMaxApps(AllocationConfiguration allocConf) { - if (allocConf.getUserMaxApps() != null - && allocConf.getUserMaxApps().size() > 0) { - ruleHandler.handleUserMaxApps(); - } - } - - private void checkUserMaxAppsDefault(AllocationConfiguration allocConf) { - if (allocConf.getUserMaxAppsDefault() > 0) { - ruleHandler.handleUserMaxAppsDefault(); - } - } - private boolean isDrfUsed(FairScheduler fs) { FSQueue rootQueue = fs.getQueueManager().getRootQueue(); AllocationConfiguration allocConf = fs.getAllocationConfiguration(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigRuleHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigRuleHandler.java index 3a2d363ca1df5..b8a65fbe94f02 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigRuleHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigRuleHandler.java @@ -170,14 +170,6 @@ public void handleChildQueueCount(String queue, int count) { } } - public void handleUserMaxApps() { - handle(USER_MAX_RUNNING_APPS, "", null); - } - - public void handleUserMaxAppsDefault() { - handle(USER_MAX_APPS_DEFAULT, "", null); - } - public void handleDynamicMaxAssign() { handle(DYNAMIC_MAX_ASSIGN, FairSchedulerConfiguration.DYNAMIC_MAX_ASSIGN, null); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSQueueConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSQueueConverter.java index 2b3f28495d7d7..cc52309b47bcd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSQueueConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSQueueConverter.java @@ -43,7 +43,7 @@ */ public class FSQueueConverter { public static final float QUEUE_MAX_AM_SHARE_DISABLED = -1.0f; - private static final int MAX_RUNNING_APPS_UNSET = Integer.MIN_VALUE; + private static final int MAX_RUNNING_APPS_UNSET = Integer.MAX_VALUE; private static final String FAIR_POLICY = "fair"; 
private static final String FIFO_POLICY = "fifo"; @@ -79,7 +79,7 @@ public void convertQueueHierarchy(FSQueue queue) { emitChildQueues(queueName, children); emitMaxAMShare(queueName, queue); - emitMaxRunningApps(queueName, queue); + emitMaxParallelApps(queueName, queue); emitMaxAllocations(queueName, queue); emitPreemptionDisabled(queueName, queue); @@ -138,14 +138,14 @@ private void emitMaxAMShare(String queueName, FSQueue queue) { /** * <maxRunningApps> - * ==> yarn.scheduler.capacity.<queue-name>.maximum-applications. + * ==> yarn.scheduler.capacity.<queue-name>.max-parallel-apps. * @param queueName * @param queue */ - private void emitMaxRunningApps(String queueName, FSQueue queue) { + private void emitMaxParallelApps(String queueName, FSQueue queue) { if (queue.getMaxRunningApps() != MAX_RUNNING_APPS_UNSET && queue.getMaxRunningApps() != queueMaxAppsDefault) { - capacitySchedulerConfig.set(PREFIX + queueName + ".maximum-applications", + capacitySchedulerConfig.set(PREFIX + queueName + ".max-parallel-apps", String.valueOf(queue.getMaxRunningApps())); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java index 46e1fb39ad622..141a4f88c5a0a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java @@ -155,17 +155,7 @@ private void createConverter() { .withOutputDirectory(FSConfigConverterTestCommons.OUTPUT_DIR); } - @Test - public void testDefaultMaxApplications() throws Exception { - converter.convert(config); - - Configuration conf = converter.getCapacitySchedulerConfig(); - int maxApps = - conf.getInt( - CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS, -1); - assertEquals("Default max apps", 15, maxApps); - } @Test public void testDefaultMaxAMShare() throws Exception { @@ -252,57 +242,73 @@ public void testConvertACLs() throws Exception { } @Test - public void testDefaultMaxRunningApps() throws Exception { + public void testDefaultQueueMaxParallelApps() throws Exception { converter.convert(config); Configuration conf = converter.getCapacitySchedulerConfig(); - // default setting - assertEquals("Default max apps", 15, - conf.getInt(PREFIX + "maximum-applications", -1)); + assertEquals("Default max parallel apps", 15, + conf.getInt(PREFIX + "max-parallel-apps", -1)); } @Test - public void testQueueMaxChildCapacityNotSupported() throws Exception { - expectedException.expect(UnsupportedPropertyException.class); - expectedException.expectMessage("test"); + public void testSpecificQueueMaxParallelApps() throws Exception { + converter.convert(config); - Mockito.doThrow(new UnsupportedPropertyException("test")) - .when(ruleHandler).handleMaxChildCapacity(); + Configuration conf = converter.getCapacitySchedulerConfig(); - converter.convert(config); + assertEquals("root.admins.alice max parallel apps", 2, + conf.getInt(PREFIX + 
"root.admins.alice.max-parallel-apps", -1)); } @Test - public void testReservationSystemNotSupported() throws Exception { - expectedException.expect(UnsupportedPropertyException.class); - expectedException.expectMessage("maxCapacity"); + public void testDefaultUserMaxParallelApps() throws Exception { + converter.convert(config); - Mockito.doThrow(new UnsupportedPropertyException("maxCapacity")) - .when(ruleHandler).handleMaxChildCapacity(); - config.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true); + Configuration conf = converter.getCapacitySchedulerConfig(); + int userMaxParallelApps = + conf.getInt( + PREFIX + "user.max-parallel-apps", -1); + + assertEquals("Default user max parallel apps", 10, + userMaxParallelApps); + } + @Test + public void testSpecificUserMaxParallelApps() throws Exception { converter.convert(config); + + Configuration conf = converter.getCapacitySchedulerConfig(); + + assertEquals("Max parallel apps for alice", 30, + conf.getInt(PREFIX + "user.alice.max-parallel-apps", -1)); + assertNull("Max parallel apps should be undefined for user bob", + conf.get(PREFIX + "user.bob.max-parallel-apps")); + assertNull("Max parallel apps should be undefined for user joe", + conf.get(PREFIX + "user.joe.max-parallel-apps")); + assertNull("Max parallel apps should be undefined for user john", + conf.get(PREFIX + "user.john.max-parallel-apps")); } @Test - public void testUserMaxAppsNotSupported() throws Exception { + public void testQueueMaxChildCapacityNotSupported() throws Exception { expectedException.expect(UnsupportedPropertyException.class); - expectedException.expectMessage("userMaxApps"); + expectedException.expectMessage("test"); - Mockito.doThrow(new UnsupportedPropertyException("userMaxApps")) - .when(ruleHandler).handleUserMaxApps(); + Mockito.doThrow(new UnsupportedPropertyException("test")) + .when(ruleHandler).handleMaxChildCapacity(); converter.convert(config); } @Test - public void testUserMaxAppsDefaultNotSupported() throws Exception { + public void testReservationSystemNotSupported() throws Exception { expectedException.expect(UnsupportedPropertyException.class); - expectedException.expectMessage("userMaxAppsDefault"); + expectedException.expectMessage("maxCapacity"); - Mockito.doThrow(new UnsupportedPropertyException("userMaxAppsDefault")) - .when(ruleHandler).handleUserMaxAppsDefault(); + Mockito.doThrow(new UnsupportedPropertyException("maxCapacity")) + .when(ruleHandler).handleMaxChildCapacity(); + config.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true); converter.convert(config); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigRuleHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigRuleHandler.java index b563e64364915..d1eee29118142 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigRuleHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigRuleHandler.java @@ -76,8 +76,6 @@ public void 
testInitPropertyActionsToWarning() throws IOException { ruleHandler.handleQueueAutoCreate("test"); ruleHandler.handleReservationSystem(); ruleHandler.handleSpecifiedNotFirstRule(); - ruleHandler.handleUserMaxApps(); - ruleHandler.handleUserMaxAppsDefault(); } @Test @@ -106,8 +104,6 @@ public void testAllRulesWarning() throws IOException { ruleHandler.handleQueueAutoCreate("test"); ruleHandler.handleReservationSystem(); ruleHandler.handleSpecifiedNotFirstRule(); - ruleHandler.handleUserMaxApps(); - ruleHandler.handleUserMaxAppsDefault(); } @Test @@ -140,8 +136,6 @@ public void testAllRulesAbort() throws IOException { expectAbort(() -> ruleHandler.handleQueueAutoCreate("test")); expectAbort(() -> ruleHandler.handleReservationSystem()); expectAbort(() -> ruleHandler.handleSpecifiedNotFirstRule()); - expectAbort(() -> ruleHandler.handleUserMaxApps()); - expectAbort(() -> ruleHandler.handleUserMaxAppsDefault()); expectAbort(() -> ruleHandler.handleFairAsDrf("test")); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java index 0ed6bbf202443..dfbd532fb0d31 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java @@ -195,18 +195,18 @@ public void testQueueMaxAMShare() { } @Test - public void testQueueMaxRunningApps() { + public void testQueueMaxParallelApps() { converter = builder.build(); converter.convertQueueHierarchy(rootQueue); assertEquals("root.admins.alice max apps", 2, - csConfig.getInt(PREFIX + "root.admins.alice.maximum-applications", + csConfig.getInt(PREFIX + "root.admins.alice.max-parallel-apps", -1)); Set remaining = Sets.difference(ALL_QUEUES, Sets.newHashSet("root.admins.alice")); - assertNoValueForQueues(remaining, ".maximum-applications", csConfig); + assertNoValueForQueues(remaining, ".max-parallel-apps", csConfig); } @Test From 84110d850e2bc2a9ff4afcc7508fecd81cb5b7e5 Mon Sep 17 00:00:00 2001 From: lfengnan Date: Tue, 23 Jun 2020 13:12:29 -0700 Subject: [PATCH 051/131] HDFS-15383. RBF: Add support for router delegation token without watch (#2047) Improving router's performance for delegation tokens related operations. It achieves the goal by removing watchers from router on tokens since based on our experience. The huge number of watches inside Zookeeper is degrading Zookeeper's performance pretty hard. The current limit is about 1.2-1.5 million. 
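A minimal configuration sketch for the no-watch mode, mirroring the setup in
the new TestZKDelegationTokenSecretManagerImpl below (the example class name,
the 3-second interval and the omission of the ZooKeeper connection settings
are illustrative assumptions, not defaults introduced by this change):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.server.federation.router.security.token.ZKDelegationTokenSecretManagerImpl;
  import static org.apache.hadoop.hdfs.server.federation.router.security.token.ZKDelegationTokenSecretManagerImpl.ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL;
  import static org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager.ZK_DTSM_TOKEN_WATCHER_ENABLED;

  public class RouterTokenSecretManagerExample {
    public static ZKDelegationTokenSecretManagerImpl create(Configuration conf) {
      // ZooKeeper connection settings (connect string, timeouts, ACLs) are
      // assumed to be present on conf already; they are omitted here.
      // Disable per-token ZK watches; the secret manager then relies on a
      // periodic rebuild of its local token cache instead of notifications.
      conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false);
      // Interval in seconds between cache rebuilds (the default is 5).
      conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 3);
      return new ZKDelegationTokenSecretManagerImpl(conf);
    }
  }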
--- .../AbstractDelegationTokenSecretManager.java | 6 +- .../ZKDelegationTokenSecretManager.java | 141 ++++++----- .../TestZKDelegationTokenSecretManager.java | 12 +- .../ZKDelegationTokenSecretManagerImpl.java | 174 ++++++++++++- ...estZKDelegationTokenSecretManagerImpl.java | 234 ++++++++++++++++++ 5 files changed, 498 insertions(+), 69 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index f329accec7553..3a22cee881070 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -23,11 +23,11 @@ import java.io.IOException; import java.security.MessageDigest; import java.util.Collection; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import javax.crypto.SecretKey; @@ -63,7 +63,7 @@ private String formatTokenId(TokenIdent id) { * to DelegationTokenInformation. Protected by this object lock. */ protected final Map currentTokens - = new HashMap(); + = new ConcurrentHashMap<>(); /** * Sequence number to create DelegationTokenIdentifier. @@ -75,7 +75,7 @@ private String formatTokenId(TokenIdent id) { * Access to allKeys is protected by this object lock */ protected final Map allKeys - = new HashMap(); + = new ConcurrentHashMap<>(); /** * Access to currentId is protected by this object lock. 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java index cd3b8c0c0f279..f50035d03773e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java @@ -55,6 +55,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager; +import static org.apache.hadoop.util.Time.now; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NoNodeException; @@ -79,7 +80,7 @@ public abstract class ZKDelegationTokenSecretManager extends AbstractDelegationTokenSecretManager { - private static final String ZK_CONF_PREFIX = "zk-dt-secret-manager."; + public static final String ZK_CONF_PREFIX = "zk-dt-secret-manager."; public static final String ZK_DTSM_ZK_NUM_RETRIES = ZK_CONF_PREFIX + "zkNumRetries"; public static final String ZK_DTSM_ZK_SESSION_TIMEOUT = ZK_CONF_PREFIX @@ -100,6 +101,9 @@ public abstract class ZKDelegationTokenSecretManager 0) { int keyId = Integer.parseInt(tokSeg.substring(j + 1)); - synchronized (this) { - allKeys.remove(keyId); - } + allKeys.remove(keyId); } } } - private void processTokenAddOrUpdate(ChildData data) throws IOException { - ByteArrayInputStream bin = new ByteArrayInputStream(data.getData()); + protected TokenIdent processTokenAddOrUpdate(byte[] data) throws IOException { + ByteArrayInputStream bin = new ByteArrayInputStream(data); DataInputStream din = new DataInputStream(bin); TokenIdent ident = createIdentifier(); ident.readFields(din); @@ -488,12 +495,10 @@ private void processTokenAddOrUpdate(ChildData data) throws IOException { if (numRead > -1) { DelegationTokenInformation tokenInfo = new DelegationTokenInformation(renewDate, password); - synchronized (this) { - currentTokens.put(ident, tokenInfo); - // The cancel task might be waiting - notifyAll(); - } + currentTokens.put(ident, tokenInfo); + return ident; } + return null; } private void processTokenRemoved(ChildData data) throws IOException { @@ -501,11 +506,7 @@ private void processTokenRemoved(ChildData data) throws IOException { DataInputStream din = new DataInputStream(bin); TokenIdent ident = createIdentifier(); ident.readFields(din); - synchronized (this) { - currentTokens.remove(ident); - // The cancel task might be waiting - notifyAll(); - } + currentTokens.remove(ident); } @Override @@ -706,7 +707,7 @@ protected DelegationTokenInformation getTokenInfo(TokenIdent ident) { * * @param ident Identifier of the token */ - private synchronized void syncLocalCacheWithZk(TokenIdent ident) { + protected void syncLocalCacheWithZk(TokenIdent ident) { try { DelegationTokenInformation tokenInfo = getTokenInfoFromZK(ident); if (tokenInfo != null && !currentTokens.containsKey(ident)) { @@ -720,16 +721,21 @@ private synchronized void syncLocalCacheWithZk(TokenIdent ident) { } } - private DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident) + protected DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident) throws IOException { return getTokenInfoFromZK(ident, false); } - private 
DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident, + protected DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident, boolean quiet) throws IOException { String nodePath = getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX + ident.getSequenceNumber()); + return getTokenInfoFromZK(nodePath, quiet); + } + + protected DelegationTokenInformation getTokenInfoFromZK(String nodePath, + boolean quiet) throws IOException { try { byte[] data = zkClient.getData().forPath(nodePath); if ((data == null) || (data.length == 0)) { @@ -864,15 +870,30 @@ protected void updateToken(TokenIdent ident, @Override protected void removeStoredToken(TokenIdent ident) throws IOException { + removeStoredToken(ident, false); + } + + protected void removeStoredToken(TokenIdent ident, + boolean checkAgainstZkBeforeDeletion) throws IOException { String nodeRemovePath = getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX + ident.getSequenceNumber()); - if (LOG.isDebugEnabled()) { - LOG.debug("Removing ZKDTSMDelegationToken_" - + ident.getSequenceNumber()); - } try { - if (zkClient.checkExists().forPath(nodeRemovePath) != null) { + DelegationTokenInformation dtInfo = getTokenInfoFromZK(ident, true); + if (dtInfo != null) { + // For the case there is no sync or watch miss, it is possible that the + // local storage has expired tokens which have been renewed by peer + // so double check again to avoid accidental delete + if (checkAgainstZkBeforeDeletion + && dtInfo.getRenewDate() > now()) { + LOG.info("Node already renewed by peer " + nodeRemovePath + + " so this token should not be deleted"); + return; + } + if (LOG.isDebugEnabled()) { + LOG.debug("Removing ZKDTSMDelegationToken_" + + ident.getSequenceNumber()); + } while(zkClient.checkExists().forPath(nodeRemovePath) != null){ try { zkClient.delete().guaranteed().forPath(nodeRemovePath); @@ -895,7 +916,7 @@ protected void removeStoredToken(TokenIdent ident) } @Override - public synchronized TokenIdent cancelToken(Token token, + public TokenIdent cancelToken(Token token, String canceller) throws IOException { ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier()); DataInputStream in = new DataInputStream(buf); @@ -906,7 +927,7 @@ public synchronized TokenIdent cancelToken(Token token, return super.cancelToken(token, canceller); } - private void addOrUpdateToken(TokenIdent ident, + protected void addOrUpdateToken(TokenIdent ident, DelegationTokenInformation info, boolean isUpdate) throws Exception { String nodeCreatePath = getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX @@ -933,6 +954,10 @@ private void addOrUpdateToken(TokenIdent ident, } } + public boolean isTokenWatcherEnabled() { + return isTokenWatcherEnabled; + } + /** * Simple implementation of an {@link ACLProvider} that simply returns an ACL * that gives all permissions only to a single principal. 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java index b2e177976b6d5..643da6a368b64 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java @@ -59,15 +59,15 @@ public class TestZKDelegationTokenSecretManager { private static final Logger LOG = LoggerFactory.getLogger(TestZKDelegationTokenSecretManager.class); - private static final int TEST_RETRIES = 2; + protected static final int TEST_RETRIES = 2; - private static final int RETRY_COUNT = 5; + protected static final int RETRY_COUNT = 5; - private static final int RETRY_WAIT = 1000; + protected static final int RETRY_WAIT = 1000; - private static final long DAY_IN_SECS = 86400; + protected static final long DAY_IN_SECS = 86400; - private TestingServer zkServer; + protected TestingServer zkServer; @Rule public Timeout globalTimeout = new Timeout(300000); @@ -425,7 +425,7 @@ private void verifyACL(CuratorFramework curatorFramework, // cancelled but.. that would mean having to make an RPC call for every // verification request. // Thus, the eventual consistency tradef-off should be acceptable here... - private void verifyTokenFail(DelegationTokenManager tm, + protected void verifyTokenFail(DelegationTokenManager tm, Token token) throws IOException, InterruptedException { verifyTokenFailWithRetry(tm, token, RETRY_COUNT); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java index 4a111187ac46a..2d55026c807af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java @@ -19,13 +19,26 @@ package org.apache.hadoop.hdfs.server.federation.router.security.token; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager; +import org.apache.hadoop.util.Time; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooKeeper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; /** * Zookeeper based router delegation token store implementation. 
@@ -33,24 +46,181 @@ public class ZKDelegationTokenSecretManagerImpl extends ZKDelegationTokenSecretManager { + public static final String ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL = + ZK_CONF_PREFIX + "router.token.sync.interval"; + public static final int ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL_DEFAULT = 5; + private static final Logger LOG = LoggerFactory.getLogger(ZKDelegationTokenSecretManagerImpl.class); - private Configuration conf = null; + private Configuration conf; + + private final ScheduledExecutorService scheduler = + Executors.newSingleThreadScheduledExecutor(); + + // Local cache of delegation tokens, used for deprecating tokens from + // currentTokenMap + private final Set localTokenCache = + new HashSet<>(); + // Native zk client for getting all tokens + private ZooKeeper zookeeper; + private final String TOKEN_PATH = "/" + zkClient.getNamespace() + + ZK_DTSM_TOKENS_ROOT; + // The flag used to issue an extra check before deletion + // Since cancel token and token remover thread use the same + // API here and one router could have a token that is renewed + // by another router, thus token remover should always check ZK + // to confirm whether it has been renewed or not + private ThreadLocal checkAgainstZkBeforeDeletion = + new ThreadLocal() { + @Override + protected Boolean initialValue() { + return true; + } + }; public ZKDelegationTokenSecretManagerImpl(Configuration conf) { super(conf); this.conf = conf; try { - super.startThreads(); + startThreads(); } catch (IOException e) { LOG.error("Error starting threads for zkDelegationTokens", e); } LOG.info("Zookeeper delegation token secret manager instantiated"); } + @Override + public void startThreads() throws IOException { + super.startThreads(); + // start token cache related work when watcher is disabled + if (!isTokenWatcherEnabled()) { + LOG.info("Watcher for tokens is disabled in this secret manager"); + try { + // By default set this variable + checkAgainstZkBeforeDeletion.set(true); + // Ensure the token root path exists + if (zkClient.checkExists().forPath(ZK_DTSM_TOKENS_ROOT) == null) { + zkClient.create().creatingParentsIfNeeded() + .withMode(CreateMode.PERSISTENT) + .forPath(ZK_DTSM_TOKENS_ROOT); + } + // Set up zookeeper client + try { + zookeeper = zkClient.getZookeeperClient().getZooKeeper(); + } catch (Exception e) { + LOG.info("Cannot get zookeeper client ", e); + } finally { + if (zookeeper == null) { + throw new IOException("Zookeeper client is null"); + } + } + + LOG.info("Start loading token cache"); + long start = Time.now(); + rebuildTokenCache(true); + LOG.info("Loaded token cache in {} milliseconds", Time.now() - start); + + int syncInterval = conf.getInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, + ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL_DEFAULT); + scheduler.scheduleAtFixedRate(new Runnable() { + @Override + public void run() { + try { + rebuildTokenCache(false); + } catch (Exception e) { + // ignore + } + } + }, syncInterval, syncInterval, TimeUnit.SECONDS); + } catch (Exception e) { + LOG.error("Error rebuilding local cache for zkDelegationTokens ", e); + } + } + } + + @Override + public void stopThreads() { + super.stopThreads(); + scheduler.shutdown(); + } + @Override public DelegationTokenIdentifier createIdentifier() { return new DelegationTokenIdentifier(); } + + /** + * This function will rebuild local token cache from zk storage. + * It is first called when the secret manager is initialized and + * then regularly at a configured interval. 
+ * + * @param initial whether this is called during initialization + * @throws IOException + */ + private void rebuildTokenCache(boolean initial) throws IOException { + localTokenCache.clear(); + // Use bare zookeeper client to get all children since curator will + // wrap the same API with a sorting process. This is time consuming given + // millions of tokens + List zkTokens; + try { + zkTokens = zookeeper.getChildren(TOKEN_PATH, false); + } catch (KeeperException | InterruptedException e) { + throw new IOException("Tokens cannot be fetched from path " + + TOKEN_PATH, e); + } + byte[] data; + for (String tokenPath : zkTokens) { + try { + data = zkClient.getData().forPath( + ZK_DTSM_TOKENS_ROOT + "/" + tokenPath); + } catch (KeeperException.NoNodeException e) { + LOG.debug("No node in path [" + tokenPath + "]"); + continue; + } catch (Exception ex) { + throw new IOException(ex); + } + // Store data to currentTokenMap + AbstractDelegationTokenIdentifier ident = processTokenAddOrUpdate(data); + // Store data to localTokenCache for sync + localTokenCache.add(ident); + } + if (!initial) { + // Sync zkTokens with local cache, specifically + // 1) add/update tokens to local cache from zk, which is done through + // processTokenAddOrUpdate above + // 2) remove tokens in local cache but not in zk anymore + for (AbstractDelegationTokenIdentifier ident : currentTokens.keySet()) { + if (!localTokenCache.contains(ident)) { + currentTokens.remove(ident); + } + } + } + } + + @Override + public AbstractDelegationTokenIdentifier cancelToken( + Token token, String canceller) + throws IOException { + checkAgainstZkBeforeDeletion.set(false); + AbstractDelegationTokenIdentifier ident = super.cancelToken(token, + canceller); + checkAgainstZkBeforeDeletion.set(true); + return ident; + } + + @Override + protected void removeStoredToken(AbstractDelegationTokenIdentifier ident) + throws IOException { + super.removeStoredToken(ident, checkAgainstZkBeforeDeletion.get()); + } + + @Override + protected void addOrUpdateToken(AbstractDelegationTokenIdentifier ident, + DelegationTokenInformation info, boolean isUpdate) throws Exception { + // Store the data in local memory first + currentTokens.put(ident, info); + super.addOrUpdateToken(ident, info, isUpdate); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java new file mode 100644 index 0000000000000..3c7f8e88a91d1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java @@ -0,0 +1,234 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.federation.security.token; + +import static org.apache.hadoop.hdfs.server.federation.router.security.token.ZKDelegationTokenSecretManagerImpl.ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL; +import static org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager.ZK_DTSM_TOKEN_WATCHER_ENABLED; +import static org.apache.hadoop.security.token.delegation.web.DelegationTokenManager.REMOVAL_SCAN_INTERVAL; +import static org.apache.hadoop.security.token.delegation.web.DelegationTokenManager.RENEW_INTERVAL; +import static org.junit.Assert.fail; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.router.security.token.ZKDelegationTokenSecretManagerImpl; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.delegation.TestZKDelegationTokenSecretManager; +import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier; +import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager; +import org.apache.hadoop.util.Time; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TestZKDelegationTokenSecretManagerImpl + extends TestZKDelegationTokenSecretManager { + private static final Logger LOG = + LoggerFactory.getLogger(TestZKDelegationTokenSecretManagerImpl.class); + + @SuppressWarnings("unchecked") + @Test + public void testMultiNodeOperationWithoutWatch() throws Exception { + String connectString = zkServer.getConnectString(); + Configuration conf = getSecretConf(connectString); + // disable watch + conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false); + conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 3); + + for (int i = 0; i < TEST_RETRIES; i++) { + ZKDelegationTokenSecretManagerImpl dtsm1 = + new ZKDelegationTokenSecretManagerImpl(conf); + ZKDelegationTokenSecretManagerImpl dtsm2 = + new ZKDelegationTokenSecretManagerImpl(conf); + DelegationTokenManager tm1, tm2; + tm1 = new DelegationTokenManager(conf, new Text("bla")); + tm1.setExternalDelegationTokenSecretManager(dtsm1); + tm2 = new DelegationTokenManager(conf, new Text("bla")); + tm2.setExternalDelegationTokenSecretManager(dtsm2); + + // common token operation without watchers should still be working + Token token = + (Token) tm1.createToken( + UserGroupInformation.getCurrentUser(), "foo"); + Assert.assertNotNull(token); + tm2.verifyToken(token); + tm2.renewToken(token, "foo"); + tm1.verifyToken(token); + tm1.cancelToken(token, "foo"); + try { + verifyTokenFail(tm2, token); + fail("Expected InvalidToken"); + } catch (SecretManager.InvalidToken it) { + // Ignore + } + + token = (Token) tm2.createToken( + UserGroupInformation.getCurrentUser(), "bar"); + Assert.assertNotNull(token); + tm1.verifyToken(token); + tm1.renewToken(token, "bar"); + tm2.verifyToken(token); + tm2.cancelToken(token, "bar"); + try { + verifyTokenFail(tm1, token); + fail("Expected InvalidToken"); + } catch (SecretManager.InvalidToken it) { + // Ignore + } + + dtsm1.stopThreads(); + dtsm2.stopThreads(); + verifyDestroy(tm1, conf); + verifyDestroy(tm2, conf); + } + } + + @Test + public void testMultiNodeTokenRemovalShortSyncWithoutWatch() + throws Exception { + String connectString = 
zkServer.getConnectString(); + Configuration conf = getSecretConf(connectString); + // disable watch + conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false); + // make sync quick + conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 3); + // set the renew window and removal interval to be a + // short time to trigger the background cleanup + conf.setInt(RENEW_INTERVAL, 10); + conf.setInt(REMOVAL_SCAN_INTERVAL, 10); + + for (int i = 0; i < TEST_RETRIES; i++) { + ZKDelegationTokenSecretManagerImpl dtsm1 = + new ZKDelegationTokenSecretManagerImpl(conf); + ZKDelegationTokenSecretManagerImpl dtsm2 = + new ZKDelegationTokenSecretManagerImpl(conf); + DelegationTokenManager tm1, tm2; + tm1 = new DelegationTokenManager(conf, new Text("bla")); + tm1.setExternalDelegationTokenSecretManager(dtsm1); + tm2 = new DelegationTokenManager(conf, new Text("bla")); + tm2.setExternalDelegationTokenSecretManager(dtsm2); + + // time: X + // token expiry time: + // tm1: X + 10 + // tm2: X + 10 + Token token = + (Token) tm1.createToken( + UserGroupInformation.getCurrentUser(), "foo"); + Assert.assertNotNull(token); + tm2.verifyToken(token); + + // time: X + 9 + // token expiry time: + // tm1: X + 10 + // tm2: X + 19 + Thread.sleep(9 * 1000); + tm2.renewToken(token, "foo"); + tm1.verifyToken(token); + + // time: X + 13 + // token expiry time: (sync happened) + // tm1: X + 19 + // tm2: X + 19 + Thread.sleep(4 * 1000); + tm1.verifyToken(token); + tm2.verifyToken(token); + + dtsm1.stopThreads(); + dtsm2.stopThreads(); + verifyDestroy(tm1, conf); + verifyDestroy(tm2, conf); + } + } + + // This is very unlikely to happen in real case, but worth putting + // the case out + @Test + public void testMultiNodeTokenRemovalLongSyncWithoutWatch() + throws Exception { + String connectString = zkServer.getConnectString(); + Configuration conf = getSecretConf(connectString); + // disable watch + conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false); + // make sync quick + conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 20); + // set the renew window and removal interval to be a + // short time to trigger the background cleanup + conf.setInt(RENEW_INTERVAL, 10); + conf.setInt(REMOVAL_SCAN_INTERVAL, 10); + + for (int i = 0; i < TEST_RETRIES; i++) { + ZKDelegationTokenSecretManagerImpl dtsm1 = + new ZKDelegationTokenSecretManagerImpl(conf); + ZKDelegationTokenSecretManagerImpl dtsm2 = + new ZKDelegationTokenSecretManagerImpl(conf); + ZKDelegationTokenSecretManagerImpl dtsm3 = + new ZKDelegationTokenSecretManagerImpl(conf); + DelegationTokenManager tm1, tm2, tm3; + tm1 = new DelegationTokenManager(conf, new Text("bla")); + tm1.setExternalDelegationTokenSecretManager(dtsm1); + tm2 = new DelegationTokenManager(conf, new Text("bla")); + tm2.setExternalDelegationTokenSecretManager(dtsm2); + tm3 = new DelegationTokenManager(conf, new Text("bla")); + tm3.setExternalDelegationTokenSecretManager(dtsm3); + + // time: X + // token expiry time: + // tm1: X + 10 + // tm2: X + 10 + // tm3: No token due to no sync + Token token = + (Token) tm1.createToken( + UserGroupInformation.getCurrentUser(), "foo"); + Assert.assertNotNull(token); + tm2.verifyToken(token); + + // time: X + 9 + // token expiry time: + // tm1: X + 10 + // tm2: X + 19 + // tm3: No token due to no sync + Thread.sleep(9 * 1000); + long renewalTime = tm2.renewToken(token, "foo"); + LOG.info("Renew for token {} at current time {} renewal time {}", + token.getIdentifier(), Time.formatTime(Time.now()), + Time.formatTime(renewalTime)); + tm1.verifyToken(token); + + // time: X + 13 + // token 
expiry time: (sync din't happen) + // tm1: X + 10 + // tm2: X + 19 + // tm3: X + 19 due to fetch from zk + Thread.sleep(4 * 1000); + tm2.verifyToken(token); + tm3.verifyToken(token); + + dtsm1.stopThreads(); + dtsm2.stopThreads(); + dtsm3.stopThreads(); + verifyDestroy(tm1, conf); + verifyDestroy(tm2, conf); + verifyDestroy(tm3, conf); + } + } + +} From 4b5b54c73f2fd9146237087a59453e2b5d70f9ed Mon Sep 17 00:00:00 2001 From: Thomas Marquardt Date: Wed, 24 Jun 2020 18:37:25 +0000 Subject: [PATCH 052/131] HADOOP-17089: WASB: Update azure-storage-java SDK Contributed by Thomas Marquardt DETAILS: WASB depends on the Azure Storage Java SDK. There is a concurrency bug in the Azure Storage Java SDK that can cause the results of a list blobs operation to appear empty. This causes the Filesystem listStatus and similar APIs to return empty results. This has been seen in Spark work loads when jobs use more than one executor core. See Azure/azure-storage-java#546 for details on the bug in the Azure Storage SDK. TESTS: A new test was added to validate the fix. All tests are passing: wasb: mvn -T 1C -Dparallel-tests=wasb -Dscale -DtestsThreadCount=8 clean verify Tests run: 248, Failures: 0, Errors: 0, Skipped: 11 Tests run: 651, Failures: 0, Errors: 0, Skipped: 65 abfs: mvn -T 1C -Dparallel-tests=abfs -Dscale -DtestsThreadCount=8 clean verify Tests run: 64, Failures: 0, Errors: 0, Skipped: 0 Tests run: 437, Failures: 0, Errors: 0, Skipped: 33 Tests run: 206, Failures: 0, Errors: 0, Skipped: 24 --- hadoop-project/pom.xml | 2 +- ...tNativeAzureFileSystemConcurrencyLive.java | 59 ++++++++++++++++++- 2 files changed, 58 insertions(+), 3 deletions(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 48928b508e318..4e819cd896a2c 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1419,7 +1419,7 @@ com.microsoft.azure azure-storage - 7.0.0 + 7.0.1 diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java index 1c868ea0ff1e6..2c99b84394f82 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java @@ -20,6 +20,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.junit.Assert; @@ -130,15 +131,56 @@ public void testConcurrentDeleteFile() throws Exception { } } + /** + * Validate the bug fix for HADOOP-17089. 
Please note that we were never + * able to reproduce this except during a Spark job that ran for multiple days + * and in a hacked-up azure-storage SDK that added sleep before and after + * the call to factory.setNamespaceAware(true) as shown in the description of + * + * @see https://github.com/Azure/azure-storage-java/pull/546 + */ + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testConcurrentList() throws Exception { + final Path testDir = new Path("/tmp/data-loss/11230174258112/_temporary/0/_temporary/attempt_20200624190514_0006_m_0"); + final Path testFile = new Path(testDir, "part-00004-15ea87b1-312c-4fdf-1820-95afb3dfc1c3-a010.snappy.parquet"); + fs.create(testFile).close(); + List tasks = new ArrayList<>(THREAD_COUNT); + + for (int i = 0; i < THREAD_COUNT; i++) { + tasks.add(new ListTask(fs, testDir)); + } + + ExecutorService es = null; + try { + es = Executors.newFixedThreadPool(THREAD_COUNT); + + List> futures = es.invokeAll(tasks); + + for (Future future : futures) { + Assert.assertTrue(future.isDone()); + + // we are using Callable, so if an exception + // occurred during the operation, it will be thrown + // when we call get + long fileCount = future.get(); + assertEquals("The list should always contain 1 file.", 1, fileCount); + } + } finally { + if (es != null) { + es.shutdownNow(); + } + } + } + abstract class FileSystemTask implements Callable { private final FileSystem fileSystem; private final Path path; - protected FileSystem getFileSystem() { + FileSystem getFileSystem() { return this.fileSystem; } - protected Path getFilePath() { + Path getFilePath() { return this.path; } @@ -182,4 +224,17 @@ public Void call() throws Exception { return null; } } + + class ListTask extends FileSystemTask { + ListTask(FileSystem fs, Path p) { + super(fs, p); + } + + public Integer call() throws Exception { + FileSystem fs = getFileSystem(); + Path p = getFilePath(); + FileStatus[] files = fs.listStatus(p); + return files.length; + } + } } From 6a8fd73b273629d0c7c071cf4d090f67d9b96fe4 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Thu, 25 Jun 2020 17:27:58 +0200 Subject: [PATCH 053/131] YARN-10279. Avoid unnecessary QueueMappingEntity creations. 
Contributed by Marton Hudaky --- .../placement/QueuePlacementRuleUtils.java | 7 +------ .../placement/UserGroupMappingPlacementRule.java | 14 ++------------ 2 files changed, 3 insertions(+), 18 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java index 15c8fd8b70811..76e3e275fc9f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java @@ -74,12 +74,7 @@ public static QueueMapping validateAndGetAutoCreatedQueueMapping( validateQueueMappingUnderParentQueue(queueManager.getQueue( mapping.getParentQueue()), mapping.getParentQueue(), mapping.getFullPath()); - return QueueMapping.QueueMappingBuilder.create() - .type(mapping.getType()) - .source(mapping.getSource()) - .parentQueue(mapping.getParentQueue()) - .queue(mapping.getQueue()) - .build(); + return mapping; } return null; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java index a68f3c61772d8..71d9bb78805d9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java @@ -485,24 +485,14 @@ private static QueueMapping validateAndGetAutoCreatedQueueMapping( && (mapping.getParentQueue().equals(PRIMARY_GROUP_MAPPING) || mapping.getParentQueue().equals(SECONDARY_GROUP_MAPPING))) { // dynamic parent queue - return QueueMappingBuilder.create() - .type(mapping.getType()) - .source(mapping.getSource()) - .queue(mapping.getQueue()) - .parentQueue(mapping.getParentQueue()) - .build(); + return mapping; } else if (mapping.hasParentQueue()) { //if parent queue is specified, // then it should exist and be an instance of ManagedParentQueue QueuePlacementRuleUtils.validateQueueMappingUnderParentQueue( queueManager.getQueue(mapping.getParentQueue()), mapping.getParentQueue(), mapping.getQueue()); - return QueueMappingBuilder.create() - .type(mapping.getType()) - .source(mapping.getSource()) - .queue(mapping.getQueue()) - .parentQueue(mapping.getParentQueue()) - .build(); + return mapping; } return null; From d5e1bb6155496cf9d82e121dd1b65d0072312197 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Fri, 26 Jun 2020 01:29:38 -0700 Subject: [PATCH 054/131] HDFS-15429. mkdirs should work when parent dir is an internalDir and fallback configured. Contributed by Uma Maheswara Rao G. 
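A rough usage sketch of the new behaviour, following the ViewFileSystem test
patterns used elsewhere in this series (the example class and method names,
the /user1/hive/warehouse mount point, the target URIs and the "staging"
directory are placeholders, not taken from the patch):

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.FsConstants;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.viewfs.ConfigUtil;
  import org.apache.hadoop.fs.viewfs.Constants;

  public class ViewFsFallbackMkdirsExample {
    public static void createUnderInternalDir(Configuration conf,
        URI warehouseTargetUri, URI fallbackRootUri) throws Exception {
      // "/user1/hive" exists only as an internal (mount-table) directory;
      // the real mount link is one level deeper.
      ConfigUtil.addLink(conf, "/user1/hive/warehouse", warehouseTargetUri);
      ConfigUtil.addLinkFallback(conf, fallbackRootUri);
      URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME,
          Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null);
      try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) {
        // The parent "/user1/hive" resolves to an internal dir; with a
        // fallback link configured, the new directory is created under the
        // fallback target instead of failing as a read-only mount table.
        vfs.mkdirs(new Path("/user1/hive/staging"));
      }
    }
  }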
--- .../hadoop/fs/viewfs/ViewFileSystem.java | 25 ++ .../org/apache/hadoop/fs/viewfs/ViewFs.java | 28 +- .../TestViewFileSystemLinkFallback.java | 229 +++++++++++--- .../fs/viewfs/TestViewFsLinkFallback.java | 297 ++++++++++++++++++ 4 files changed, 542 insertions(+), 37 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 06052b80d9219..56448cb600b61 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -1339,6 +1339,31 @@ public boolean mkdirs(Path dir, FsPermission permission) dir.toString().substring(1))) { return true; // this is the stupid semantics of FileSystem } + + if (this.fsState.getRootFallbackLink() != null) { + FileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leafChild = (InodeTree.SlashPath.equals(dir)) ? + InodeTree.SlashPath.toString() : + dir.getName(); + Path dirToCreate = new Path(parent, leafChild); + + try { + return linkedFallbackFs.mkdirs(dirToCreate, permission); + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + StringBuilder msg = + new StringBuilder("Failed to create ").append(dirToCreate) + .append(" at fallback : ") + .append(linkedFallbackFs.getUri()); + LOG.debug(msg.toString(), e); + } + return false; + } + } + throw readOnlyMountTable("mkdirs", dir); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index d18233a8e9741..c769003aacffa 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -1134,11 +1134,35 @@ private FileStatus[] listStatusForFallbackLink() throws IOException { @Override public void mkdir(final Path dir, final FsPermission permission, - final boolean createParent) throws AccessControlException, - FileAlreadyExistsException { + final boolean createParent) throws IOException { if (theInternalDir.isRoot() && dir == null) { throw new FileAlreadyExistsException("/ already exits"); } + + if (this.fsState.getRootFallbackLink() != null) { + AbstractFileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leafChild = (InodeTree.SlashPath.equals(dir)) ? + InodeTree.SlashPath.toString() : + dir.getName(); + Path dirToCreate = new Path(parent, leafChild); + try { + // We are here because, the parent dir already exist in the mount + // table internal tree. So, let's create parent always in fallback. 
+ linkedFallbackFs.mkdir(dirToCreate, permission, true); + return; + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + StringBuilder msg = new StringBuilder("Failed to create {}") + .append(" at fallback fs : {}"); + LOG.debug(msg.toString(), dirToCreate, linkedFallbackFs.getUri()); + } + throw e; + } + } + throw readOnlyMountTable("mkdir", dir); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java index f7f5453cce05d..bec261cf3eb37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -69,7 +70,7 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest { "/tmp/TestViewFileSystemLinkFallback"; private final static Logger LOG = LoggerFactory.getLogger( TestViewFileSystemLinkFallback.class); - + private static URI viewFsDefaultClusterUri; @Override protected FileSystemTestHelper createFileSystemHelper() { @@ -93,6 +94,8 @@ public static void clusterSetupAtBeginning() throws IOException, FS_HDFS[i] = cluster.getFileSystem(i); } fsDefault = FS_HDFS[FS_INDEX_DEFAULT]; + viewFsDefaultClusterUri = new URI(FsConstants.VIEWFS_SCHEME, + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); } @AfterClass @@ -327,21 +330,20 @@ public void testListingWithFallbackLinkWithSameMountDirectories() fsTarget.mkdirs(dir1); fsTarget.mkdirs(dir2); String clusterName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE; - URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, clusterName, - "/", null, null); - HashSet beforeFallback = new HashSet<>(); - try(FileSystem vfs = FileSystem.get(viewFsUri, conf)) { - for (FileStatus stat : vfs.listStatus(new Path(viewFsUri.toString()))) { + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + for (FileStatus stat : vfs + .listStatus(new Path(viewFsDefaultClusterUri.toString()))) { beforeFallback.add(stat.getPath()); } } ConfigUtil.addLinkFallback(conf, clusterName, new Path(targetTestRoot, "fallbackDir").toUri()); - try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { HashSet afterFallback = new HashSet<>(); - for (FileStatus stat : vfs.listStatus(new Path(viewFsUri.toString()))) { + for (FileStatus stat : vfs + .listStatus(new Path(viewFsDefaultClusterUri.toString()))) { afterFallback.add(stat.getPath()); } afterFallback.removeAll(beforeFallback); @@ -349,7 +351,7 @@ public void testListingWithFallbackLinkWithSameMountDirectories() 1, afterFallback.size()); Path[] fallbackArray = new Path[afterFallback.size()]; // Only user1 should be listed as fallback link - Path expected = new Path(viewFsUri.toString(), "user1"); + Path expected = new Path(viewFsDefaultClusterUri.toString(), "user1"); assertEquals("Path did not match", expected, afterFallback.toArray(fallbackArray)[0]); @@ -401,23 +403,21 @@ public void testListingWithFallbackLinkWithSameMountDirectoryTree() fsTarget.mkdirs(dir2); fsTarget.setPermission(new Path(targetTestRoot, 
"fallbackDir/user1/hive/"), FsPermission.valueOf("-rwxr--r--")); - URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, - Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); HashSet beforeFallback = new HashSet<>(); - try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { - for (FileStatus stat : vfs - .listStatus(new Path(viewFsUri.toString(), "/user1/hive/"))) { + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + for (FileStatus stat : vfs.listStatus( + new Path(viewFsDefaultClusterUri.toString(), "/user1/hive/"))) { beforeFallback.add(stat.getPath()); } } ConfigUtil .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); - try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { HashSet afterFallback = new HashSet<>(); - for (FileStatus stat : vfs - .listStatus(new Path(viewFsUri.toString(), "/user1/hive/"))) { + for (FileStatus stat : vfs.listStatus( + new Path(viewFsDefaultClusterUri.toString(), "/user1/hive/"))) { afterFallback.add(stat.getPath()); if (dir1.getName().equals(stat.getPath().getName())) { // make sure fallback dir listed out with correct permissions, but not @@ -426,7 +426,6 @@ public void testListingWithFallbackLinkWithSameMountDirectoryTree() stat.getPermission()); } } - // //viewfs://default/user1/hive/warehouse afterFallback.removeAll(beforeFallback); assertEquals("The same directory name in fallback link should be shaded", @@ -475,23 +474,23 @@ public void testLSOnLinkParentWithFallbackLinkWithSameMountDirectoryTree() "fallbackDir/user1/hive/warehouse/partition-0"), FsPermission.valueOf("-rwxr--r--")); fsTarget.setPermission(targetTestRoot, FsPermission.valueOf("-rwxr--rw-")); - URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, - Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); HashSet beforeFallback = new HashSet<>(); - try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { for (FileStatus stat : vfs.listStatus( - new Path(viewFsUri.toString(), "/user1/hive/warehouse/"))) { + new Path(viewFsDefaultClusterUri.toString(), + "/user1/hive/warehouse/"))) { beforeFallback.add(stat.getPath()); } } ConfigUtil .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); - try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { HashSet afterFallback = new HashSet<>(); for (FileStatus stat : vfs.listStatus( - new Path(viewFsUri.toString(), "/user1/hive/warehouse/"))) { + new Path(viewFsDefaultClusterUri.toString(), + "/user1/hive/warehouse/"))) { afterFallback.add(stat.getPath()); if (dir1.getName().equals(stat.getPath().getName())) { // make sure fallback dir listed out with correct permissions, but not @@ -508,7 +507,7 @@ public void testLSOnLinkParentWithFallbackLinkWithSameMountDirectoryTree() /** * Tests ListStatus on root with fallback configured. 
- * =============================Example.======================================= + * =============================Example.====================================== * ===== Fallback path tree =============== Mount Path Tree ================== * =========================================================================== * * / / ***** / *** @@ -536,23 +535,21 @@ public void testLSOnRootWithFallbackLinkWithSameMountDirectories() fsTarget.mkdirs(dir1); fsTarget.mkdirs(dir2, FsPermission.valueOf("-rwxr--r--")); fsTarget.setPermission(targetTestRoot, FsPermission.valueOf("-rwxr--rw-")); - URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, - Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); HashSet beforeFallback = new HashSet<>(); - try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { for (FileStatus stat : vfs - .listStatus(new Path(viewFsUri.toString(), "/"))) { + .listStatus(new Path(viewFsDefaultClusterUri.toString(), "/"))) { beforeFallback.add(stat.getPath()); } } ConfigUtil .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); - try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { HashSet afterFallback = new HashSet<>(); for (FileStatus stat : vfs - .listStatus(new Path(viewFsUri.toString(), "/"))) { + .listStatus(new Path(viewFsDefaultClusterUri.toString(), "/"))) { afterFallback.add(stat.getPath()); if (dir1.getName().equals(stat.getPath().getName())) { // make sure fallback dir listed out with correct permissions, but not @@ -584,15 +581,14 @@ public void testLSOnLinkParentWhereMountLinkMatchesWithAFileUnderFallback() fsTarget.createNewFile(file1); Path dir2 = new Path(targetTestRoot, "fallbackDir/user1/hive/warehouse1"); fsTarget.mkdirs(dir2); - URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, - Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); ConfigUtil .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); - try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { for (FileStatus stat : vfs.listStatus( - new Path(viewFsUri.toString(), "/user1/hive/warehouse/"))) { + new Path(viewFsDefaultClusterUri.toString(), + "/user1/hive/warehouse/"))) { if (file1.getName().equals(stat.getPath().getName())) { // Link represents as symlink. assertFalse(stat.isFile()); @@ -606,4 +602,167 @@ public void testLSOnLinkParentWhereMountLinkMatchesWithAFileUnderFallback() } } } + + /** + * Tests that directory making should be successful when the parent directory + * is same as the existent fallback directory. The new dir should be created + * in fallback instead failing. 
+ */ + @Test + public void testMkdirsOfLinkParentWithFallbackLinkWithSameMountDirectoryTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path dir1 = new Path(targetTestRoot, + "fallbackDir/user1/hive/warehouse/partition-0"); + fsTarget.mkdirs(dir1); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path p = new Path("/user1/hive/warehouse/test"); + Path test = Path.mergePaths(fallbackTarget, p); + assertFalse(fsTarget.exists(test)); + assertTrue(vfs.mkdirs(p)); + assertTrue(fsTarget.exists(test)); + } + } + + /** + * Tests that directory making should be successful when attempting to create + * the root directory as it's already exist. + */ + @Test + public void testMkdirsOfRootWithFallbackLinkAndMountWithSameDirTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil + .addLink(conf, "/user1", new Path(targetTestRoot.toString()).toUri()); + Path dir1 = new Path(targetTestRoot, "fallbackDir/user1"); + fsTarget.mkdirs(dir1); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path p = new Path("/"); + Path test = Path.mergePaths(fallbackTarget, p); + assertTrue(fsTarget.exists(test)); + assertTrue(vfs.mkdirs(p)); + assertTrue(fsTarget.exists(test)); + } + } + + /** + * Tests the making of a new directory which is not matching to any of + * internal directory under the root. + */ + @Test + public void testMkdirsOfNewDirWithOutMatchingToMountOrFallbackDirTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + // user2 does not exist in fallback + Path p = new Path("/user2"); + Path test = Path.mergePaths(fallbackTarget, p); + assertFalse(fsTarget.exists(test)); + assertTrue(vfs.mkdirs(p)); + assertTrue(fsTarget.exists(test)); + } + } + + /** + * Tests that when the parent dirs does not exist in fallback but the parent + * dir is same as mount internal directory, then we create parent structure + * (mount internal directory tree structure) in fallback. 
+ */ + @Test + public void testMkdirsWithFallbackLinkWithMountPathMatchingDirExist() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + //user1 does not exist in fallback + Path immediateLevelToInternalDir = new Path("/user1/test"); + Path test = Path.mergePaths(fallbackTarget, immediateLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + assertTrue(vfs.mkdirs(immediateLevelToInternalDir)); + assertTrue(fsTarget.exists(test)); + } + } + + /** + * Tests that when the parent dirs does not exist in fallback but the + * immediate parent dir is not same as mount internal directory, then we + * create parent structure (mount internal directory tree structure) in + * fallback. + */ + @Test + public void testMkdirsOfDeepTreeWithFallbackLinkAndMountPathMatchingDirExist() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + //user1 does not exist in fallback + Path multipleLevelToInternalDir = new Path("/user1/test/test"); + Path test = Path.mergePaths(fallbackTarget, multipleLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + assertTrue(vfs.mkdirs(multipleLevelToInternalDir)); + assertTrue(fsTarget.exists(test)); + } + } + + /** + * Tests that mkdirs should return false when there is a problem with + * fallbackfs. + */ + @Test + public void testMkdirsShouldReturnFalseWhenFallbackFSNotAvailable() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/test", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + //user1/test1 does not exist in fallback + Path nextLevelToInternalDir = new Path("/user1/test1"); + Path test = Path.mergePaths(fallbackTarget, nextLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + // user1 exists in viewFS mount. + assertNotNull(vfs.getFileStatus(new Path("/user1"))); + // user1 does not exists in fallback. + assertFalse(fsTarget.exists(test.getParent())); + cluster.shutdownNameNodes(); // Stopping fallback server + // /user1/test1 does not exist in mount internal dir tree, it would + // attempt to create in fallback. + assertFalse(vfs.mkdirs(nextLevelToInternalDir)); + cluster.restartNameNodes(); + // should return true succeed when fallback fs is back to normal. 
+ assertTrue(vfs.mkdirs(nextLevelToInternalDir)); + assertTrue(fsTarget.exists(test)); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java new file mode 100644 index 0000000000000..49c0957c446d1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java @@ -0,0 +1,297 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.AbstractFileSystem; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Test for viewfs with LinkFallback mount table entries. + */ +public class TestViewFsLinkFallback { + private static FileSystem fsDefault; + private FileSystem fsTarget; + private static MiniDFSCluster cluster; + private static URI viewFsDefaultClusterUri; + private Path targetTestRoot; + + @BeforeClass + public static void clusterSetupAtBeginning() + throws IOException, URISyntaxException { + int nameSpacesCount = 3; + int dataNodesCount = 3; + int fsIndexDefault = 0; + Configuration conf = new Configuration(); + FileSystem[] fsHdfs = new FileSystem[nameSpacesCount]; + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, + true); + cluster = new MiniDFSCluster.Builder(conf) + .nnTopology(MiniDFSNNTopology.simpleFederatedTopology( + nameSpacesCount)) + .numDataNodes(dataNodesCount) + .build(); + cluster.waitClusterUp(); + + for (int i = 0; i < nameSpacesCount; i++) { + fsHdfs[i] = cluster.getFileSystem(i); + } + fsDefault = fsHdfs[fsIndexDefault]; + viewFsDefaultClusterUri = new URI(FsConstants.VIEWFS_SCHEME, + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); + + } + + @AfterClass + public static void clusterShutdownAtEnd() throws Exception { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Before + public void setUp() throws Exception { + fsTarget = fsDefault; + initializeTargetTestRoot(); + } + + private void initializeTargetTestRoot() throws IOException { + targetTestRoot = fsDefault.makeQualified(new Path("/")); + for (FileStatus status : fsDefault.listStatus(targetTestRoot)) { + fsDefault.delete(status.getPath(), true); + } + } + + /** + * Tests that directory making should be successful when the parent directory + * is same as the existent fallback directory. The new dir should be created + * in fallback instead failing. 
+ */ + @Test + public void testMkdirOfLinkParentWithFallbackLinkWithSameMountDirectoryTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path dir1 = new Path(targetTestRoot, + "fallbackDir/user1/hive/warehouse/partition-0"); + fsTarget.mkdirs(dir1); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path p = new Path("/user1/hive/warehouse/test"); + Path test = Path.mergePaths(fallbackTarget, p); + assertFalse(fsTarget.exists(test)); + vfs.mkdir(p, null, true); + assertTrue(fsTarget.exists(test)); + } + + /** + * Tests that directory making should be successful when attempting to create + * the root directory as it's already exist. + */ + @Test + public void testMkdirOfRootWithFallbackLinkAndMountWithSameDirTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil + .addLink(conf, "/user1", new Path(targetTestRoot.toString()).toUri()); + Path dir1 = new Path(targetTestRoot, "fallbackDir/user1"); + fsTarget.mkdirs(dir1); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path p = new Path("/"); + Path test = Path.mergePaths(fallbackTarget, p); + assertTrue(fsTarget.exists(test)); + vfs.mkdir(p, null, true); + assertTrue(fsTarget.exists(test)); + } + + /** + * Tests the making of a new directory which is not matching to any of + * internal directory under the root. + */ + @Test + public void testMkdirOfNewDirWithOutMatchingToMountOrFallbackDirTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + // user2 does not exist in fallback + Path p = new Path("/user2"); + Path test = Path.mergePaths(fallbackTarget, p); + assertFalse(fsTarget.exists(test)); + vfs.mkdir(p, null, true); + assertTrue(fsTarget.exists(test)); + } + + /** + * Tests that when the parent dirs does not exist in fallback but the parent + * dir is same as mount internal directory, then we create parent structure + * (mount internal directory tree structure) in fallback. 
+ */ + @Test + public void testMkdirWithFallbackLinkWithMountPathMatchingDirExist() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + //user1 does not exist in fallback + Path immediateLevelToInternalDir = new Path("/user1/test"); + Path test = Path.mergePaths(fallbackTarget, immediateLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + vfs.mkdir(immediateLevelToInternalDir, null, true); + assertTrue(fsTarget.exists(test)); + } + + /** + * Tests that when the parent dirs does not exist in fallback but the + * immediate parent dir is not same as mount internal directory, then we + * create parent structure (mount internal directory tree structure) in + * fallback. + */ + @Test + public void testMkdirOfDeepTreeWithFallbackLinkAndMountPathMatchingDirExist() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + //user1 does not exist in fallback + Path multipleLevelToInternalDir = new Path("/user1/test/test"); + Path test = Path.mergePaths(fallbackTarget, multipleLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + vfs.mkdir(multipleLevelToInternalDir, null, true); + assertTrue(fsTarget.exists(test)); + } + + /** + * Tests that mkdir with createParent false should still create parent in + * fallback when the same mount dir exist. + */ + @Test + public void testMkdirShouldCreateParentDirInFallbackWhenMountDirExist() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/test", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = AbstractFileSystem.get(viewFsDefaultClusterUri, + conf); + //user1/hive/test1 does not exist in fallback + Path multipleLevelToInternalDir = new Path("/user1/hive/test1"); + Path test = Path.mergePaths(fallbackTarget, multipleLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + // user1/hive exist in viewFS. + assertNotNull(vfs.getFileStatus(new Path("/user1/hive"))); + // user1/hive does not exists in fallback. + assertFalse(fsTarget.exists(test.getParent())); + + vfs.mkdir(multipleLevelToInternalDir, FsPermission.getDirDefault(), false); + assertTrue(fsTarget.exists(test)); + + } + + /** + * Tests that mkdir should fail with IOE when there is a problem with + * fallbackfs. 
+ */ + @Test + public void testMkdirShouldFailWhenFallbackFSNotAvailable() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/test", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = AbstractFileSystem.get(viewFsDefaultClusterUri, + conf); + //user1/test1 does not exist in fallback + Path nextLevelToInternalDir = new Path("/user1/test1"); + Path test = Path.mergePaths(fallbackTarget, nextLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + // user1 exists in viewFS mount. + assertNotNull(vfs.getFileStatus(new Path("/user1"))); + // user1 does not exists in fallback. + assertFalse(fsTarget.exists(test.getParent())); + cluster.shutdownNameNodes(); + try { + // /user1/test1 does not exist in mount internal dir tree, it would + // attempt to create in fallback. + vfs.mkdir(nextLevelToInternalDir, FsPermission.getDirDefault(), + false); + Assert.fail("It should throw IOE when fallback fs not available."); + } catch (IOException e) { + cluster.restartNameNodes(); + // should succeed when fallback fs is back to normal. + vfs.mkdir(nextLevelToInternalDir, FsPermission.getDirDefault(), + false); + } + assertTrue(fsTarget.exists(test)); + } + +} From 33d3df7ee2b3942560e9affe1409854ac3e8bb96 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 26 Jun 2020 11:13:11 +0200 Subject: [PATCH 055/131] YARN-10277. CapacityScheduler test TestUserGroupMappingPlacementRule should build proper hierarchy. Contributed by Szilard Nemeth --- .../TestUserGroupMappingPlacementRule.java | 204 +++++++++++++----- 1 file changed, 151 insertions(+), 53 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java index 432c009fbe926..5028ce6c1322b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java @@ -24,7 +24,11 @@ import static org.mockito.Mockito.isNull; import java.util.Arrays; +import java.util.List; +import java.util.Map; +import com.google.common.collect.Maps; +import org.apache.commons.compress.utils.Lists; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.security.Groups; @@ -35,6 +39,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.MappingType; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.QueueMappingBuilder; import org.apache.hadoop.yarn.server.resourcemanager.placement.TestUserGroupMappingPlacementRule.QueueMappingTestData.QueueMappingTestDataBuilder; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AbstractCSQueue; 
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ManagedParentQueue; @@ -45,8 +50,147 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestUserGroupMappingPlacementRule { + private static final Logger LOG = + LoggerFactory.getLogger(TestUserGroupMappingPlacementRule.class); + + private static class MockQueueHierarchyBuilder { + private static final String ROOT = "root"; + private static final String QUEUE_SEP = "."; + private List queuePaths = Lists.newArrayList(); + private List managedParentQueues = Lists.newArrayList(); + private CapacitySchedulerQueueManager queueManager; + + public static MockQueueHierarchyBuilder create() { + return new MockQueueHierarchyBuilder(); + } + + public MockQueueHierarchyBuilder withQueueManager( + CapacitySchedulerQueueManager queueManager) { + this.queueManager = queueManager; + return this; + } + + public MockQueueHierarchyBuilder withQueue(String queue) { + this.queuePaths.add(queue); + return this; + } + + public MockQueueHierarchyBuilder withManagedParentQueue( + String managedQueue) { + this.managedParentQueues.add(managedQueue); + return this; + } + + public void build() { + if (this.queueManager == null) { + throw new IllegalStateException( + "QueueManager instance is not provided!"); + } + + for (String managedParentQueue : managedParentQueues) { + if (!queuePaths.contains(managedParentQueue)) { + queuePaths.add(managedParentQueue); + } else { + throw new IllegalStateException("Cannot add a managed parent " + + "and a simple queue with the same path"); + } + } + + Map queues = Maps.newHashMap(); + for (String queuePath : queuePaths) { + LOG.info("Processing queue path: " + queuePath); + addQueues(queues, queuePath); + } + } + + private void addQueues(Map queues, + String queuePath) { + final String[] pathComponents = queuePath.split("\\" + QUEUE_SEP); + + String currentQueuePath = ""; + for (int i = 0; i < pathComponents.length; ++i) { + boolean isLeaf = i == pathComponents.length - 1; + String queueName = pathComponents[i]; + String parentPath = currentQueuePath; + currentQueuePath += currentQueuePath.equals("") ? 
+ queueName : QUEUE_SEP + queueName; + + if (managedParentQueues.contains(parentPath) && !isLeaf) { + throw new IllegalStateException("Cannot add a queue under " + + "managed parent"); + } + if (!queues.containsKey(currentQueuePath)) { + ParentQueue parentQueue = (ParentQueue) queues.get(parentPath); + AbstractCSQueue queue = createQueue(parentQueue, queueName, + currentQueuePath, isLeaf); + queues.put(currentQueuePath, queue); + } + } + } + + private AbstractCSQueue createQueue(ParentQueue parentQueue, + String queueName, String currentQueuePath, boolean isLeaf) { + if (queueName.equals(ROOT)) { + return createRootQueue(ROOT); + } else if (managedParentQueues.contains(currentQueuePath)) { + return addManagedParentQueueAsChildOf(parentQueue, queueName); + } else if (isLeaf) { + return addLeafQueueAsChildOf(parentQueue, queueName); + } else { + return addParentQueueAsChildOf(parentQueue, queueName); + } + } + + private AbstractCSQueue createRootQueue(String rootQueueName) { + ParentQueue root = mock(ParentQueue.class); + when(root.getQueuePath()).thenReturn(rootQueueName); + when(queueManager.getQueue(rootQueueName)).thenReturn(root); + return root; + } + + private AbstractCSQueue addParentQueueAsChildOf(ParentQueue parent, + String queueName) { + ParentQueue queue = mock(ParentQueue.class); + setQueueFields(parent, queue, queueName); + return queue; + } + + private AbstractCSQueue addManagedParentQueueAsChildOf(ParentQueue parent, + String queueName) { + ManagedParentQueue queue = mock(ManagedParentQueue.class); + setQueueFields(parent, queue, queueName); + return queue; + } + + private AbstractCSQueue addLeafQueueAsChildOf(ParentQueue parent, + String queueName) { + LeafQueue queue = mock(LeafQueue.class); + setQueueFields(parent, queue, queueName); + return queue; + } + + private void setQueueFields(ParentQueue parent, AbstractCSQueue newQueue, + String queueName) { + String fullPathOfQueue = parent.getQueuePath() + QUEUE_SEP + queueName; + addQueueToQueueManager(queueName, newQueue, fullPathOfQueue); + + when(newQueue.getParent()).thenReturn(parent); + when(newQueue.getQueuePath()).thenReturn(fullPathOfQueue); + when(newQueue.getQueueName()).thenReturn(queueName); + } + + private void addQueueToQueueManager(String queueName, AbstractCSQueue queue, + String fullPathOfQueue) { + when(queueManager.getQueue(queueName)).thenReturn(queue); + when(queueManager.getQueue(fullPathOfQueue)).thenReturn(queue); + when(queueManager.getQueueByFullName(fullPathOfQueue)).thenReturn(queue); + } + } + YarnConfiguration conf = new YarnConfiguration(); @Before @@ -71,61 +215,15 @@ private void verifyQueueMapping(QueueMappingTestData queueMappingTestData) CapacitySchedulerQueueManager queueManager = mock(CapacitySchedulerQueueManager.class); - ParentQueue root = mock(ParentQueue.class); - when(root.getQueuePath()).thenReturn("root"); - - ParentQueue agroup = mock(ParentQueue.class); - when(agroup.getQueuePath()).thenReturn("root.agroup"); - ParentQueue bsubgroup2 = mock(ParentQueue.class); - when(bsubgroup2.getQueuePath()).thenReturn("root.bsubgroup2"); - when(bsubgroup2.getParent()).thenReturn(root); - - ManagedParentQueue managedParent = mock(ManagedParentQueue.class); - when(managedParent.getQueueName()).thenReturn("managedParent"); - when(managedParent.getQueuePath()).thenReturn("root.managedParent"); - - LeafQueue a = mock(LeafQueue.class); - when(a.getQueuePath()).thenReturn("root.agroup.a"); - when(a.getParent()).thenReturn(agroup); - LeafQueue b = mock(LeafQueue.class); - 
when(b.getQueuePath()).thenReturn("root.bsubgroup2.b"); - when(b.getParent()).thenReturn(bsubgroup2); - LeafQueue asubgroup2 = mock(LeafQueue.class); - when(asubgroup2.getQueuePath()).thenReturn("root.asubgroup2"); - when(asubgroup2.getParent()).thenReturn(root); + MockQueueHierarchyBuilder.create() + .withQueueManager(queueManager) + .withQueue("root.agroup.a") + .withQueue("root.asubgroup2") + .withQueue("root.bsubgroup2.b") + .withManagedParentQueue("root.managedParent") + .build(); when(queueManager.getQueue(isNull())).thenReturn(null); - when(queueManager.getQueue("a")).thenReturn(a); - when(a.getParent()).thenReturn(agroup); - when(queueManager.getQueue("b")).thenReturn(b); - when(b.getParent()).thenReturn(bsubgroup2); - when(queueManager.getQueue("agroup")).thenReturn(agroup); - when(agroup.getParent()).thenReturn(root); - when(queueManager.getQueue("bsubgroup2")).thenReturn(bsubgroup2); - when(bsubgroup2.getParent()).thenReturn(root); - when(queueManager.getQueue("asubgroup2")).thenReturn(asubgroup2); - when(asubgroup2.getParent()).thenReturn(root); - when(queueManager.getQueue("managedParent")).thenReturn(managedParent); - when(managedParent.getParent()).thenReturn(root); - - when(queueManager.getQueue("root")).thenReturn(root); - when(queueManager.getQueue("root.agroup")).thenReturn(agroup); - when(queueManager.getQueue("root.bsubgroup2")).thenReturn(bsubgroup2); - when(queueManager.getQueue("root.asubgroup2")).thenReturn(asubgroup2); - when(queueManager.getQueue("root.agroup.a")).thenReturn(a); - when(queueManager.getQueue("root.bsubgroup2.b")).thenReturn(b); - when(queueManager.getQueue("root.managedParent")).thenReturn(managedParent); - - when(queueManager.getQueueByFullName("root.agroup")).thenReturn(agroup); - when(queueManager.getQueueByFullName("root.bsubgroup2")) - .thenReturn(bsubgroup2); - when(queueManager.getQueueByFullName("root.asubgroup2")) - .thenReturn(asubgroup2); - when(queueManager.getQueueByFullName("root.agroup.a")).thenReturn(a); - when(queueManager.getQueueByFullName("root.bsubgroup2.b")).thenReturn(b); - when(queueManager.getQueueByFullName("root.managedParent")) - .thenReturn(managedParent); - rule.setQueueManager(queueManager); ApplicationSubmissionContext asc = Records.newRecord( From 6459cc73e6c0878be51c0f2f70ec9868325dec34 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 26 Jun 2020 13:15:14 +0200 Subject: [PATCH 056/131] YARN-10318. ApplicationHistory Web UI incorrect column indexing. 
Contributed by Andras Gyori --- .../org/apache/hadoop/yarn/server/webapp/AppsBlock.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java index b89ca02e09dca..6737e4d8fa9f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java @@ -150,7 +150,8 @@ protected void renderData(Block html) { TBODY> tbody = html.table("#apps").thead().tr().th(".id", "ID").th(".user", "User") .th(".name", "Name").th(".type", "Application Type") - .th(".queue", "Queue").th(".priority", "Application Priority") + .th(".apptag", "Application Tags").th(".queue", "Queue") + .th(".priority", "Application Priority") .th(".starttime", "StartTime") .th(".launchtime", "LaunchTime") .th(".finishtime", "FinishTime") @@ -185,6 +186,10 @@ protected void renderData(Block html) { .append( StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app .getType()))) + .append("\",\"") + .append( + StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4( + app.getApplicationTags() == null ? "" : app.getApplicationTags()))) .append("\",\"") .append( StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app From e0c1d8a96905bfbedbddde9000fc08ce2af1d277 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 26 Jun 2020 13:21:55 +0200 Subject: [PATCH 057/131] YARN-10327. Remove duplication of checking for invalid application ID in TestLogsCLI. Contributed by Marton Hudaky --- .../apache/hadoop/yarn/client/cli/TestLogsCLI.java | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java index 80f39b8f90302..24256a0147142 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java @@ -167,19 +167,6 @@ public void testInvalidOpts() throws Exception { "options parsing failed: Unrecognized option: -InvalidOpts")); } - @Test(timeout = 5000l) - public void testInvalidApplicationId() throws Exception { - YarnClient mockYarnClient = createMockYarnClient( - YarnApplicationState.FINISHED, - UserGroupInformation.getCurrentUser().getShortUserName()); - LogsCLI cli = new LogsCLIForTest(mockYarnClient); - cli.setConf(conf); - - int exitCode = cli.run( new String[] { "-applicationId", "not_an_app_id"}); - assertTrue(exitCode == -1); - assertTrue(sysErrStream.toString().startsWith("Invalid ApplicationId specified")); - } - @Test(timeout = 5000L) public void testInvalidAMContainerId() throws Exception { conf.setBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, true); From 2c03524fa4be754aa95889d4ac0f5d57dca8cda8 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Fri, 26 Jun 2020 20:43:27 +0530 Subject: [PATCH 058/131] YARN-6526. 
Refactoring SQLFederationStateStore by avoiding to recreate a connection at every call. COntributed by Bilwa S T. --- .../store/impl/SQLFederationStateStore.java | 124 ++++++++---------- .../FederationStateStoreClientMetrics.java | 18 +++ .../utils/FederationStateStoreUtils.java | 14 ++ .../impl/FederationStateStoreBaseTest.java | 15 ++- .../impl/HSQLDBFederationStateStore.java | 3 +- .../impl/TestSQLFederationStateStore.java | 28 ++++ .../TestZookeeperFederationStateStore.java | 4 +- 7 files changed, 130 insertions(+), 76 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java index 07dc7e479995a..8ceef4310f225 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java @@ -78,6 +78,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.annotations.VisibleForTesting; import com.zaxxer.hikari.HikariDataSource; /** @@ -141,6 +142,8 @@ public class SQLFederationStateStore implements FederationStateStore { private int maximumPoolSize; private HikariDataSource dataSource = null; private final Clock clock = new MonotonicClock(); + @VisibleForTesting + Connection conn = null; @Override public void init(Configuration conf) throws YarnException { @@ -173,6 +176,13 @@ public void init(Configuration conf) throws YarnException { dataSource.setMaximumPoolSize(maximumPoolSize); LOG.info("Initialized connection pool to the Federation StateStore " + "database at address: " + url); + try { + conn = getConnection(); + LOG.debug("Connection created"); + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Not able to get Connection", e); + } } @Override @@ -185,15 +195,13 @@ public SubClusterRegisterResponse registerSubCluster( .validate(registerSubClusterRequest); CallableStatement cstmt = null; - Connection conn = null; SubClusterInfo subClusterInfo = registerSubClusterRequest.getSubClusterInfo(); SubClusterId subClusterId = subClusterInfo.getSubClusterId(); try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_REGISTER_SUBCLUSTER); + cstmt = getCallableStatement(CALL_SP_REGISTER_SUBCLUSTER); // Set the parameters for the stored procedure cstmt.setString(1, subClusterId.getId()); @@ -238,9 +246,10 @@ public SubClusterRegisterResponse registerSubCluster( + " into the StateStore", e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt); } + return SubClusterRegisterResponse.newInstance(); } @@ -254,14 +263,12 @@ public SubClusterDeregisterResponse deregisterSubCluster( .validate(subClusterDeregisterRequest); CallableStatement cstmt = null; - Connection conn = null; SubClusterId subClusterId = subClusterDeregisterRequest.getSubClusterId(); SubClusterState state = subClusterDeregisterRequest.getState(); try { - conn = getConnection(); - 
cstmt = conn.prepareCall(CALL_SP_DEREGISTER_SUBCLUSTER); + cstmt = getCallableStatement(CALL_SP_DEREGISTER_SUBCLUSTER); // Set the parameters for the stored procedure cstmt.setString(1, subClusterId.getId()); @@ -299,8 +306,8 @@ public SubClusterDeregisterResponse deregisterSubCluster( + state.toString(), e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt); } return SubClusterDeregisterResponse.newInstance(); } @@ -315,14 +322,12 @@ public SubClusterHeartbeatResponse subClusterHeartbeat( .validate(subClusterHeartbeatRequest); CallableStatement cstmt = null; - Connection conn = null; SubClusterId subClusterId = subClusterHeartbeatRequest.getSubClusterId(); SubClusterState state = subClusterHeartbeatRequest.getState(); try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_SUBCLUSTER_HEARTBEAT); + cstmt = getCallableStatement(CALL_SP_SUBCLUSTER_HEARTBEAT); // Set the parameters for the stored procedure cstmt.setString(1, subClusterId.getId()); @@ -362,8 +367,8 @@ public SubClusterHeartbeatResponse subClusterHeartbeat( + subClusterId, e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt); } return SubClusterHeartbeatResponse.newInstance(); } @@ -376,14 +381,12 @@ public GetSubClusterInfoResponse getSubCluster( FederationMembershipStateStoreInputValidator.validate(subClusterRequest); CallableStatement cstmt = null; - Connection conn = null; SubClusterInfo subClusterInfo = null; SubClusterId subClusterId = subClusterRequest.getSubClusterId(); try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_GET_SUBCLUSTER); + cstmt = getCallableStatement(CALL_SP_GET_SUBCLUSTER); cstmt.setString(1, subClusterId.getId()); // Set the parameters for the stored procedure @@ -443,8 +446,8 @@ public GetSubClusterInfoResponse getSubCluster( FederationStateStoreUtils.logAndThrowRetriableException(LOG, "Unable to obtain the SubCluster information for " + subClusterId, e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt); } return GetSubClusterInfoResponse.newInstance(subClusterInfo); } @@ -453,13 +456,11 @@ public GetSubClusterInfoResponse getSubCluster( public GetSubClustersInfoResponse getSubClusters( GetSubClustersInfoRequest subClustersRequest) throws YarnException { CallableStatement cstmt = null; - Connection conn = null; ResultSet rs = null; List subClusters = new ArrayList(); try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_GET_SUBCLUSTERS); + cstmt = getCallableStatement(CALL_SP_GET_SUBCLUSTERS); // Execute the query long startTime = clock.getTime(); @@ -510,8 +511,8 @@ public GetSubClustersInfoResponse getSubClusters( FederationStateStoreUtils.logAndThrowRetriableException(LOG, "Unable to obtain the information for all the SubClusters ", e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn, rs); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt, null, rs); } return 
GetSubClustersInfoResponse.newInstance(subClusters); } @@ -524,7 +525,6 @@ public AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster( FederationApplicationHomeSubClusterStoreInputValidator.validate(request); CallableStatement cstmt = null; - Connection conn = null; String subClusterHome = null; ApplicationId appId = @@ -533,8 +533,7 @@ public AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster( request.getApplicationHomeSubCluster().getHomeSubCluster(); try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_ADD_APPLICATION_HOME_SUBCLUSTER); + cstmt = getCallableStatement(CALL_SP_ADD_APPLICATION_HOME_SUBCLUSTER); // Set the parameters for the stored procedure cstmt.setString(1, appId.toString()); @@ -596,8 +595,8 @@ public AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster( + request.getApplicationHomeSubCluster().getApplicationId(), e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt); } return AddApplicationHomeSubClusterResponse .newInstance(SubClusterId.newInstance(subClusterHome)); @@ -611,7 +610,6 @@ public UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster( FederationApplicationHomeSubClusterStoreInputValidator.validate(request); CallableStatement cstmt = null; - Connection conn = null; ApplicationId appId = request.getApplicationHomeSubCluster().getApplicationId(); @@ -619,8 +617,7 @@ public UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster( request.getApplicationHomeSubCluster().getHomeSubCluster(); try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_UPDATE_APPLICATION_HOME_SUBCLUSTER); + cstmt = getCallableStatement(CALL_SP_UPDATE_APPLICATION_HOME_SUBCLUSTER); // Set the parameters for the stored procedure cstmt.setString(1, appId.toString()); @@ -660,8 +657,8 @@ public UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster( + request.getApplicationHomeSubCluster().getApplicationId(), e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt); } return UpdateApplicationHomeSubClusterResponse.newInstance(); } @@ -673,13 +670,11 @@ public GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster( FederationApplicationHomeSubClusterStoreInputValidator.validate(request); CallableStatement cstmt = null; - Connection conn = null; SubClusterId homeRM = null; try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_GET_APPLICATION_HOME_SUBCLUSTER); + cstmt = getCallableStatement(CALL_SP_GET_APPLICATION_HOME_SUBCLUSTER); // Set the parameters for the stored procedure cstmt.setString(1, request.getApplicationId().toString()); @@ -711,9 +706,8 @@ public GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster( + "for the specified application " + request.getApplicationId(), e); } finally { - - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt); } return GetApplicationHomeSubClusterResponse .newInstance(ApplicationHomeSubCluster @@ -724,14 +718,12 @@ public GetApplicationHomeSubClusterResponse 
getApplicationHomeSubCluster( public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( GetApplicationsHomeSubClusterRequest request) throws YarnException { CallableStatement cstmt = null; - Connection conn = null; ResultSet rs = null; List appsHomeSubClusters = new ArrayList(); try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_GET_APPLICATIONS_HOME_SUBCLUSTER); + cstmt = getCallableStatement(CALL_SP_GET_APPLICATIONS_HOME_SUBCLUSTER); // Execute the query long startTime = clock.getTime(); @@ -757,8 +749,8 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( FederationStateStoreUtils.logAndThrowRetriableException(LOG, "Unable to obtain the information for all the applications ", e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn, rs); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt, null, rs); } return GetApplicationsHomeSubClusterResponse .newInstance(appsHomeSubClusters); @@ -772,11 +764,9 @@ public DeleteApplicationHomeSubClusterResponse deleteApplicationHomeSubCluster( FederationApplicationHomeSubClusterStoreInputValidator.validate(request); CallableStatement cstmt = null; - Connection conn = null; try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_DELETE_APPLICATION_HOME_SUBCLUSTER); + cstmt = getCallableStatement(CALL_SP_DELETE_APPLICATION_HOME_SUBCLUSTER); // Set the parameters for the stored procedure cstmt.setString(1, request.getApplicationId().toString()); @@ -812,8 +802,8 @@ public DeleteApplicationHomeSubClusterResponse deleteApplicationHomeSubCluster( FederationStateStoreUtils.logAndThrowRetriableException(LOG, "Unable to delete the application " + request.getApplicationId(), e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt); } return DeleteApplicationHomeSubClusterResponse.newInstance(); } @@ -826,12 +816,10 @@ public GetSubClusterPolicyConfigurationResponse getPolicyConfiguration( FederationPolicyStoreInputValidator.validate(request); CallableStatement cstmt = null; - Connection conn = null; SubClusterPolicyConfiguration subClusterPolicyConfiguration = null; try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_GET_POLICY_CONFIGURATION); + cstmt = getCallableStatement(CALL_SP_GET_POLICY_CONFIGURATION); // Set the parameters for the stored procedure cstmt.setString(1, request.getQueue()); @@ -864,8 +852,8 @@ public GetSubClusterPolicyConfigurationResponse getPolicyConfiguration( "Unable to select the policy for the queue :" + request.getQueue(), e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt); } return GetSubClusterPolicyConfigurationResponse .newInstance(subClusterPolicyConfiguration); @@ -879,13 +867,11 @@ public SetSubClusterPolicyConfigurationResponse setPolicyConfiguration( FederationPolicyStoreInputValidator.validate(request); CallableStatement cstmt = null; - Connection conn = null; SubClusterPolicyConfiguration policyConf = request.getPolicyConfiguration(); try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_SET_POLICY_CONFIGURATION); + cstmt = 
getCallableStatement(CALL_SP_SET_POLICY_CONFIGURATION); // Set the parameters for the stored procedure cstmt.setString(1, policyConf.getQueue()); @@ -925,8 +911,8 @@ public SetSubClusterPolicyConfigurationResponse setPolicyConfiguration( + policyConf.getQueue(), e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt); } return SetSubClusterPolicyConfigurationResponse.newInstance(); } @@ -936,14 +922,12 @@ public GetSubClusterPoliciesConfigurationsResponse getPoliciesConfigurations( GetSubClusterPoliciesConfigurationsRequest request) throws YarnException { CallableStatement cstmt = null; - Connection conn = null; ResultSet rs = null; List policyConfigurations = new ArrayList(); try { - conn = getConnection(); - cstmt = conn.prepareCall(CALL_SP_GET_POLICIES_CONFIGURATIONS); + cstmt = getCallableStatement(CALL_SP_GET_POLICIES_CONFIGURATIONS); // Execute the query long startTime = clock.getTime(); @@ -971,8 +955,8 @@ public GetSubClusterPoliciesConfigurationsResponse getPoliciesConfigurations( FederationStateStoreUtils.logAndThrowRetriableException(LOG, "Unable to obtain the policy information for all the queues.", e); } finally { - // Return to the pool the CallableStatement and the Connection - FederationStateStoreUtils.returnToPool(LOG, cstmt, conn, rs); + // Return to the pool the CallableStatement + FederationStateStoreUtils.returnToPool(LOG, cstmt, null, rs); } return GetSubClusterPoliciesConfigurationsResponse @@ -993,6 +977,8 @@ public Version loadVersion() { public void close() throws Exception { if (dataSource != null) { dataSource.close(); + LOG.debug("Connection closed"); + FederationStateStoreClientMetrics.decrConnections(); } } @@ -1003,9 +989,15 @@ public void close() throws Exception { * @throws SQLException on failure */ public Connection getConnection() throws SQLException { + FederationStateStoreClientMetrics.incrConnections(); return dataSource.getConnection(); } + private CallableStatement getCallableStatement(String procedure) + throws SQLException { + return conn.prepareCall(procedure); + } + private static byte[] getByteArray(ByteBuffer bb) { byte[] ba = new byte[bb.limit()]; bb.get(ba); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/FederationStateStoreClientMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/FederationStateStoreClientMetrics.java index 27b46cde8e58f..d04f850b5dd96 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/FederationStateStoreClientMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/FederationStateStoreClientMetrics.java @@ -31,6 +31,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.metrics2.lib.MutableRate; import 
org.apache.hadoop.yarn.server.federation.store.FederationStateStore; @@ -80,6 +81,9 @@ public final class FederationStateStoreClientMetrics implements MetricsSource { @Metric("Total number of failed StateStore calls") private static MutableCounterLong totalFailedCalls; + @Metric("Total number of Connections") + private static MutableGaugeInt totalConnections; + // This after the static members are initialized, or the constructor will // throw a NullPointerException private static final FederationStateStoreClientMetrics S_INSTANCE = @@ -146,6 +150,14 @@ public static void succeededStateStoreCall(long duration) { methodQuantileMetric.add(duration); } + public static void incrConnections() { + totalConnections.incr(); + } + + public static void decrConnections() { + totalConnections.decr(); + } + @Override public void getMetrics(MetricsCollector collector, boolean all) { REGISTRY.snapshot(collector.addRecord(REGISTRY.info()), all); @@ -181,4 +193,10 @@ static long getNumSucceededCalls() { static double getLatencySucceededCalls() { return totalSucceededCalls.lastStat().mean(); } + + @VisibleForTesting + public static int getNumConnections() { + return totalConnections.value(); + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationStateStoreUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationStateStoreUtils.java index 3b870debefc7f..27a4f7dba5327 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationStateStoreUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationStateStoreUtils.java @@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreException; import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException; import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreRetriableException; +import org.apache.hadoop.yarn.server.federation.store.metrics.FederationStateStoreClientMetrics; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,6 +70,7 @@ public static void returnToPool(Logger log, CallableStatement cstmt, if (conn != null) { try { conn.close(); + FederationStateStoreClientMetrics.decrConnections(); } catch (SQLException e) { logAndThrowException(log, "Exception while trying to close Connection", e); @@ -98,6 +100,18 @@ public static void returnToPool(Logger log, CallableStatement cstmt, returnToPool(log, cstmt, conn, null); } + /** + * Returns the SQL FederationStateStore connections to the pool. + * + * @param log the logger interface + * @param cstmt the interface used to execute SQL stored procedures + * @throws YarnException on failure + */ + public static void returnToPool(Logger log, CallableStatement cstmt) + throws YarnException { + returnToPool(log, cstmt, null); + } + /** * Throws an exception due to an error in FederationStateStore. 
* diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java index b17f8702a94ad..d0e6485b02883 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java @@ -68,7 +68,7 @@ public abstract class FederationStateStoreBaseTest { private static final MonotonicClock CLOCK = new MonotonicClock(); - private FederationStateStore stateStore = createStateStore(); + private FederationStateStore stateStore; protected abstract FederationStateStore createStateStore(); @@ -76,6 +76,7 @@ public abstract class FederationStateStoreBaseTest { @Before public void before() throws IOException, YarnException { + stateStore = createStateStore(); stateStore.init(conf); } @@ -516,7 +517,7 @@ public void testGetPoliciesConfigurations() throws Exception { // Convenience methods - private SubClusterInfo createSubClusterInfo(SubClusterId subClusterId) { + SubClusterInfo createSubClusterInfo(SubClusterId subClusterId) { String amRMAddress = "1.2.3.4:1"; String clientRMAddress = "1.2.3.4:2"; @@ -535,7 +536,7 @@ private SubClusterPolicyConfiguration createSCPolicyConf(String queueName, return SubClusterPolicyConfiguration.newInstance(queueName, policyType, bb); } - private void addApplicationHomeSC(ApplicationId appId, + void addApplicationHomeSC(ApplicationId appId, SubClusterId subClusterId) throws YarnException { ApplicationHomeSubCluster ahsc = ApplicationHomeSubCluster.newInstance(appId, subClusterId); @@ -558,14 +559,14 @@ private void registerSubCluster(SubClusterInfo subClusterInfo) SubClusterRegisterRequest.newInstance(subClusterInfo)); } - private SubClusterInfo querySubClusterInfo(SubClusterId subClusterId) + SubClusterInfo querySubClusterInfo(SubClusterId subClusterId) throws YarnException { GetSubClusterInfoRequest request = GetSubClusterInfoRequest.newInstance(subClusterId); return stateStore.getSubCluster(request).getSubClusterInfo(); } - private SubClusterId queryApplicationHomeSC(ApplicationId appId) + SubClusterId queryApplicationHomeSC(ApplicationId appId) throws YarnException { GetApplicationHomeSubClusterRequest request = GetApplicationHomeSubClusterRequest.newInstance(appId); @@ -594,4 +595,8 @@ protected Configuration getConf() { return conf; } + protected FederationStateStore getStateStore() { + return stateStore; + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java index 289a3a6112600..c3d0a9e1bbd53 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java @@ -209,7 +209,7 @@ public void init(Configuration conf) { LOG.error("ERROR: failed to init HSQLDB " + e1.getMessage()); } try { - conn = getConnection(); + conn = super.conn; LOG.info("Database Init: Start"); @@ -234,7 +234,6 @@ public void init(Configuration conf) { conn.prepareStatement(SP_GETPOLICIESCONFIGURATIONS).execute(); LOG.info("Database Init: Complete"); - conn.close(); } catch (SQLException e) { LOG.error("ERROR: failed to inizialize HSQLDB " + e.getMessage()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java index d4e6cc53f674a..3c1d327b393aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java @@ -17,8 +17,16 @@ package org.apache.hadoop.yarn.server.federation.store.impl; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.metrics.FederationStateStoreClientMetrics; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.junit.Assert; +import org.junit.Test; /** * Unit tests for SQLFederationStateStore. 
@@ -46,4 +54,24 @@ protected FederationStateStore createStateStore() { super.setConf(conf); return new HSQLDBFederationStateStore(); } + + @Test + public void testSqlConnectionsCreatedCount() throws YarnException { + FederationStateStore stateStore = getStateStore(); + SubClusterId subClusterId = SubClusterId.newInstance("SC"); + ApplicationId appId = ApplicationId.newInstance(1, 1); + + SubClusterInfo subClusterInfo = createSubClusterInfo(subClusterId); + + stateStore.registerSubCluster( + SubClusterRegisterRequest.newInstance(subClusterInfo)); + Assert.assertEquals(subClusterInfo, querySubClusterInfo(subClusterId)); + + addApplicationHomeSC(appId, subClusterId); + Assert.assertEquals(subClusterId, queryApplicationHomeSC(appId)); + + // Verify if connection is created only once at statestore init + Assert.assertEquals(1, + FederationStateStoreClientMetrics.getNumConnections()); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java index 390b8037b1654..fe28641eb2560 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java @@ -72,7 +72,6 @@ public void before() throws IOException, YarnException { @After public void after() throws Exception { super.after(); - curatorFramework.close(); try { curatorTestingServer.stop(); @@ -82,8 +81,7 @@ public void after() throws Exception { @Override protected FederationStateStore createStateStore() { - Configuration conf = new Configuration(); - super.setConf(conf); + super.setConf(getConf()); return new ZookeeperFederationStateStore(); } } \ No newline at end of file From bed0a3a37404e9defda13a5bffe5609e72466e46 Mon Sep 17 00:00:00 2001 From: Virajith Jalaparti Date: Fri, 26 Jun 2020 13:19:16 -0700 Subject: [PATCH 059/131] HDFS-15436. Default mount table name used by ViewFileSystem should be configurable (#2100) * HDFS-15436. 
Default mount table name used by ViewFileSystem should be configurable * Replace Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE use in tests * Address Uma's comments on PR#2100 * Sort lists in test to match without concern to order * Address comments, fix checkstyle and fix failing tests * Fix checkstyle --- .../apache/hadoop/fs/viewfs/ConfigUtil.java | 33 +++++---- .../apache/hadoop/fs/viewfs/Constants.java | 10 ++- .../apache/hadoop/fs/viewfs/InodeTree.java | 2 +- .../TestViewFsWithAuthorityLocalFs.java | 5 +- .../hadoop/fs/viewfs/ViewFsBaseTest.java | 10 ++- .../hadoop/fs/viewfs/ViewFsTestSetup.java | 2 +- .../src/site/markdown/ViewFsOverloadScheme.md | 33 +++++++++ .../fs/viewfs/TestViewFileSystemHdfs.java | 6 +- ...ileSystemOverloadSchemeWithHdfsScheme.java | 68 +++++++++++++++++-- 9 files changed, 141 insertions(+), 28 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java index 6dd1f6589478e..7d29b8f44ca62 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java @@ -66,8 +66,7 @@ public static void addLink(Configuration conf, final String mountTableName, */ public static void addLink(final Configuration conf, final String src, final URI target) { - addLink( conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, - src, target); + addLink(conf, getDefaultMountTableName(conf), src, target); } /** @@ -88,8 +87,7 @@ public static void addLinkMergeSlash(Configuration conf, * @param target */ public static void addLinkMergeSlash(Configuration conf, final URI target) { - addLinkMergeSlash(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, - target); + addLinkMergeSlash(conf, getDefaultMountTableName(conf), target); } /** @@ -110,8 +108,7 @@ public static void addLinkFallback(Configuration conf, * @param target */ public static void addLinkFallback(Configuration conf, final URI target) { - addLinkFallback(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, - target); + addLinkFallback(conf, getDefaultMountTableName(conf), target); } /** @@ -132,7 +129,7 @@ public static void addLinkMerge(Configuration conf, * @param targets */ public static void addLinkMerge(Configuration conf, final URI[] targets) { - addLinkMerge(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, targets); + addLinkMerge(conf, getDefaultMountTableName(conf), targets); } /** @@ -166,8 +163,7 @@ public static void addLinkNfly(Configuration conf, String mountTableName, public static void addLinkNfly(final Configuration conf, final String src, final URI ... 
targets) { - addLinkNfly(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, src, null, - targets); + addLinkNfly(conf, getDefaultMountTableName(conf), src, null, targets); } /** @@ -177,8 +173,7 @@ public static void addLinkNfly(final Configuration conf, final String src, */ public static void setHomeDirConf(final Configuration conf, final String homedir) { - setHomeDirConf( conf, - Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, homedir); + setHomeDirConf(conf, getDefaultMountTableName(conf), homedir); } /** @@ -202,7 +197,7 @@ public static void setHomeDirConf(final Configuration conf, * @return home dir value, null if variable is not in conf */ public static String getHomeDirValue(final Configuration conf) { - return getHomeDirValue(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE); + return getHomeDirValue(conf, getDefaultMountTableName(conf)); } /** @@ -216,4 +211,18 @@ public static String getHomeDirValue(final Configuration conf, return conf.get(getConfigViewFsPrefix(mountTableName) + "." + Constants.CONFIG_VIEWFS_HOMEDIR); } + + /** + * Get the name of the default mount table to use. If + * {@link Constants#CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY} is specified, + * it's value is returned. Otherwise, + * {@link Constants#CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE} is returned. + * + * @param conf Configuration to use. + * @return the name of the default mount table to use. + */ + public static String getDefaultMountTableName(final Configuration conf) { + return conf.get(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY, + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java index f454f63084cda..28ebf73cf5534 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java @@ -41,12 +41,18 @@ public interface Constants { * then the hadoop default value (/user) is used. */ public static final String CONFIG_VIEWFS_HOMEDIR = "homedir"; - + + /** + * Config key to specify the name of the default mount table. + */ + String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY = + "fs.viewfs.mounttable.default.name.key"; + /** * Config variable name for the default mount table. */ public static final String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE = "default"; - + /** * Config variable full prefix for the default mount table. 
*/ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java index d1e5d3a4e5fba..3d709b13bfc09 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java @@ -465,7 +465,7 @@ protected InodeTree(final Configuration config, final String viewName) FileAlreadyExistsException, IOException { String mountTableName = viewName; if (mountTableName == null) { - mountTableName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE; + mountTableName = ConfigUtil.getDefaultMountTableName(config); } homedirPrefix = ConfigUtil.getHomeDirValue(config, mountTableName); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java index 2e498f2c0a023..fd5de72ed71ad 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java @@ -48,10 +48,9 @@ public void setUp() throws Exception { fcTarget = FileContext.getLocalFSFileContext(); super.setUp(); // this sets up conf (and fcView which we replace) - // Now create a viewfs using a mount table called "default" - // hence viewfs://default/ + // Now create a viewfs using a mount table using the {MOUNT_TABLE_NAME} schemeWithAuthority = - new URI(FsConstants.VIEWFS_SCHEME, "default", "/", null, null); + new URI(FsConstants.VIEWFS_SCHEME, MOUNT_TABLE_NAME, "/", null, null); fcView = FileContext.getFileContext(schemeWithAuthority, conf); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java index 90722aab2f8a7..21b0c159e2aae 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java @@ -97,6 +97,8 @@ *

*/ abstract public class ViewFsBaseTest { + protected static final String MOUNT_TABLE_NAME = "mycluster"; + FileContext fcView; // the view file system - the mounts are here FileContext fcTarget; // the target file system - the mount will point here Path targetTestRoot; @@ -130,6 +132,9 @@ public void setUp() throws Exception { // Set up the defaultMT in the config with our mount point links conf = new Configuration(); + conf.set( + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY, + MOUNT_TABLE_NAME); ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri()); ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri()); @@ -1011,9 +1016,8 @@ public void testListStatusWithNoGroups() throws Exception { userUgi.doAs(new PrivilegedExceptionAction() { @Override public Object run() throws Exception { - String clusterName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE; - URI viewFsUri = - new URI(FsConstants.VIEWFS_SCHEME, clusterName, "/", null, null); + URI viewFsUri = new URI( + FsConstants.VIEWFS_SCHEME, MOUNT_TABLE_NAME, "/", null, null); FileSystem vfs = FileSystem.get(viewFsUri, conf); LambdaTestUtils.intercept(IOException.class, "There is no primary group for UGI", () -> vfs diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java index efced73943ed5..b2d7416aa7675 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java @@ -153,7 +153,7 @@ static void addMountLinksToFile(String mountTable, String[] sources, String prefix = new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(".") .append((mountTable == null - ? Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE + ? ConfigUtil.getDefaultMountTableName(conf) : mountTable)) .append(".").toString(); out.writeBytes(""); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md index e65c5458676ea..feb0ba2718385 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md @@ -150,6 +150,39 @@ DFSAdmin commands with View File System Overload Scheme Please refer to the [HDFSCommands Guide](./HDFSCommands.html#dfsadmin_with_ViewFsOverloadScheme) +Accessing paths without authority +--------------------------------- + +Accessing paths like `hdfs:///foo/bar`, `hdfs:/foo/bar` or `viewfs:/foo/bar`, where the authority (cluster name or hostname) of the path is not specified, is very common. +This is especially true when the same code is expected to run on multiple clusters with different names or HDFS Namenodes. + +When `ViewFileSystemOverloadScheme` is used (as described above), and if (a) the scheme of the path being accessed is different from the scheme of the path specified as `fs.defaultFS` +and (b) if the path doesn't have an authority specified, accessing the path can result in an error like `Empty Mount table in config for viewfs://default/`. +For example, when the following configuration is used but a path like `viewfs:/foo/bar` or `viewfs:///foo/bar` is accessed, such an error arises. 
+```xml
+<property>
+  <name>fs.hdfs.impl</name>
+  <value>org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme</value>
+</property>
+
+<property>
+  <name>fs.defaultFS</name>
+  <value>hdfs://cluster/</value>
+</property>
+```
+
+#### Solution
+To avoid the above problem, the configuration `fs.viewfs.mounttable.default.name.key` has to be set to the name of the cluster, i.e., the following should be added to `core-site.xml`
+```xml
+<property>
+  <name>fs.viewfs.mounttable.default.name.key</name>
+  <value>cluster</value>
+</property>
+```
+The string in this configuration `cluster` should match the name of the authority in the value of `fs.defaultFS`. Further, the configuration should have a mount table
+configured correctly as in the above examples, i.e., the configurations `fs.viewfs.mounttable.*cluster*.link.` should be set (note the same string
+`cluster` is used in these configurations).
+
 Appendix: A Mount Table Configuration with XInclude
 ---------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index b8bed1df84a6e..b3836956c79db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -299,14 +299,16 @@ private void testNflyRepair(NflyFSystem.NflyKey repairKey)
       new URI(uri2.getScheme(), uri2.getAuthority(), "/", null, null)
     };
+    String clusterName = "mycluster";
     final Configuration testConf = new Configuration(conf);
+    testConf.set(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY,
+        clusterName);
     testConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
     final String testString = "Hello Nfly!";
     final Path nflyRoot = new Path("/nflyroot");
-    ConfigUtil.addLinkNfly(testConf,
-        Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
+        clusterName,
         nflyRoot.toString(),
         "minReplication=2," + repairKey + "=true", testUris);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
index 3060bd6722e3d..f0f3aae1ba6c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
@@ -17,15 +17,14 @@
  */
 package org.apache.hadoop.fs.viewfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -47,6 +46,9 @@ import org.junit.Before;
 import org.junit.Test;
+import static org.junit.Assert.*;
+
+
 /**
  * Tests ViewFileSystemOverloadScheme with configured mount links.
*/ @@ -236,6 +238,64 @@ public void testListStatusOnNonMountedPath() throws Exception { } } + /** + * Create mount links as follows + * hdfs://localhost:xxx/HDFSUser --> hdfs://localhost:xxx/HDFSUser/ + * hdfs://localhost:xxx/local --> file://TEST_ROOT_DIR/root/ + * Check that "viewfs:/" paths without authority can work when the + * default mount table name is set correctly. + */ + @Test + public void testAccessViewFsPathWithoutAuthority() throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + addMountLinks(defaultFSURI.getAuthority(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER }, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString() }, + conf); + + // /HDFSUser/test + Path hdfsDir = new Path(HDFS_USER_FOLDER, "test"); + // /local/test + Path localDir = new Path(LOCAL_FOLDER, "test"); + FileStatus[] expectedStatus; + + try (FileSystem fs = FileSystem.get(conf)) { + fs.mkdirs(hdfsDir); // /HDFSUser/test + fs.mkdirs(localDir); // /local/test + expectedStatus = fs.listStatus(new Path("/")); + } + + // check for viewfs path without authority + Path viewFsRootPath = new Path("viewfs:/"); + try { + viewFsRootPath.getFileSystem(conf); + Assert.fail( + "Mount table with authority default should not be initialized"); + } catch (IOException e) { + assertTrue(e.getMessage().contains( + "Empty Mount table in config for viewfs://default/")); + } + + // set the name of the default mount table here and + // subsequent calls should succeed. + conf.set(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY, + defaultFSURI.getAuthority()); + + try (FileSystem fs = viewFsRootPath.getFileSystem(conf)) { + FileStatus[] status = fs.listStatus(viewFsRootPath); + // compare only the final components of the paths as + // full paths have different schemes (hdfs:/ vs. viewfs:/). + List expectedPaths = Arrays.stream(expectedStatus) + .map(s -> s.getPath().getName()).sorted() + .collect(Collectors.toList()); + List paths = Arrays.stream(status) + .map(s -> s.getPath().getName()).sorted() + .collect(Collectors.toList()); + assertEquals(expectedPaths, paths); + } + } + /** * Create mount links as follows hdfs://localhost:xxx/HDFSUser --> * hdfs://localhost:xxx/HDFSUser/ hdfs://localhost:xxx/local --> From 8db38c98a6c6ce9215ea998a2f544b2eabca4340 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sat, 27 Jun 2020 19:13:01 +0530 Subject: [PATCH 060/131] HDFS-15378. TestReconstructStripedFile#testErasureCodingWorkerXmitsWeight is failing on trunk. Contributed by hemanthboyina. 
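The test was flaky because it asserted on the DataNode xmits-in-progress counter at a single instant, while the counter only returns to zero after the reconstruction work winds down; the fix below polls for the value instead. A minimal sketch of that polling pattern, using a stand-in AtomicInteger in place of the real DataNode accessor (the class and variable names here are illustrative, not part of the patch):

```java
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.test.GenericTestUtils;

public class XmitsWaitSketch {
  public static void main(String[] args)
      throws TimeoutException, InterruptedException {
    // Stand-in for DataNode#getXmitsInProgress(); another thread drives it
    // back to zero once the simulated reconstruction finishes.
    final AtomicInteger xmits = new AtomicInteger(1);
    new Thread(() -> {
      try {
        Thread.sleep(100);
      } catch (InterruptedException ignored) {
        // ignore for the sketch
      }
      xmits.set(0);
    }).start();

    // Poll every 10 ms and fail with TimeoutException after 2500 ms,
    // rather than asserting the value immediately.
    GenericTestUtils.waitFor(() -> xmits.get() == 0, 10, 2500);
  }
}
```
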
--- .../org/apache/hadoop/hdfs/TestReconstructStripedFile.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index b119e7855b776..6156c3da33c1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -571,7 +571,8 @@ public void stripedBlockReconstruction() throws IOException { DataNodeFaultInjector.set(oldInjector); for (final DataNode curDn : cluster.getDataNodes()) { GenericTestUtils.waitFor(() -> curDn.getXceiverCount() <= 1, 10, 60000); - assertEquals(0, curDn.getXmitsInProgress()); + GenericTestUtils.waitFor(() -> curDn.getXmitsInProgress() == 0, 10, + 2500); } } } From c71ce7ac3370e220995bad0ae8b59d962c8d30a7 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Sun, 28 Jun 2020 16:02:47 +0900 Subject: [PATCH 061/131] HDFS-15421. IBR leak causes standby NN to be stuck in safe mode. --- .../hdfs/server/namenode/FSDirTruncateOp.java | 6 +- .../hdfs/server/namenode/FSEditLogLoader.java | 8 +- ...iling.java => TestUpdateBlockTailing.java} | 116 +++++++++++++++++- 3 files changed, 124 insertions(+), 6 deletions(-) rename hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/{TestAddBlockTailing.java => TestUpdateBlockTailing.java} (61%) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java index bf55d30591074..ee50ee92a8320 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java @@ -262,7 +262,11 @@ static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip, uc.setTruncateBlock(new BlockInfoContiguous(oldBlock, oldBlock.getReplication())); uc.getTruncateBlock().setNumBytes(oldBlock.getNumBytes() - lastBlockDelta); - uc.getTruncateBlock().setGenerationStamp(newBlock.getGenerationStamp()); + final long newGenerationStamp = newBlock.getGenerationStamp(); + uc.getTruncateBlock().setGenerationStamp(newGenerationStamp); + // Update global generation stamp in Standby NameNode + blockManager.getBlockIdManager().setGenerationStampIfGreater( + newGenerationStamp); truncatedBlockUC = oldBlock; NameNode.stateChangeLog.debug("BLOCK* prepareFileForTruncate: " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 294296d2d36d5..c390b652eeaeb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -1150,8 +1150,12 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op, oldBlock.setNumBytes(newBlock.getNumBytes()); boolean changeMade = oldBlock.getGenerationStamp() != newBlock.getGenerationStamp(); - oldBlock.setGenerationStamp(newBlock.getGenerationStamp()); - + 
final long newGenerationStamp = newBlock.getGenerationStamp(); + oldBlock.setGenerationStamp(newGenerationStamp); + // Update global generation stamp in Standby NameNode + fsNamesys.getBlockManager().getBlockIdManager(). + setGenerationStampIfGreater(newGenerationStamp); + if (!oldBlock.isComplete() && (!isLastBlock || op.shouldCompleteLastBlock())) { changeMade = true; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestAddBlockTailing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestUpdateBlockTailing.java similarity index 61% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestAddBlockTailing.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestUpdateBlockTailing.java index 48c09eda7948c..1462314f01445 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestAddBlockTailing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestUpdateBlockTailing.java @@ -22,9 +22,13 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.EnumSet; +import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -43,17 +47,18 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; /** - * Tests the race condition that IBR and add block may result + * Tests the race condition that IBR and update block may result * in inconsistent block genstamp. 
*/ -public class TestAddBlockTailing { +public class TestUpdateBlockTailing { private static final int BLOCK_SIZE = 8192; - private static final String TEST_DIR = "/TestAddBlockTailing"; + private static final String TEST_DIR = "/TestUpdateBlockTailing"; private static MiniQJMHACluster qjmhaCluster; private static MiniDFSCluster dfsCluster; @@ -87,6 +92,12 @@ public static void shutDownCluster() throws IOException { } } + @Before + public void reset() throws Exception { + dfsCluster.transitionToStandby(1); + dfsCluster.transitionToActive(0); + } + @Test public void testStandbyAddBlockIBRRace() throws Exception { String testFile = TEST_DIR +"/testStandbyAddBlockIBRRace"; @@ -161,4 +172,103 @@ public void testStandbyAddBlockIBRRace() throws Exception { rpc1.delete(testFile, false); } + + @Test + public void testStandbyAppendBlock() throws Exception { + final String testFile = TEST_DIR +"/testStandbyAppendBlock"; + final long fileLen = 1 << 16; + // Create a file + DFSTestUtil.createFile(dfs, new Path(testFile), fileLen, (short)1, 0); + // NN1 tails OP_SET_GENSTAMP_V2 and OP_ADD_BLOCK + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Append block without newBlock flag + try (FSDataOutputStream out = dfs.append(new Path(testFile))) { + final byte[] data = new byte[1 << 16]; + ThreadLocalRandom.current().nextBytes(data); + out.write(data); + } + + // NN1 tails OP_APPEND, OP_SET_GENSTAMP_V2, and OP_UPDATE_BLOCKS + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Remove the testFile + final ClientProtocol rpc0 = dfsCluster.getNameNode(0).getRpcServer(); + rpc0.delete(testFile, false); + } + + @Test + public void testStandbyAppendNewBlock() throws Exception { + final String testFile = TEST_DIR +"/testStandbyAppendNewBlock"; + final long fileLen = 1 << 16; + // Create a file + DFSTestUtil.createFile(dfs, new Path(testFile), fileLen, (short)1, 0); + // NN1 tails OP_SET_GENSTAMP_V2 and OP_ADD_BLOCK + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Append block with newBlock flag + try (FSDataOutputStream out = dfs.append(new Path(testFile), + EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)) { + final byte[] data = new byte[1 << 16]; + ThreadLocalRandom.current().nextBytes(data); + out.write(data); + } + + // NN1 tails OP_APPEND, OP_SET_GENSTAMP_V2, and OP_ADD_BLOCK + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Remove the testFile + final ClientProtocol rpc0 = dfsCluster.getNameNode(0).getRpcServer(); + rpc0.delete(testFile, false); + } + + @Test + public void testStandbyTruncateBlock() throws Exception { + final String testFile = TEST_DIR +"/testStandbyTruncateBlock"; + final long fileLen = 1 << 16; + // Create a file + DFSTestUtil.createFile(dfs, new Path(testFile), fileLen, (short)1, 0); + // NN1 
tails OP_SET_GENSTAMP_V2 and OP_ADD_BLOCK + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Truncate block + dfs.truncate(new Path(testFile), fileLen/2); + + // NN1 tails OP_SET_GENSTAMP_V2 and OP_TRUNCATE + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Remove the testFile + final ClientProtocol rpc0 = dfsCluster.getNameNode(0).getRpcServer(); + rpc0.delete(testFile, false); + } } From 0be26811f3db49abb62d12e6a051a31553495da8 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Mon, 29 Jun 2020 09:21:24 -0700 Subject: [PATCH 062/131] YARN-10328. Fixed ZK Curator NodeExists exception in YARN service AM logs Contributed by Bilwa S T via eyang --- .../yarn/service/registry/YarnRegistryViewForProviders.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java index cecca5f6cf266..06066d546e043 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java @@ -143,7 +143,10 @@ public void putComponent(String serviceClass, ServiceRecord record) throws IOException { String path = RegistryUtils.componentPath( user, serviceClass, serviceName, componentName); - registryOperations.mknode(RegistryPathUtils.parentOf(path), true); + String parentPath = RegistryPathUtils.parentOf(path); + if (!registryOperations.exists(parentPath)) { + registryOperations.mknode(parentPath, true); + } registryOperations.bind(path, record, BindFlags.OVERWRITE); } From 74fc13cf91818a70f434401244f7560c4db3a676 Mon Sep 17 00:00:00 2001 From: Eric E Payne Date: Mon, 29 Jun 2020 18:39:53 +0000 Subject: [PATCH 063/131] YARN-9903: Support reservations continue looking for Node Labels. Contributed by Jim Brennan (Jim_Brennan). 
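The scheduler previously skipped the "reservations continue looking" path for any partition other than the default (empty) label; this change lets reserved resources be re-examined and unreserved on labeled partitions as well, as the updated AbstractCSQueue, LeafQueue and RegularContainerAllocator checks below show. A small sketch of how the behaviour is toggled, assuming the standard CapacityScheduler property name (the key is not shown in this patch and is quoted from memory):

```java
import org.apache.hadoop.conf.Configuration;

public class ReservationLookingConfSketch {
  // Assumed CapacityScheduler key; its default is true.
  private static final String RESERVE_CONT_LOOK_ALL_NODES =
      "yarn.scheduler.capacity.reservations-continue-look-all-nodes";

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // With this patch, keeping the flag enabled also allows containers to be
    // unreserved on labeled partitions, not only on the default partition.
    conf.setBoolean(RESERVE_CONT_LOOK_ALL_NODES, true);
    System.out.println(RESERVE_CONT_LOOK_ALL_NODES + " = "
        + conf.getBoolean(RESERVE_CONT_LOOK_ALL_NODES, true));
  }
}
```
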
--- .../scheduler/capacity/AbstractCSQueue.java | 10 +- .../scheduler/capacity/LeafQueue.java | 3 +- .../allocator/RegularContainerAllocator.java | 16 +- .../TestNodeLabelContainerAllocation.java | 289 ++++++++++++++++++ 4 files changed, 300 insertions(+), 18 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 968d971ce1ffb..f1467a10626a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -1076,14 +1076,12 @@ boolean canAssignToThisQueue(Resource clusterResource, if (Resources.greaterThanOrEqual(resourceCalculator, clusterResource, usedExceptKillable, currentLimitResource)) { - // if reservation continous looking enabled, check to see if could we + // if reservation continue looking enabled, check to see if could we // potentially use this node instead of a reserved node if the application // has reserved containers. - // TODO, now only consider reservation cases when the node has no label - if (this.reservationsContinueLooking && nodePartition.equals( - RMNodeLabelsManager.NO_LABEL) && Resources.greaterThan( - resourceCalculator, clusterResource, resourceCouldBeUnreserved, - Resources.none())) { + if (this.reservationsContinueLooking + && Resources.greaterThan(resourceCalculator, clusterResource, + resourceCouldBeUnreserved, Resources.none())) { // resource-without-reserved = used - reserved Resource newTotalWithoutReservedResource = Resources.subtract( usedExceptKillable, resourceCouldBeUnreserved); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 4d83538c981f3..05150a373ce8c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -1574,8 +1574,7 @@ protected boolean canAssignToUser(Resource clusterResource, user.getUsed(nodePartition), limit)) { // if enabled, check to see if could we potentially use this node instead // of a reserved node if the application has reserved containers - if (this.reservationsContinueLooking && nodePartition.equals( - CommonNodeLabelsManager.NO_LABEL)) { + if (this.reservationsContinueLooking) { if (Resources.lessThanOrEqual(resourceCalculator, clusterResource, Resources.subtract(user.getUsed(), application.getCurrentReservation()), limit)) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java index 287dc67ded43e..cced238b60164 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java @@ -79,12 +79,11 @@ private boolean checkHeadroom(Resource clusterResource, String nodePartition) { // If headroom + currentReservation < required, we cannot allocate this // require - Resource resourceCouldBeUnReserved = application.getCurrentReservation(); - if (!application.getCSLeafQueue().getReservationContinueLooking() - || !nodePartition.equals(RMNodeLabelsManager.NO_LABEL)) { - // If we don't allow reservation continuous looking, OR we're looking at - // non-default node partition, we won't allow to unreserve before - // allocation. + Resource resourceCouldBeUnReserved = + application.getAppAttemptResourceUsage().getReserved(nodePartition); + if (!application.getCSLeafQueue().getReservationContinueLooking()) { + // If we don't allow reservation continuous looking, + // we won't allow to unreserve before allocation. resourceCouldBeUnReserved = Resources.none(); } return Resources.greaterThanOrEqual(rc, clusterResource, Resources.add( @@ -583,13 +582,10 @@ private ContainerAllocation assignContainer(Resource clusterResource, // Allocate... // We will only do continuous reservation when this is not allocated from // reserved container - if (rmContainer == null && reservationsContinueLooking - && node.getLabels().isEmpty()) { + if (rmContainer == null && reservationsContinueLooking) { // when reservationsContinueLooking is set, we may need to unreserve // some containers to meet this queue, its parents', or the users' // resource limits. - // TODO, need change here when we want to support continuous reservation - // looking for labeled partitions. 
if (!shouldAllocOrReserveNewContainer || needToUnreserve) { if (!needToUnreserve) { // If we shouldn't allocate/reserve new container then we should diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java index 55f98d2ec8031..4ac57dd809ca6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java @@ -642,6 +642,295 @@ public RMNodeLabelsManager createNodeLabelManager() { rm1.close(); } + @Test (timeout = 120000) + public void testContainerReservationContinueLookingWithLabels() + throws Exception { + // set node -> label + mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x")); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), + toSet("x"), NodeId.newInstance("h2", 0), toSet("x"))); + + // inject node label manager + MockRM rm1 = new MockRM( + TestUtils.getConfigurationWithQueueLabels(conf)) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB); // label = x + + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId()); + LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1"); + + ContainerId containerId; + + // launch an app to queue a1 (label = x) + MockRMAppSubmissionData data1 = + MockRMAppSubmissionData.Builder.createWithMemory(2 * GB, rm1) + .withAppName("app1") + .withUser("user") + .withAcls(null) + .withQueue("a1") + .withUnmanagedAM(false) + .withAmLabel("x") + .build(); + RMApp app1 = MockRMAppSubmitter.submit(rm1, data1); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1 + .getApplicationAttemptId()); + + // Verify live on node1 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + Assert.assertEquals(1, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(2 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(2 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // request map containers for app1. 
+ am1.allocate("*", 5 * GB, 2, 5, new ArrayList(), "x"); + + // Do node heartbeat to allocate first mapper on node1 + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // Verify live on node1 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + Assert.assertEquals(2, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(7 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(7 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // Do node heartbeat to allocate second mapper on node2 + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + + // Verify live on node2 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // node1 7 GB used, node2 5 GB used + Assert.assertEquals(3, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(12 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(12 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // request reducer containers for app1. 
+ am1.allocate("*", 3 * GB, 2, 10, new ArrayList(), "x"); + + // Do node heartbeat to reserve reducer on node1 + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // node1 7 GB used and 3 GB reserved, node2 5 GB used + Assert.assertEquals(3, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(1, schedulerApp1.getReservedContainers().size()); + Assert.assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(3 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(15 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(3 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // Do node heartbeat to allocate container for second reducer on node2 + // This should unreserve the reserved container + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + + // Verify live on node2 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 5); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // node1 7 GB used and 0 GB reserved, node2 8 GB used + Assert.assertEquals(4, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(0, schedulerApp1.getReservedContainers().size()); + Assert.assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(15 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + rm1.close(); + } + + @Test (timeout = 120000) + public void testContainerReservationContinueLookingWithDefaultLabels() + throws Exception { + // This is the same as testContainerReservationContinueLookingWithLabels, + // but this test doesn't specify the label expression in the + // ResourceRequest, instead it uses default queue label expressions + mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x")); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), + toSet("x"), NodeId.newInstance("h2", 0), toSet("x"))); + + // inject node label manager + MockRM rm1 = new MockRM( + TestUtils.getConfigurationWithDefaultQueueLabels(conf)) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB); // label = x + + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId()); + LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1"); + + ContainerId containerId; + + // launch an app to queue a1 (label = x) + MockRMAppSubmissionData data1 = + MockRMAppSubmissionData.Builder.createWithMemory(2 * GB, rm1) + .withAppName("app1") + .withUser("user") + .withAcls(null) + .withQueue("a1") + .withUnmanagedAM(false) + .build(); + RMApp app1 = MockRMAppSubmitter.submit(rm1, data1); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1 + .getApplicationAttemptId()); + + // Verify live on node1 + containerId = 
ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + Assert.assertEquals(1, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(2 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(2 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // request map containers for app1. + am1.allocate("*", 5 * GB, 2, 5, new ArrayList(), null); + + // Do node heartbeat to allocate first mapper on node1 + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // Verify live on node1 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + Assert.assertEquals(2, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(7 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(7 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // Do node heartbeat to allocate second mapper on node2 + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + + // Verify live on node2 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // node1 7 GB used, node2 5 GB used + Assert.assertEquals(3, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(12 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(12 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // request reducer containers for app1. 
+ am1.allocate("*", 3 * GB, 2, 10, new ArrayList(), null); + + // Do node heartbeat to reserve reducer on node1 + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // node1 7 GB used and 3 GB reserved, node2 5 GB used + Assert.assertEquals(3, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(1, schedulerApp1.getReservedContainers().size()); + Assert.assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(3 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(15 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(3 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // Do node heartbeat to allocate container for second reducer on node2 + // This should unreserve the reserved container + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + + // Verify live on node2 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 5); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // node1 7 GB used and 0 GB reserved, node2 8 GB used + Assert.assertEquals(4, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(0, schedulerApp1.getReservedContainers().size()); + Assert.assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(15 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + rm1.close(); + } + @Test (timeout = 120000) public void testRMContainerLeakInLeafQueue() throws Exception { // set node -> label From 7e73cad97419671b1020906bef36ad60aaa70ab9 Mon Sep 17 00:00:00 2001 From: Masatake Iwasaki Date: Tue, 30 Jun 2020 10:52:25 +0900 Subject: [PATCH 064/131] MAPREDUCE-7280. MiniMRYarnCluster has hard-coded timeout waiting to start history server, with no way to disable. (#2065) --- .../hadoop/mapred/TestMiniMRBringup.java | 24 +++++++++++-- .../mapreduce/v2/MiniMRYarnCluster.java | 34 +++++++++++++------ 2 files changed, 45 insertions(+), 13 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRBringup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRBringup.java index b608d756a49df..fc49fa569024e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRBringup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRBringup.java @@ -18,9 +18,13 @@ package org.apache.hadoop.mapred; +import java.io.IOException; + +import org.junit.Assert; import org.junit.Test; -import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; /** * A Unit-test to test bringup and shutdown of Mini Map-Reduce Cluster. 
@@ -36,5 +40,21 @@ public void testBringUp() throws IOException { if (mr != null) { mr.shutdown(); } } } - + + @Test + public void testMiniMRYarnClusterWithoutJHS() throws IOException { + MiniMRYarnCluster mr = null; + try { + final Configuration conf = new Configuration(); + conf.setBoolean(MiniMRYarnCluster.MR_HISTORY_MINICLUSTER_ENABLED, false); + mr = new MiniMRYarnCluster("testMiniMRYarnClusterWithoutJHS"); + mr.init(conf); + mr.start(); + Assert.assertEquals(null, mr.getHistoryServer()); + } finally { + if (mr != null) { + mr.stop(); + } + } + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java index 299383d76bf8a..dbd87e24914a8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java @@ -69,6 +69,10 @@ public class MiniMRYarnCluster extends MiniYARNCluster { private JobHistoryServer historyServer; private JobHistoryServerWrapper historyServerWrapper; private static final String TIMELINE_AUX_SERVICE_NAME = "timeline_collector"; + public static final String MR_HISTORY_MINICLUSTER_ENABLED = + JHAdminConfig.MR_HISTORY_PREFIX + "minicluster.enabled"; + public static final String MR_HISTORY_MINICLUSTER_LAUNCH_TIMEOUT_MS = + JHAdminConfig.MR_HISTORY_PREFIX + "minicluster.launch.timeout.ms"; public MiniMRYarnCluster(String testName) { this(testName, 1); @@ -77,11 +81,10 @@ public MiniMRYarnCluster(String testName) { public MiniMRYarnCluster(String testName, int noOfNMs) { this(testName, noOfNMs, false); } + @Deprecated public MiniMRYarnCluster(String testName, int noOfNMs, boolean enableAHS) { super(testName, 1, noOfNMs, 4, 4, enableAHS); - historyServerWrapper = new JobHistoryServerWrapper(); - addService(historyServerWrapper); } public static String getResolvedMRHistoryWebAppURLWithoutScheme( @@ -118,6 +121,11 @@ public static String getResolvedMRHistoryWebAppURLWithoutScheme( @Override public void serviceInit(Configuration conf) throws Exception { + if (conf.getBoolean(MR_HISTORY_MINICLUSTER_ENABLED, true)) { + historyServerWrapper = new JobHistoryServerWrapper(); + addService(historyServerWrapper); + } + conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME); String stagingDir = conf.get(MRJobConfig.MR_AM_STAGING_DIR); if (stagingDir == null || @@ -212,11 +220,13 @@ public void serviceInit(Configuration conf) throws Exception { protected void serviceStart() throws Exception { super.serviceStart(); - //need to do this because historyServer.init creates a new Configuration - getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS, - historyServer.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS)); - MRWebAppUtil.setJHSWebappURLWithoutScheme(getConfig(), - MRWebAppUtil.getJHSWebappURLWithoutScheme(historyServer.getConfig())); + if (historyServer != null) { + //need to do this because historyServer.init creates a new Configuration + getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS, + historyServer.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS)); + MRWebAppUtil.setJHSWebappURLWithoutScheme(getConfig(), + 
MRWebAppUtil.getJHSWebappURLWithoutScheme(historyServer.getConfig())); + } LOG.info("MiniMRYARN ResourceManager address: " + getConfig().get(YarnConfiguration.RM_ADDRESS)); @@ -233,7 +243,6 @@ private class JobHistoryServerWrapper extends AbstractService { public JobHistoryServerWrapper() { super(JobHistoryServerWrapper.class.getName()); } - private volatile boolean jhsStarted = false; @Override public synchronized void serviceStart() throws Exception { @@ -255,12 +264,15 @@ public synchronized void serviceStart() throws Exception { new Thread() { public void run() { historyServer.start(); - jhsStarted = true; }; }.start(); - GenericTestUtils.waitFor(() -> jhsStarted, 1500, 60_000); - + final int launchTimeout = getConfig().getInt( + MR_HISTORY_MINICLUSTER_LAUNCH_TIMEOUT_MS, 60_000); + GenericTestUtils.waitFor( + () -> historyServer.getServiceState() == STATE.STARTED + || historyServer.getServiceState() == STATE.STOPPED, + 100, launchTimeout); if (historyServer.getServiceState() != STATE.STARTED) { throw new IOException("HistoryServer failed to start"); } From cd188ea9f0e807df1e2cc13f62be3e4c956b1e69 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Tue, 30 Jun 2020 16:52:57 +0900 Subject: [PATCH 065/131] YARN-10331. Upgrade node.js to 10.21.0. (#2106) --- dev-support/docker/Dockerfile | 6 +++--- dev-support/docker/Dockerfile_aarch64 | 6 +++--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 818d394bf921f..fd2d2938419c8 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -123,10 +123,10 @@ RUN pip2 install \ RUN pip2 install python-dateutil==2.7.3 ### -# Install node.js 8.17.0 for web UI framework (4.2.6 ships with Xenial) +# Install node.js 10.21.0 for web UI framework (4.2.6 ships with Xenial) ### -RUN curl -L -s -S https://deb.nodesource.com/setup_8.x | bash - \ - && apt-get install -y --no-install-recommends nodejs=8.17.0-1nodesource1 \ +RUN curl -L -s -S https://deb.nodesource.com/setup_10.x | bash - \ + && apt-get install -y --no-install-recommends nodejs=10.21.0-1nodesource1 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* \ && npm install -g bower@1.8.8 diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 index d0cfa5a2fa24f..5628c60cf9fb3 100644 --- a/dev-support/docker/Dockerfile_aarch64 +++ b/dev-support/docker/Dockerfile_aarch64 @@ -184,10 +184,10 @@ RUN pip2 install \ RUN pip2 install python-dateutil==2.7.3 ### -# Install node.js 8.17.0 for web UI framework (4.2.6 ships with Xenial) +# Install node.js 10.21.0 for web UI framework (4.2.6 ships with Xenial) ### -RUN curl -L -s -S https://deb.nodesource.com/setup_8.x | bash - \ - && apt-get install -y --no-install-recommends nodejs=8.17.0-1nodesource1 \ +RUN curl -L -s -S https://deb.nodesource.com/setup_10.x | bash - \ + && apt-get install -y --no-install-recommends nodejs=10.21.0-1nodesource1 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* \ && npm install -g bower@1.8.8 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml index 58b4b3d534faa..f3bce21498f13 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml @@ -184,7 +184,7 @@ install-node-and-yarn - v8.17.0 + v10.21.0 v1.21.1 From 4249c04d454ca82aadeed152ab777e93474754ab Mon Sep 17 00:00:00 2001 From: Steve 
Loughran Date: Tue, 30 Jun 2020 10:44:51 +0100 Subject: [PATCH 066/131] HADOOP-16798. S3A Committer thread pool shutdown problems. (#1963) Contributed by Steve Loughran. Fixes a condition which can cause job commit to fail if a task was aborted < 60s before the job commit commenced: the task abort will shut down the thread pool with a hard exit after 60s; the job commit POST requests would be scheduled through the same pool, so be interrupted and fail. At present the access is synchronized, but presumably the executor shutdown code is calling wait() and releasing locks. Task abort is triggered from the AM when task attempts succeed but there are still active speculative task attempts running. Thus it only surfaces when speculation is enabled and the final tasks are speculating, which, given they are the stragglers, is not unheard of. Note: this problem has never been seen in production; it has surfaced in the hadoop-aws tests on a heavily overloaded desktop --- .../fs/s3a/commit/AbstractS3ACommitter.java | 129 ++++++++++++++---- .../apache/hadoop/fs/s3a/commit/Tasks.java | 23 +++- .../staging/PartitionedStagingCommitter.java | 7 +- .../s3a/commit/staging/StagingCommitter.java | 2 +- .../hadoop/fs/s3a/commit/TestTasks.java | 21 ++- 5 files changed, 142 insertions(+), 40 deletions(-) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java index e82fbda63dd0c..32d00a4353e98 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java @@ -24,6 +24,7 @@ import java.util.Date; import java.util.List; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -472,7 +473,7 @@ protected void commitPendingUploads( Tasks.foreach(pending.getSourceFiles()) .stopOnFailure() .suppressExceptions(false) - .executeWith(buildThreadPool(context)) + .executeWith(buildSubmitter(context)) .abortWith(path -> loadAndAbort(commitContext, pending, path, true, false)) .revertWith(path -> @@ -502,7 +503,7 @@ protected void precommitCheckPendingFiles( Tasks.foreach(pending.getSourceFiles()) .stopOnFailure() .suppressExceptions(false) - .executeWith(buildThreadPool(context)) + .executeWith(buildSubmitter(context)) .run(path -> PendingSet.load(sourceFS, path)); } } @@ -525,7 +526,7 @@ private void loadAndCommit( Tasks.foreach(pendingSet.getCommits()) .stopOnFailure() .suppressExceptions(false) - .executeWith(singleCommitThreadPool()) + .executeWith(singleThreadSubmitter()) .onFailure((commit, exception) -> commitContext.abortSingleCommit(commit)) .abortWith(commitContext::abortSingleCommit) @@ -580,7 +581,7 @@ private void loadAndAbort( path); FileSystem fs = getDestFS(); Tasks.foreach(pendingSet.getCommits()) - .executeWith(singleCommitThreadPool()) + .executeWith(singleThreadSubmitter()) .suppressExceptions(suppressExceptions) .run(commit -> { try { @@ -674,7 +675,7 @@ protected void abortPendingUploadsInCleanup( return; } Tasks.foreach(pending) - .executeWith(buildThreadPool(getJobContext())) + .executeWith(buildSubmitter(getJobContext())) .suppressExceptions(suppressExceptions) .run(u -> commitContext.abortMultipartCommit( u.getKey(), u.getUploadId())); @@ -838,44 +839,116 @@ protected String getRole() { } /** - * Returns 
an {@link ExecutorService} for parallel tasks. The number of + * Returns an {@link Tasks.Submitter} for parallel tasks. The number of * threads in the thread-pool is set by fs.s3a.committer.threads. * If num-threads is 0, this will return null; + * this is used in Tasks as a cue + * to switch to single-threaded execution. * * @param context the JobContext for this commit - * @return an {@link ExecutorService} or null for the number of threads + * @return a submitter or null */ - protected final synchronized ExecutorService buildThreadPool( + protected Tasks.Submitter buildSubmitter( JobContext context) { + if (getThreadCount(context) > 0) { + return new PoolSubmitter(context); + } else { + return null; + } + } + /** + * Returns an {@link ExecutorService} for parallel tasks. The number of + * threads in the thread-pool is set by fs.s3a.committer.threads. + * If num-threads is 0, this will raise an exception. + * + * @param context the JobContext for this commit + * @param numThreads threads + * @return an {@link ExecutorService} for the number of threads + */ + private synchronized ExecutorService buildThreadPool( + JobContext context, int numThreads) { + Preconditions.checkArgument(numThreads > 0, + "Cannot create a thread pool with no threads"); if (threadPool == null) { - int numThreads = context.getConfiguration().getInt( - FS_S3A_COMMITTER_THREADS, - DEFAULT_COMMITTER_THREADS); LOG.debug("{}: creating thread pool of size {}", getRole(), numThreads); - if (numThreads > 0) { - threadPool = HadoopExecutors.newFixedThreadPool(numThreads, - new ThreadFactoryBuilder() - .setDaemon(true) - .setNameFormat(THREAD_PREFIX + context.getJobID() + "-%d") - .build()); - } else { - return null; - } + threadPool = HadoopExecutors.newFixedThreadPool(numThreads, + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat(THREAD_PREFIX + context.getJobID() + "-%d") + .build()); } return threadPool; } + /** + * Get the thread count for this job's commit operations. + * @param context the JobContext for this commit + * @return a possibly zero thread count. + */ + private int getThreadCount(final JobContext context) { + return context.getConfiguration().getInt( + FS_S3A_COMMITTER_THREADS, + DEFAULT_COMMITTER_THREADS); + } + + /** + * Submit a runnable. + * This will demand-create the thread pool if needed. + *

+ * This is synchronized to ensure the thread pool is always valid when + * work is synchronized. See HADOOP-16798. + * @param context the JobContext for this commit + * @param task task to execute + * @return the future of the submitted task. + */ + private synchronized Future submitRunnable( + final JobContext context, + final Runnable task) { + return buildThreadPool(context, getThreadCount(context)).submit(task); + } + + /** + * The real task submitter, which hands off the work to + * the current thread pool. + */ + private final class PoolSubmitter implements Tasks.Submitter { + + private final JobContext context; + + private final int numThreads; + + private PoolSubmitter(final JobContext context) { + this.numThreads = getThreadCount(context); + Preconditions.checkArgument(numThreads > 0, + "Cannot create a thread pool with no threads"); + this.context = context; + } + + @Override + public Future submit(final Runnable task) { + return submitRunnable(context, task); + } + + } + /** * Destroy any thread pools; wait for that to finish, * but don't overreact if it doesn't finish in time. */ - protected synchronized void destroyThreadPool() { - if (threadPool != null) { + protected void destroyThreadPool() { + ExecutorService pool; + // reset the thread pool in a sync block, then shut it down + // afterwards. This allows for other threads to create a + // new thread pool on demand. + synchronized(this) { + pool = this.threadPool; + threadPool = null; + } + if (pool != null) { LOG.debug("Destroying thread pool"); - HadoopExecutors.shutdown(threadPool, LOG, + HadoopExecutors.shutdown(pool, LOG, THREAD_POOL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS); - threadPool = null; } } @@ -884,11 +957,9 @@ protected synchronized void destroyThreadPool() { * within the commit of all uploads of a single task. * This is currently null; it is here to allow the Tasks class to * provide the logic for execute/revert. - * Why not use the existing thread pool? Too much fear of deadlocking, - * and tasks are being committed in parallel anyway. * @return null. always. 
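[Editor's note] The reworked destroyThreadPool() above detaches the pool reference inside a synchronized block and only blocks on shutdown outside the lock, so another thread can demand-create a fresh pool without waiting for the old one to drain. A generic sketch of that idiom using plain java.util.concurrent; nothing here is S3A-specific, and the class name and pool size are made up.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    class SwappablePool {
      private ExecutorService pool;                  // guarded by "this"

      synchronized ExecutorService getOrCreate() {
        if (pool == null) {
          pool = Executors.newFixedThreadPool(4);    // demand-create
        }
        return pool;
      }

      void destroy() throws InterruptedException {
        ExecutorService old;
        synchronized (this) {                        // detach under the lock...
          old = pool;
          pool = null;
        }
        if (old != null) {                           // ...shut down outside it
          old.shutdown();
          old.awaitTermination(30, TimeUnit.SECONDS);
        }
      }
    }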
*/ - protected final synchronized ExecutorService singleCommitThreadPool() { + protected final synchronized Tasks.Submitter singleThreadSubmitter() { return null; } @@ -932,7 +1003,7 @@ protected void abortPendingUploads(JobContext context, CommitOperations.CommitContext commitContext = initiateCommitOperation()) { Tasks.foreach(pending) - .executeWith(buildThreadPool(context)) + .executeWith(buildSubmitter(context)) .suppressExceptions(suppressExceptions) .run(commitContext::abortSingleCommit); } @@ -961,7 +1032,7 @@ protected void abortPendingUploads( CommitOperations.CommitContext commitContext = initiateCommitOperation()) { Tasks.foreach(pending.getSourceFiles()) - .executeWith(buildThreadPool(context)) + .executeWith(buildSubmitter(context)) .suppressExceptions(suppressExceptions) .run(path -> loadAndAbort(commitContext, diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java index b6b6b9707ebc5..c318e86605e0c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicBoolean; @@ -76,7 +75,7 @@ public interface FailureTask { */ public static class Builder { private final Iterable items; - private ExecutorService service = null; + private Submitter service = null; private FailureTask onFailure = null; private boolean stopOnFailure = false; private boolean suppressExceptions = false; @@ -96,11 +95,11 @@ public static class Builder { /** * Declare executor service: if null, the tasks are executed in a single * thread. - * @param executorService service to schedule tasks with. + * @param submitter service to schedule tasks with. * @return this builder. */ - public Builder executeWith(ExecutorService executorService) { - this.service = executorService; + public Builder executeWith(Submitter submitter) { + this.service = submitter; return this; } @@ -407,4 +406,18 @@ private static void castAndThrow(Exception e) throws E { } throw (E) e; } + + /** + * Interface to whatever lets us submit tasks. + */ + public interface Submitter { + + /** + * Submit work. + * @param task task to execute + * @return the future of the submitted task. 
+ */ + Future submit(Runnable task); + } + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitter.java index 20aca3cf49ae0..7be54062d28f5 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitter.java @@ -23,7 +23,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -187,7 +186,7 @@ private void replacePartitions( Map partitions = new ConcurrentHashMap<>(); FileSystem sourceFS = pending.getSourceFS(); - ExecutorService pool = buildThreadPool(context); + Tasks.Submitter submitter = buildSubmitter(context); try (DurationInfo ignored = new DurationInfo(LOG, "Replacing partitions")) { @@ -198,7 +197,7 @@ private void replacePartitions( Tasks.foreach(pending.getSourceFiles()) .stopOnFailure() .suppressExceptions(false) - .executeWith(pool) + .executeWith(submitter) .run(path -> { PendingSet pendingSet = PendingSet.load(sourceFS, path); Path lastParent = null; @@ -216,7 +215,7 @@ private void replacePartitions( Tasks.foreach(partitions.keySet()) .stopOnFailure() .suppressExceptions(false) - .executeWith(pool) + .executeWith(submitter) .run(partitionPath -> { LOG.debug("{}: removing partition path to be replaced: " + getRole(), partitionPath); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java index 7eca1b42659e5..91e68af8bb1d0 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java @@ -699,7 +699,7 @@ protected int commitTaskInternal(final TaskAttemptContext context, Tasks.foreach(taskOutput) .stopOnFailure() .suppressExceptions(false) - .executeWith(buildThreadPool(context)) + .executeWith(buildSubmitter(context)) .run(stat -> { Path path = stat.getPath(); File localFile = new File(path.toUri().getPath()); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestTasks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestTasks.java index 4ee39f1bfa08e..4211c62a77b9c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestTasks.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestTasks.java @@ -25,6 +25,7 @@ import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.stream.Collectors; @@ -57,6 +58,12 @@ public class TestTasks extends HadoopTestBase { * Thread pool for task execution. */ private ExecutorService threadPool; + + /** + * Task submitter bonded to the thread pool, or + * null for the 0-thread case. 
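[Editor's note] The committer hunks above now drive their parallel loops through the new Tasks.Submitter abstraction instead of a raw ExecutorService; passing a null submitter makes Tasks fall back to single-threaded execution, which is how singleThreadSubmitter() is used. A rough usage sketch outside the committer code follows; the pool size, item list and println body are placeholders, and it assumes Tasks and its nested Submitter are visible to the caller, as they are to the staging committers.

    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import org.apache.hadoop.fs.s3a.commit.Tasks;

    public class SubmitterSketch {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        // Submitter has a single submit(Runnable) method, so a lambda
        // adapting the pool is enough.
        Tasks.Submitter submitter = task -> pool.submit(task);

        List<String> uploads = Arrays.asList("part-0000", "part-0001", "part-0002");
        Tasks.foreach(uploads)
            .executeWith(submitter)
            .stopOnFailure()
            .suppressExceptions(false)
            .run(u -> System.out.println("committing " + u));

        pool.shutdown();
      }
    }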
+ */ + Tasks.Submitter submitter; private final CounterTask failingTask = new CounterTask("failing committer", FAILPOINT, Item::commit); @@ -117,6 +124,9 @@ public void setup() { .setDaemon(true) .setNameFormat(getMethodName() + "-pool-%d") .build()); + submitter = new PoolSubmitter(); + } else { + submitter = null; } } @@ -129,12 +139,21 @@ public void teardown() { } } + private class PoolSubmitter implements Tasks.Submitter { + + @Override + public Future submit(final Runnable task) { + return threadPool.submit(task); + } + + } + /** * create the builder. * @return pre-inited builder */ private Tasks.Builder builder() { - return Tasks.foreach(items).executeWith(threadPool); + return Tasks.foreach(items).executeWith(submitter); } private void assertRun(Tasks.Builder builder, From 2a67e2b1a0e3a5f91056f5b977ef9c4c07ba6718 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Tue, 30 Jun 2020 07:09:26 -0700 Subject: [PATCH 067/131] HDFS-15160. ReplicaMap, Disk Balancer, Directory Scanner and various FsDatasetImpl methods should use datanode readlock. Contributed by Stephen O'Donnell. Signed-off-by: Wei-Chiu Chuang --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 + .../hdfs/server/datanode/BlockSender.java | 2 +- .../hadoop/hdfs/server/datanode/DataNode.java | 2 +- .../server/datanode/DirectoryScanner.java | 2 +- .../hdfs/server/datanode/DiskBalancer.java | 2 +- .../datanode/fsdataset/FsDatasetSpi.java | 8 +- .../fsdataset/impl/FsDatasetImpl.java | 64 ++++++----- .../datanode/fsdataset/impl/ReplicaMap.java | 31 ++++-- .../src/main/resources/hdfs-default.xml | 13 +++ .../fsdataset/impl/TestFsDatasetImpl.java | 101 +++++++++++++++++- 10 files changed, 187 insertions(+), 42 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 3a0a6782ba062..9de33ff60a62e 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -606,6 +606,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_LOCK_FAIR_KEY = "dfs.datanode.lock.fair"; public static final boolean DFS_DATANODE_LOCK_FAIR_DEFAULT = true; + public static final String DFS_DATANODE_LOCK_READ_WRITE_ENABLED_KEY = + "dfs.datanode.lock.read.write.enabled"; + public static final Boolean DFS_DATANODE_LOCK_READ_WRITE_ENABLED_DEFAULT = + true; public static final String DFS_DATANODE_LOCK_REPORTING_THRESHOLD_MS_KEY = "dfs.datanode.lock-reporting-threshold-ms"; public static final long diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index 6102a592c2661..b396bf970523c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -255,7 +255,7 @@ class BlockSender implements java.io.Closeable { // the append write. 
ChunkChecksum chunkChecksum = null; final long replicaVisibleLength; - try(AutoCloseableLock lock = datanode.data.acquireDatasetLock()) { + try(AutoCloseableLock lock = datanode.data.acquireDatasetReadLock()) { replica = getReplica(block, datanode); replicaVisibleLength = replica.getVisibleLength(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 2e498e47504e2..e242cc826dbe3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -3060,7 +3060,7 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b, final BlockConstructionStage stage; //get replica information - try(AutoCloseableLock lock = data.acquireDatasetLock()) { + try(AutoCloseableLock lock = data.acquireDatasetReadLock()) { Block storedBlock = data.getStoredBlock(b.getBlockPoolId(), b.getBlockId()); if (null == storedBlock) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 35625ce121d94..b2e521c695a1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -473,7 +473,7 @@ private void scan() { blockPoolReport.sortBlocks(); // Hold FSDataset lock to prevent further changes to the block map - try (AutoCloseableLock lock = dataset.acquireDatasetLock()) { + try (AutoCloseableLock lock = dataset.acquireDatasetReadLock()) { for (final String bpid : blockPoolReport.getBlockPoolIds()) { List blockpoolReport = blockPoolReport.getScanInfo(bpid); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java index 958c0cfeeb1b4..ac10e8f249f2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java @@ -504,7 +504,7 @@ private Map getStorageIDToVolumeBasePathMap() Map storageIDToVolBasePathMap = new HashMap<>(); FsDatasetSpi.FsVolumeReferences references; try { - try(AutoCloseableLock lock = this.dataset.acquireDatasetLock()) { + try(AutoCloseableLock lock = this.dataset.acquireDatasetReadLock()) { references = this.dataset.getFsVolumeReferences(); for (int ndx = 0; ndx < references.size(); ndx++) { FsVolumeSpi vol = references.get(ndx); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java index 2e5135d841f4b..177c62e017411 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java @@ -657,12 +657,16 @@ ReplicaInfo moveBlockAcrossVolumes(final 
ExtendedBlock block, FsVolumeSpi destination) throws IOException; /** - * Acquire the lock of the data set. + * Acquire the lock of the data set. This prevents other threads from + * modifying the volume map structure inside the datanode, but other changes + * are still possible. For example modifying the genStamp of a block instance. */ AutoCloseableLock acquireDatasetLock(); /*** - * Acquire the read lock of the data set. + * Acquire the read lock of the data set. This prevents other threads from + * modifying the volume map structure inside the datanode, but other changes + * are still possible. For example modifying the genStamp of a block instance. * @return The AutoClosable read lock instance. */ AutoCloseableLock acquireDatasetReadLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index a083012a2cf7a..de898e93432f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -183,7 +183,7 @@ public StorageReport[] getStorageReports(String bpid) @Override public FsVolumeImpl getVolume(final ExtendedBlock b) { - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock()); return r != null ? (FsVolumeImpl) r.getVolume() : null; @@ -193,7 +193,7 @@ public FsVolumeImpl getVolume(final ExtendedBlock b) { @Override // FsDatasetSpi public Block getStoredBlock(String bpid, long blkid) throws IOException { - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { ReplicaInfo r = volumeMap.get(bpid, blkid); if (r == null) { return null; @@ -206,7 +206,7 @@ public Block getStoredBlock(String bpid, long blkid) public Set deepCopyReplica(String bpid) throws IOException { Set replicas = null; - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { replicas = new HashSet<>(volumeMap.replicas(bpid) == null ? Collections. EMPTY_SET : volumeMap.replicas(bpid)); } @@ -302,7 +302,20 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b) DFSConfigKeys.DFS_DATANODE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT, TimeUnit.MILLISECONDS)); this.datasetWriteLock = new AutoCloseableLock(datasetRWLock.writeLock()); - this.datasetReadLock = new AutoCloseableLock(datasetRWLock.readLock()); + boolean enableRL = conf.getBoolean( + DFSConfigKeys.DFS_DATANODE_LOCK_READ_WRITE_ENABLED_KEY, + DFSConfigKeys.DFS_DATANODE_LOCK_READ_WRITE_ENABLED_DEFAULT); + // The read lock can be disabled by the above config key. If it is disabled + // then we simply make the both the read and write lock variables hold + // the write lock. All accesses to the lock are via these variables, so that + // effectively disables the read lock. 
+ if (enableRL) { + LOG.info("The datanode lock is a read write lock"); + this.datasetReadLock = new AutoCloseableLock(datasetRWLock.readLock()); + } else { + LOG.info("The datanode lock is an exclusive write lock"); + this.datasetReadLock = this.datasetWriteLock; + } this.datasetWriteLockCondition = datasetWriteLock.newCondition(); // The number of volumes required for operation is the total number @@ -342,7 +355,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b) } storageMap = new ConcurrentHashMap(); - volumeMap = new ReplicaMap(datasetRWLock); + volumeMap = new ReplicaMap(datasetReadLock, datasetWriteLock); ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this); @SuppressWarnings("unchecked") @@ -475,7 +488,8 @@ private void addVolume(Storage.StorageDirectory sd) throws IOException { .setConf(this.conf) .build(); FsVolumeReference ref = fsVolume.obtainReference(); - ReplicaMap tempVolumeMap = new ReplicaMap(datasetRWLock); + ReplicaMap tempVolumeMap = + new ReplicaMap(datasetReadLock, datasetWriteLock); fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker); activateVolume(tempVolumeMap, sd, storageLocation.getStorageType(), ref); @@ -810,7 +824,7 @@ public InputStream getBlockInputStream(ExtendedBlock b, long seekOffset) throws IOException { ReplicaInfo info; - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock()); } @@ -898,7 +912,7 @@ ReplicaInfo getReplicaInfo(String bpid, long blkid) @Override // FsDatasetSpi public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkOffset, long metaOffset) throws IOException { - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { ReplicaInfo info = getReplicaInfo(b); FsVolumeReference ref = info.getVolume().obtainReference(); try { @@ -1023,7 +1037,7 @@ public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block, } FsVolumeReference volumeRef = null; - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { volumeRef = volumes.getNextVolume(targetStorageType, targetStorageId, block.getNumBytes()); } @@ -1137,7 +1151,7 @@ public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block, FsVolumeSpi FsVolumeReference volumeRef = null; - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { volumeRef = destination.obtainReference(); } @@ -1930,7 +1944,7 @@ public Map getBlockReports(String bpid) { new HashMap(); List curVolumes = null; - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { curVolumes = volumes.getVolumes(); for (FsVolumeSpi v : curVolumes) { builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength)); @@ -1989,7 +2003,7 @@ public Map getBlockReports(String bpid) { */ @Override public List getFinalizedBlocks(String bpid) { - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { final List finalized = new ArrayList( volumeMap.size(bpid)); for (ReplicaInfo b : volumeMap.replicas(bpid)) { @@ -2082,9 +2096,7 @@ private boolean isValid(final ExtendedBlock b, final ReplicaState state) { ReplicaInfo validateBlockFile(String bpid, long blockId) { //Should we check for metadata file too? 
final ReplicaInfo r; - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { - r = volumeMap.get(bpid, blockId); - } + r = volumeMap.get(bpid, blockId); if (r != null) { if (r.blockDataExists()) { return r; @@ -2327,7 +2339,7 @@ public boolean isCached(String bpid, long blockId) { @Override // FsDatasetSpi public boolean contains(final ExtendedBlock block) { - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { final long blockId = block.getLocalBlock().getBlockId(); final String bpid = block.getBlockPoolId(); final ReplicaInfo r = volumeMap.get(bpid, blockId); @@ -2655,7 +2667,7 @@ public ReplicaInfo getReplica(String bpid, long blockId) { @Override public String getReplicaString(String bpid, long blockId) { - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { final Replica r = volumeMap.get(bpid, blockId); return r == null ? "null" : r.toString(); } @@ -2882,7 +2894,7 @@ private ReplicaInfo updateReplicaUnderRecovery( @Override // FsDatasetSpi public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException { - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { final Replica replica = getReplicaInfo(block.getBlockPoolId(), block.getBlockId()); if (replica.getGenerationStamp() < block.getGenerationStamp()) { @@ -3032,18 +3044,20 @@ public void deleteBlockPool(String bpid, boolean force) @Override // FsDatasetSpi public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException { - try (AutoCloseableLock lock = datasetWriteLock.acquire()) { + try (AutoCloseableLock lock = datasetReadLock.acquire()) { final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId()); if (replica == null) { throw new ReplicaNotFoundException(block); } - if (replica.getGenerationStamp() < block.getGenerationStamp()) { - throw new IOException( - "Replica generation stamp < block generation stamp, block=" - + block + ", replica=" + replica); - } else if (replica.getGenerationStamp() > block.getGenerationStamp()) { - block.setGenerationStamp(replica.getGenerationStamp()); + synchronized(replica) { + if (replica.getGenerationStamp() < block.getGenerationStamp()) { + throw new IOException( + "Replica generation stamp < block generation stamp, block=" + + block + ", replica=" + replica); + } else if (replica.getGenerationStamp() > block.getGenerationStamp()) { + block.setGenerationStamp(replica.getGenerationStamp()); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java index df14f2aad01e2..5dfcc77174cd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java @@ -33,7 +33,6 @@ * Maintains the replica map. */ class ReplicaMap { - private final ReadWriteLock rwLock; // Lock object to synchronize this instance. 
private final AutoCloseableLock readLock; private final AutoCloseableLock writeLock; @@ -53,18 +52,22 @@ public int compare(Object o1, Object o2) { } }; - ReplicaMap(ReadWriteLock lock) { - if (lock == null) { + ReplicaMap(AutoCloseableLock readLock, AutoCloseableLock writeLock) { + if (readLock == null || writeLock == null) { throw new HadoopIllegalArgumentException( "Lock to synchronize on cannot be null"); } - this.rwLock = lock; - this.readLock = new AutoCloseableLock(rwLock.readLock()); - this.writeLock = new AutoCloseableLock(rwLock.writeLock()); + this.readLock = readLock; + this.writeLock = writeLock; + } + + ReplicaMap(ReadWriteLock lock) { + this(new AutoCloseableLock(lock.readLock()), + new AutoCloseableLock(lock.writeLock())); } String[] getBlockPoolList() { - try (AutoCloseableLock l = writeLock.acquire()) { + try (AutoCloseableLock l = readLock.acquire()) { return map.keySet().toArray(new String[map.keySet().size()]); } } @@ -109,7 +112,7 @@ ReplicaInfo get(String bpid, Block block) { */ ReplicaInfo get(String bpid, long blockId) { checkBlockPool(bpid); - try (AutoCloseableLock l = writeLock.acquire()) { + try (AutoCloseableLock l = readLock.acquire()) { FoldedTreeSet set = map.get(bpid); if (set == null) { return null; @@ -235,7 +238,7 @@ ReplicaInfo remove(String bpid, long blockId) { * @return the number of replicas in the map */ int size(String bpid) { - try (AutoCloseableLock l = writeLock.acquire()) { + try (AutoCloseableLock l = readLock.acquire()) { FoldedTreeSet set = map.get(bpid); return set != null ? set.size() : 0; } @@ -281,4 +284,14 @@ void cleanUpBlockPool(String bpid) { AutoCloseableLock getLock() { return writeLock; } + + /** + * Get the lock object used for synchronizing the ReplicasMap for read only + * operations. + * @return The read lock object + */ + AutoCloseableLock getReadLock() { + return readLock; + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 1e8490addbe0c..689ecfe2f3342 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -3250,6 +3250,19 @@ + + dfs.datanode.lock.read.write.enabled + true + If this is true, the FsDataset lock will be a read write lock. If + it is false, all locks will be a write lock. + Enabling this should give better datanode throughput, as many read only + functions can run concurrently under the read lock, when they would + previously have required the exclusive write lock. As the feature is + experimental, this switch can be used to disable the shared read lock, and + cause all lock acquisitions to use the exclusive write lock. 
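[Editor's note] The dfs.datanode.lock.read.write.enabled description above matches the FsDatasetImpl constructor change earlier in this patch: when the switch is off, the read-lock field is simply aliased to the write lock. A stripped-down sketch of that pattern, independent of the HDFS classes apart from AutoCloseableLock; the class name DatasetLocks and the fair-lock setting are illustrative.

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    import org.apache.hadoop.util.AutoCloseableLock;

    class DatasetLocks {
      private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock(true);
      private final AutoCloseableLock writeLock =
          new AutoCloseableLock(rwLock.writeLock());
      private final AutoCloseableLock readLock;

      DatasetLocks(boolean sharedReadLockEnabled) {
        // With the feature disabled, readers take the exclusive write lock,
        // restoring the old single-lock behaviour.
        this.readLock = sharedReadLockEnabled
            ? new AutoCloseableLock(rwLock.readLock())
            : this.writeLock;
      }

      AutoCloseableLock acquireReadLock() {
        return readLock.acquire();
      }

      AutoCloseableLock acquireWriteLock() {
        return writeLock.acquire();
      }
    }

Callers then use try-with-resources, e.g. try (AutoCloseableLock l = locks.acquireReadLock()) { ... }, exactly as the acquireDatasetReadLock() call sites in the hunks above do.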
+ + + dfs.datanode.lock-reporting-threshold-ms 300 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index 273feb0491112..8b445c5a51a90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -65,6 +65,7 @@ import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.util.FakeTimer; import org.apache.hadoop.util.StringUtils; import org.junit.Assert; @@ -84,6 +85,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_MS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY; @@ -198,6 +200,101 @@ public void setUp() throws IOException { assertEquals(0, dataset.getNumFailedVolumes()); } + @Test + public void testReadLockEnabledByDefault() + throws IOException, InterruptedException { + final FsDatasetSpi ds = dataset; + AtomicBoolean accessed = new AtomicBoolean(false); + CountDownLatch latch = new CountDownLatch(1); + CountDownLatch waiterLatch = new CountDownLatch(1); + + Thread holder = new Thread() { + public void run() { + try (AutoCloseableLock l = ds.acquireDatasetReadLock()) { + latch.countDown(); + sleep(10000); + } catch (Exception e) { + } + } + }; + + Thread waiter = new Thread() { + public void run() { + try (AutoCloseableLock l = ds.acquireDatasetReadLock()) { + waiterLatch.countDown(); + accessed.getAndSet(true); + } catch (Exception e) { + } + } + }; + + holder.start(); + latch.await(); + waiter.start(); + waiterLatch.await(); + // The holder thread is still holding the lock, but the waiter can still + // run as the lock is a shared read lock. 
+ assertEquals(true, accessed.get()); + holder.interrupt(); + holder.join(); + waiter.join(); + } + + @Test(timeout=10000) + public void testReadLockCanBeDisabledByConfig() + throws IOException, InterruptedException { + HdfsConfiguration conf = new HdfsConfiguration(); + conf.setBoolean( + DFSConfigKeys.DFS_DATANODE_LOCK_READ_WRITE_ENABLED_KEY, false); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(1).build(); + try { + cluster.waitActive(); + DataNode dn = cluster.getDataNodes().get(0); + final FsDatasetSpi ds = DataNodeTestUtils.getFSDataset(dn); + + CountDownLatch latch = new CountDownLatch(1); + CountDownLatch waiterLatch = new CountDownLatch(1); + AtomicBoolean accessed = new AtomicBoolean(false); + + Thread holder = new Thread() { + public void run() { + try (AutoCloseableLock l = ds.acquireDatasetReadLock()) { + latch.countDown(); + sleep(10000); + } catch (Exception e) { + } + } + }; + + Thread waiter = new Thread() { + public void run() { + try (AutoCloseableLock l = ds.acquireDatasetReadLock()) { + accessed.getAndSet(true); + waiterLatch.countDown(); + } catch (Exception e) { + } + } + }; + + holder.start(); + latch.await(); + waiter.start(); + Thread.sleep(200); + // Waiting thread should not have been able to update the variable + // as the read lock is disabled and hence an exclusive lock. + assertEquals(false, accessed.get()); + holder.interrupt(); + holder.join(); + waiterLatch.await(); + // After the holder thread exits, the variable is updated. + assertEquals(true, accessed.get()); + waiter.join(); + } finally { + cluster.shutdown(); + } + } + @Test public void testAddVolumes() throws IOException { final int numNewVolumes = 3; @@ -244,8 +341,8 @@ public void testAddVolumes() throws IOException { @Test public void testAddVolumeWithSameStorageUuid() throws IOException { - HdfsConfiguration conf = new HdfsConfiguration(); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + HdfsConfiguration config = new HdfsConfiguration(); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(config) .numDataNodes(1).build(); try { cluster.waitActive(); From e8dc862d3856e9eaea124c625dade36f1dd53fe2 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Tue, 30 Jun 2020 11:39:16 -0700 Subject: [PATCH 068/131] YARN-9809. Added node manager health status to resource manager registration call. 
Contributed by Eric Badger via eyang --- .../hadoop/yarn/conf/YarnConfiguration.java | 7 ++ .../src/main/resources/yarn-default.xml | 7 ++ .../RegisterNodeManagerRequest.java | 19 +++++- .../pb/RegisterNodeManagerRequestPBImpl.java | 39 ++++++++++- .../yarn_server_common_service_protos.proto | 1 + .../nodemanager/NodeStatusUpdaterImpl.java | 3 +- .../health/NodeHealthScriptRunner.java | 11 +++- .../health/TimedHealthReporterService.java | 20 +++++- .../server/nodemanager/TestEventFlow.java | 5 ++ .../BaseContainerManagerTest.java | 66 +++++++++++-------- .../TestContainerManager.java | 6 +- .../containermanager/TestNMProxy.java | 4 +- .../TestContainerSchedulerQueuing.java | 2 +- .../ResourceTrackerService.java | 5 +- .../resourcemanager/rmnode/RMNodeImpl.java | 58 ++++++++++++---- .../rmnode/RMNodeStartedEvent.java | 10 ++- .../yarn/server/resourcemanager/MockNM.java | 22 +++++++ .../yarn/server/resourcemanager/MockRM.java | 7 +- .../server/resourcemanager/NodeManager.java | 3 +- .../TestRMNodeTransitions.java | 55 +++++++++++++--- .../resourcemanager/TestResourceManager.java | 29 +++++--- .../TestResourceTrackerService.java | 6 ++ .../TestRMAppLogAggregationStatus.java | 7 +- .../resourcetracker/TestNMExpiry.java | 7 ++ .../resourcetracker/TestNMReconnect.java | 7 ++ .../scheduler/TestAbstractYarnScheduler.java | 5 ++ .../scheduler/TestSchedulerHealth.java | 18 +++-- .../capacity/TestCapacityScheduler.java | 63 ++++++++++++------ .../scheduler/fair/TestFairScheduler.java | 21 ++++-- .../scheduler/fifo/TestFifoScheduler.java | 25 +++++-- .../webapp/TestRMWebServicesNodes.java | 5 +- 31 files changed, 429 insertions(+), 114 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 85d5a58036233..54e8888f0d2aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2013,6 +2013,13 @@ public static boolean isAclEnabled(Configuration conf) { NM_PREFIX + "health-checker.interval-ms"; public static final long DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS = 10 * 60 * 1000; + /** Whether or not to run the node health script before the NM + * starts up.*/ + public static final String NM_HEALTH_CHECK_RUN_BEFORE_STARTUP = + NM_PREFIX + "health-checker.run-before-startup"; + public static final boolean DEFAULT_NM_HEALTH_CHECK_RUN_BEFORE_STARTUP = + false; + /** Health check time out period for all scripts.*/ public static final String NM_HEALTH_CHECK_TIMEOUT_MS = NM_PREFIX + "health-checker.timeout-ms"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index f09186ecf4c88..2f97a7cce7acd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -1668,6 +1668,13 @@ 1200000 + + Whether or not to run the node health script + before the NM starts up. + yarn.nodemanager.health-checker.run-before-startup + false + + Frequency of running node health scripts. 
yarn.nodemanager.health-checker.interval-ms diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java index acec16fd56b21..54b39155c63ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java @@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.util.Records; public abstract class RegisterNodeManagerRequest { @@ -53,14 +54,15 @@ public static RegisterNodeManagerRequest newInstance(NodeId nodeId, Resource physicalResource) { return newInstance(nodeId, httpPort, resource, nodeManagerVersionId, containerStatuses, runningApplications, nodeLabels, physicalResource, - null); + null, null); } public static RegisterNodeManagerRequest newInstance(NodeId nodeId, int httpPort, Resource resource, String nodeManagerVersionId, List containerStatuses, List runningApplications, Set nodeLabels, - Resource physicalResource, Set nodeAttributes) { + Resource physicalResource, Set nodeAttributes, + NodeStatus nodeStatus) { RegisterNodeManagerRequest request = Records.newRecord(RegisterNodeManagerRequest.class); request.setHttpPort(httpPort); @@ -72,6 +74,7 @@ public static RegisterNodeManagerRequest newInstance(NodeId nodeId, request.setNodeLabels(nodeLabels); request.setPhysicalResource(physicalResource); request.setNodeAttributes(nodeAttributes); + request.setNodeStatus(nodeStatus); return request; } @@ -133,4 +136,16 @@ public abstract void setLogAggregationReportsForApps( public abstract Set getNodeAttributes(); public abstract void setNodeAttributes(Set nodeAttributes); + + /** + * Get the status of the node. + * @return The status of the node. + */ + public abstract NodeStatus getNodeStatus(); + + /** + * Set the status of the node. + * @param nodeStatus The status of the node. 
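[Editor's note] With the new overload above, the NM can ship its current NodeStatus in the registration request itself rather than waiting for the first heartbeat. A hedged sketch of building such a request follows; the host name, ports, resource sizes and version string are placeholders, and the NodeStatus is taken as a parameter to keep the sketch independent of how the NM assembles it.

    import java.util.Collections;

    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
    import org.apache.hadoop.yarn.server.api.records.NodeStatus;

    public class RegistrationSketch {
      static RegisterNodeManagerRequest buildRequest(NodeStatus nodeStatus) {
        NodeId nodeId = NodeId.newInstance("nm-host.example.com", 45454);
        Resource capability = Resource.newInstance(8 * 1024, 8); // 8 GB, 8 vcores
        return RegisterNodeManagerRequest.newInstance(
            nodeId,
            8042,                    // NM http port
            capability,
            "3.4.0-SNAPSHOT",        // nodeManagerVersionId
            Collections.emptyList(), // containerStatuses
            Collections.emptyList(), // runningApplications
            null,                    // nodeLabels
            capability,              // physicalResource
            null,                    // nodeAttributes
            nodeStatus);             // new in this patch
      }
    }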
+ */ + public abstract void setNodeStatus(NodeStatus nodeStatus); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java index 317f8abd6f113..d91cff2531a5f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java @@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.LogAggregationReportProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NMContainerStatusProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto; @@ -51,7 +52,9 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; - +import org.apache.hadoop.yarn.server.api.records.NodeStatus; +import org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl; + public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest { RegisterNodeManagerRequestProto proto = RegisterNodeManagerRequestProto.getDefaultInstance(); RegisterNodeManagerRequestProto.Builder builder = null; @@ -68,6 +71,7 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest /** Physical resources in the node. */ private Resource physicalResource = null; + private NodeStatus nodeStatus; public RegisterNodeManagerRequestPBImpl() { builder = RegisterNodeManagerRequestProto.newBuilder(); @@ -121,6 +125,9 @@ private synchronized void mergeLocalToBuilder() { if (this.logAggregationReportsForApps != null) { addLogAggregationStatusForAppsToProto(); } + if (this.nodeStatus != null) { + builder.setNodeStatus(convertToProtoFormat(this.nodeStatus)); + } } private void addLogAggregationStatusForAppsToProto() { @@ -359,6 +366,28 @@ public synchronized void setPhysicalResource(Resource pPhysicalResource) { this.physicalResource = pPhysicalResource; } + @Override + public synchronized NodeStatus getNodeStatus() { + RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (this.nodeStatus != null) { + return this.nodeStatus; + } + if (!p.hasNodeStatus()) { + return null; + } + this.nodeStatus = convertFromProtoFormat(p.getNodeStatus()); + return this.nodeStatus; + } + + @Override + public synchronized void setNodeStatus(NodeStatus pNodeStatus) { + maybeInitBuilder(); + if (pNodeStatus == null) { + builder.clearNodeStatus(); + } + this.nodeStatus = pNodeStatus; + } + @Override public int hashCode() { return getProto().hashCode(); @@ -533,4 +562,12 @@ public synchronized void setLogAggregationReportsForApps( } this.logAggregationReportsForApps = logAggregationStatusForApps; } + + private NodeStatusPBImpl convertFromProtoFormat(NodeStatusProto s) { + return new NodeStatusPBImpl(s); + } + + private NodeStatusProto convertToProtoFormat(NodeStatus s) { + return ((NodeStatusPBImpl)s).getProto(); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto index ff7153eca8e15..c643179888efe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto @@ -74,6 +74,7 @@ message RegisterNodeManagerRequestProto { optional ResourceProto physicalResource = 9; repeated LogAggregationReportProto log_aggregation_reports_for_apps = 10; optional NodeAttributesProto nodeAttributes = 11; + optional NodeStatusProto nodeStatus = 12; } message RegisterNodeManagerResponseProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 5e3693ae9c1b6..0725d423096c7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -392,10 +392,11 @@ protected void registerWithRM() // during RM recovery synchronized (this.context) { List containerReports = getNMContainerStatuses(); + NodeStatus nodeStatus = getNodeStatus(0); RegisterNodeManagerRequest request = RegisterNodeManagerRequest.newInstance(nodeId, httpPort, totalResource, nodeManagerVersionId, containerReports, getRunningApplications(), - nodeLabels, physicalResource, nodeAttributes); + nodeLabels, physicalResource, nodeAttributes, nodeStatus); if (containerReports != null) { LOG.info("Registering with RM using containers :" + containerReports); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthScriptRunner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthScriptRunner.java index 1c9bd82bd46ed..af92b15e9c73c 100644 --- 
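
Illustrative sketch, not part of the patch: with the plumbing above, an NM snapshots its health before it first contacts the RM and ships that snapshot inside the registration. The fragment below shows roughly how such a request could be assembled, using only factory methods that appear in this patch (the 10-argument RegisterNodeManagerRequest.newInstance and the NodeStatus/NodeHealthStatus newInstance forms used in its tests); the host name, ports, version string and resource sizes are placeholder values.

import java.util.ArrayList;
import java.util.Collections;

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;

public class RegistrationWithStatusSketch {
  public static RegisterNodeManagerRequest build() {
    // Placeholder identity and capacity for the example node.
    NodeId nodeId = NodeId.newInstance("host0.example.com", 45454);
    Resource capability = Resource.newInstance(8 * 1024, 8);

    // Health is collected before registration; that is the point of the change.
    NodeHealthStatus health = NodeHealthStatus.newInstance(
        true, "", System.currentTimeMillis());
    NodeStatus initialStatus = NodeStatus.newInstance(
        nodeId, 0, new ArrayList<>(), null, health, null, null, null);

    // The trailing argument is the NodeStatus parameter added by this patch.
    return RegisterNodeManagerRequest.newInstance(
        nodeId, 8042, capability, "3.3.0",
        new ArrayList<>(), Collections.emptyList(),
        null, null, null, initialStatus);
  }
}
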
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthScriptRunner.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthScriptRunner.java @@ -60,8 +60,9 @@ public class NodeHealthScriptRunner extends TimedHealthReporterService { "Node health script timed out"; private NodeHealthScriptRunner(String scriptName, long checkInterval, - long timeout, String[] scriptArgs) { - super(NodeHealthScriptRunner.class.getName(), checkInterval); + long timeout, String[] scriptArgs, boolean runBeforeStartup) { + super(NodeHealthScriptRunner.class.getName(), checkInterval, + runBeforeStartup); this.nodeHealthScript = scriptName; this.scriptTimeout = timeout; setTimerTask(new NodeHealthMonitorExecutor(scriptArgs)); @@ -91,6 +92,10 @@ public static NodeHealthScriptRunner newInstance(String scriptName, "interval-ms can not be set to a negative number."); } + boolean runBeforeStartup = conf.getBoolean( + YarnConfiguration.NM_HEALTH_CHECK_RUN_BEFORE_STARTUP, + YarnConfiguration.DEFAULT_NM_HEALTH_CHECK_RUN_BEFORE_STARTUP); + // Determine time out String scriptTimeoutConfig = String.format( YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS_TEMPLATE, @@ -113,7 +118,7 @@ public static NodeHealthScriptRunner newInstance(String scriptName, String[] scriptArgs = conf.getStrings(scriptArgsConfig, new String[]{}); return new NodeHealthScriptRunner(nodeHealthScript, - checkIntervalMs, scriptTimeout, scriptArgs); + checkIntervalMs, scriptTimeout, scriptArgs, runBeforeStartup); } private enum HealthCheckerExitStatus { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/TimedHealthReporterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/TimedHealthReporterService.java index a0c4d8b8ebd41..6a7a2911d27bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/TimedHealthReporterService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/TimedHealthReporterService.java @@ -45,6 +45,7 @@ public abstract class TimedHealthReporterService extends AbstractService private Timer timer; private TimerTask task; private long intervalMs; + private boolean runBeforeStartup; TimedHealthReporterService(String name, long intervalMs) { super(name); @@ -52,6 +53,17 @@ public abstract class TimedHealthReporterService extends AbstractService this.healthReport = ""; this.lastReportedTime = System.currentTimeMillis(); this.intervalMs = intervalMs; + this.runBeforeStartup = false; + } + + TimedHealthReporterService(String name, long intervalMs, + boolean runBeforeStartup) { + super(name); + this.isHealthy = true; + this.healthReport = ""; + this.lastReportedTime = System.currentTimeMillis(); + this.intervalMs = intervalMs; + this.runBeforeStartup = runBeforeStartup; } @VisibleForTesting @@ -73,7 +85,13 @@ public void serviceStart() throws Exception { throw new Exception("Health reporting task hasn't been set!"); } timer = new Timer("HealthReporterService-Timer", true); - timer.scheduleAtFixedRate(task, 0, intervalMs); + 
long delay = 0; + if (runBeforeStartup) { + delay = intervalMs; + task.run(); + } + + timer.scheduleAtFixedRate(task, delay, intervalMs); super.serviceStart(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java index b1fc2f1aa2617..3f4879b23ead7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager; +import static org.mockito.Mockito.mock; + import java.io.File; import java.io.IOException; import java.util.ArrayList; @@ -134,6 +136,9 @@ public long getRMIdentifier() { new DummyContainerManager(context, exec, del, nodeStatusUpdater, metrics, dirsHandler); nodeStatusUpdater.init(conf); + NodeResourceMonitorImpl nodeResourceMonitor = mock( + NodeResourceMonitorImpl.class); + ((NMContext) context).setNodeResourceMonitor(nodeResourceMonitor); ((NMContext)context).setContainerManager(containerManager); nodeStatusUpdater.start(); ((NMContext)context).setNodeStatusUpdater(nodeStatusUpdater); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java index 7a85bfab44efc..9ee3ce6bc8b45 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java @@ -26,6 +26,7 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.doNothing; +import org.apache.hadoop.yarn.server.nodemanager.NodeResourceMonitorImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -156,32 +157,20 @@ public NMLogAggregationStatusTracker getNMLogAggregationStatusTracker() { protected NodeHealthCheckerService nodeHealthChecker; protected LocalDirsHandlerService dirsHandler; protected final long DUMMY_RM_IDENTIFIER = 1234; + private NodeResourceMonitorImpl nodeResourceMonitor = mock( + NodeResourceMonitorImpl.class); + private NodeHealthCheckerService nodeHealthCheckerService; + private NodeStatusUpdater nodeStatusUpdater; + protected ContainerManagerImpl containerManager = null; - protected NodeStatusUpdater nodeStatusUpdater = new NodeStatusUpdaterImpl( - context, new AsyncDispatcher(), null, metrics) { - @Override - protected ResourceTracker getRMClient() { - return new LocalRMInterface(); - }; - - @Override - protected void stopRMProxy() { - return; - } - - @Override - protected void startStatusUpdater() { - return; // Don't start any updating thread. 
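
Illustrative sketch, not part of the patch: the TimedHealthReporterService#serviceStart() change above boils down to "run the check once synchronously, then delay the periodic schedule by one interval" when the new flag is set. The standalone fragment below restates that pattern with a stand-in TimerTask; it reads the configuration keys introduced or referenced by this patch, but HealthCheckTask and the main method are invented for the example.

import java.util.Timer;
import java.util.TimerTask;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RunBeforeStartupSketch {

  /** Stand-in for the real health-check TimerTask. */
  static class HealthCheckTask extends TimerTask {
    @Override
    public void run() {
      System.out.println("running health check at " + System.currentTimeMillis());
    }
  }

  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    boolean runBeforeStartup = conf.getBoolean(
        YarnConfiguration.NM_HEALTH_CHECK_RUN_BEFORE_STARTUP,
        YarnConfiguration.DEFAULT_NM_HEALTH_CHECK_RUN_BEFORE_STARTUP);
    long intervalMs = conf.getLong(
        YarnConfiguration.NM_HEALTH_CHECK_INTERVAL_MS,
        YarnConfiguration.DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS);

    TimerTask task = new HealthCheckTask();
    Timer timer = new Timer("HealthReporterService-Timer", true);

    // Same idea as serviceStart(): when the check must run before startup,
    // execute it once synchronously and push the first timer-driven run out
    // by one full interval so it does not fire twice back to back.
    long delay = 0;
    if (runBeforeStartup) {
      delay = intervalMs;
      task.run();
    }
    timer.scheduleAtFixedRate(task, delay, intervalMs);
    // In a real service the process keeps running; in this sketch the daemon
    // timer simply exits together with main.
  }
}
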
- } - - @Override - public long getRMIdentifier() { - // There is no real RM registration, simulate and set RMIdentifier - return DUMMY_RM_IDENTIFIER; - } - }; + public NodeStatusUpdater getNodeStatusUpdater() { + return nodeStatusUpdater; + } - protected ContainerManagerImpl containerManager = null; + public void setNodeStatusUpdater( + NodeStatusUpdater nodeStatusUpdater) { + this.nodeStatusUpdater = nodeStatusUpdater; + } protected ContainerExecutor createContainerExecutor() { DefaultContainerExecutor exec = new DefaultContainerExecutor(); @@ -218,11 +207,36 @@ public void setup() throws IOException { delSrvc.init(conf); dirsHandler = new LocalDirsHandlerService(); - nodeHealthChecker = new NodeHealthCheckerService(dirsHandler); - nodeHealthChecker.init(conf); + dirsHandler.init(conf); + nodeHealthCheckerService = new NodeHealthCheckerService(dirsHandler); + nodeStatusUpdater = new NodeStatusUpdaterImpl( + context, new AsyncDispatcher(), nodeHealthCheckerService, metrics) { + @Override + protected ResourceTracker getRMClient() { + return new LocalRMInterface(); + }; + + @Override + protected void stopRMProxy() { + return; + } + + @Override + protected void startStatusUpdater() { + return; // Don't start any updating thread. + } + + @Override + public long getRMIdentifier() { + // There is no real RM registration, simulate and set RMIdentifier + return DUMMY_RM_IDENTIFIER; + } + }; + containerManager = createContainerManager(delSrvc); ((NMContext)context).setContainerManager(containerManager); ((NMContext)context).setContainerExecutor(exec); + ((NMContext)context).setNodeResourceMonitor(nodeResourceMonitor); nodeStatusUpdater.init(conf); containerManager.init(conf); nodeStatusUpdater.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java index e215980882450..4e63417bbad3e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java @@ -193,8 +193,8 @@ public int launchContainer(ContainerStartContext ctx) @Override protected ContainerManagerImpl createContainerManager(DeletionService delSrvc) { - return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, - metrics, dirsHandler) { + return new ContainerManagerImpl(context, exec, delSrvc, + getNodeStatusUpdater(), metrics, dirsHandler) { @Override protected UserGroupInformation getRemoteUgi() throws YarnException { @@ -1704,7 +1704,7 @@ public void testStartContainerFailureWithUnknownAuxService() throws Exception { @Test public void testNullTokens() throws Exception { ContainerManagerImpl cMgrImpl = - new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, + new ContainerManagerImpl(context, exec, delSrvc, getNodeStatusUpdater(), metrics, dirsHandler); String strExceptionMsg = ""; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java index 5f023f02df1d9..32ff5724c38b3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java @@ -65,8 +65,8 @@ public void setUp() throws Exception { @Override protected ContainerManagerImpl createContainerManager(DeletionService delSrvc) { - return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, - metrics, dirsHandler) { + return new ContainerManagerImpl(context, exec, delSrvc, + getNodeStatusUpdater(), metrics, dirsHandler) { @Override public StartContainersResponse startContainers( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java index b21850cbcf2c9..508b8bd091505 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java @@ -131,7 +131,7 @@ public void postTransition(ContainerImpl op, protected ContainerManagerImpl createContainerManager( DeletionService delSrvc) { return new ContainerManagerImpl(context, exec, delSrvc, - nodeStatusUpdater, metrics, dirsHandler) { + getNodeStatusUpdater(), metrics, dirsHandler) { @Override protected UserGroupInformation getRemoteUgi() throws YarnException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index 2c89ddd9e9bd3..7d6feea6f1938 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -335,6 +335,7 @@ public RegisterNodeManagerResponse registerNodeManager( Resource capability = request.getResource(); String nodeManagerVersion = request.getNMVersion(); Resource physicalResource = request.getPhysicalResource(); + NodeStatus nodeStatus = request.getNodeStatus(); RegisterNodeManagerResponse response = recordFactory .newRecordInstance(RegisterNodeManagerResponse.class); @@ -426,7 +427,7 @@ public RegisterNodeManagerResponse registerNodeManager( if (oldNode == null) { RMNodeStartedEvent startEvent = new RMNodeStartedEvent(nodeId, 
request.getNMContainerStatuses(), - request.getRunningApplications()); + request.getRunningApplications(), nodeStatus); if (request.getLogAggregationReportsForApps() != null && !request.getLogAggregationReportsForApps().isEmpty()) { if (LOG.isDebugEnabled()) { @@ -462,7 +463,7 @@ public RegisterNodeManagerResponse registerNodeManager( this.rmContext.getRMNodes().put(nodeId, rmNode); this.rmContext.getDispatcher().getEventHandler() - .handle(new RMNodeStartedEvent(nodeId, null, null)); + .handle(new RMNodeStartedEvent(nodeId, null, null, nodeStatus)); } else { // Reset heartbeat ID since node just restarted. oldNode.resetLastNodeHeartBeatResponse(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index a565fe75656a0..68f44dc6d54e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -36,6 +36,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; import org.apache.commons.collections.keyvalue.DefaultMapEntry; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -208,7 +209,8 @@ public class RMNodeImpl implements RMNode, EventHandler { RMNodeEventType, RMNodeEvent>(NodeState.NEW) //Transitions from NEW state - .addTransition(NodeState.NEW, NodeState.RUNNING, + .addTransition(NodeState.NEW, + EnumSet.of(NodeState.RUNNING, NodeState.UNHEALTHY), RMNodeEventType.STARTED, new AddNodeTransition()) .addTransition(NodeState.NEW, NodeState.NEW, RMNodeEventType.RESOURCE_UPDATE, @@ -707,7 +709,6 @@ public void handle(RMNodeEvent event) { private void updateMetricsForRejoinedNode(NodeState previousNodeState) { ClusterMetrics metrics = ClusterMetrics.getMetrics(); - metrics.incrNumActiveNodes(); switch (previousNodeState) { case LOST: @@ -850,10 +851,10 @@ private static NodeHealthStatus updateRMNodeFromStatusEvents( } public static class AddNodeTransition implements - SingleArcTransition { + MultipleArcTransition { @Override - public void transition(RMNodeImpl rmNode, RMNodeEvent event) { + public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { // Inform the scheduler RMNodeStartedEvent startEvent = (RMNodeStartedEvent) event; List containers = null; @@ -871,8 +872,6 @@ public void transition(RMNodeImpl rmNode, RMNodeEvent event) { if (previousRMNode != null) { ClusterMetrics.getMetrics().decrDecommisionedNMs(); } - // Increment activeNodes explicitly because this is a new node. 
- ClusterMetrics.getMetrics().incrNumActiveNodes(); containers = startEvent.getNMContainerStatuses(); if (containers != null && !containers.isEmpty()) { for (NMContainerStatus container : containers) { @@ -889,17 +888,37 @@ public void transition(RMNodeImpl rmNode, RMNodeEvent event) { } } - rmNode.context.getDispatcher().getEventHandler() - .handle(new NodeAddedSchedulerEvent(rmNode, containers)); - rmNode.context.getDispatcher().getEventHandler().handle( - new NodesListManagerEvent( - NodesListManagerEventType.NODE_USABLE, rmNode)); + NodeState nodeState; + NodeStatus nodeStatus = + startEvent.getNodeStatus(); + + if (nodeStatus == null) { + nodeState = NodeState.RUNNING; + reportNodeRunning(rmNode, containers); + } else { + RMNodeStatusEvent rmNodeStatusEvent = + new RMNodeStatusEvent(nodeId, nodeStatus); + + NodeHealthStatus nodeHealthStatus = + updateRMNodeFromStatusEvents(rmNode, rmNodeStatusEvent); + + if (nodeHealthStatus.getIsNodeHealthy()) { + nodeState = NodeState.RUNNING; + reportNodeRunning(rmNode, containers); + } else { + nodeState = NodeState.UNHEALTHY; + reportNodeUnusable(rmNode, nodeState); + } + } + List logAggregationReportsForApps = startEvent.getLogAggregationReportsForApps(); if (logAggregationReportsForApps != null && !logAggregationReportsForApps.isEmpty()) { rmNode.handleLogAggregationStatus(logAggregationReportsForApps); } + + return nodeState; } } @@ -1110,6 +1129,22 @@ public static void deactivateNode(RMNodeImpl rmNode, NodeState finalState) { } } + /** + * Report node is RUNNING. + * @param rmNode + * @param containers + */ + public static void reportNodeRunning(RMNodeImpl rmNode, + List containers) { + rmNode.context.getDispatcher().getEventHandler() + .handle(new NodeAddedSchedulerEvent(rmNode, containers)); + rmNode.context.getDispatcher().getEventHandler().handle( + new NodesListManagerEvent( + NodesListManagerEventType.NODE_USABLE, rmNode)); + // Increment activeNodes explicitly because this is a new node. + ClusterMetrics.getMetrics().incrNumActiveNodes(); + } + /** * Report node is UNUSABLE and update metrics. 
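
Illustrative sketch, not part of the patch: the AddNodeTransition above now returns one of two states instead of unconditionally RUNNING. Condensed to its decision rule, and deliberately omitting the scheduler notifications, status-event merge and metric updates that reportNodeRunning/reportNodeUnusable perform, the choice could be written as below; the class and method names are made up for the example.

import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;

public final class StartupStateSketch {
  private StartupStateSketch() {
  }

  /**
   * A node that registers without a NodeStatus (for example an older NM that
   * does not send one) is treated as RUNNING for compatibility; otherwise the
   * health flag carried in the registration decides RUNNING vs UNHEALTHY.
   */
  public static NodeState initialState(NodeStatus nodeStatus) {
    if (nodeStatus == null
        || nodeStatus.getNodeHealthStatus().getIsNodeHealthy()) {
      return NodeState.RUNNING;
    }
    return NodeState.UNHEALTHY;
  }
}
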
* @param rmNode @@ -1301,6 +1336,7 @@ public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { // notifiers get update metadata because they will very likely query it // upon notification // Update metrics + ClusterMetrics.getMetrics().incrNumActiveNodes(); rmNode.updateMetricsForRejoinedNode(NodeState.UNHEALTHY); return NodeState.RUNNING; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java index 397699453fb36..2bf04d0fe76fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java @@ -24,19 +24,23 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; public class RMNodeStartedEvent extends RMNodeEvent { + private final NodeStatus nodeStatus; private List containerStatuses; private List runningApplications; private List logAggregationReportsForApps; public RMNodeStartedEvent(NodeId nodeId, List containerReports, - List runningApplications) { + List runningApplications, + NodeStatus nodeStatus) { super(nodeId, RMNodeEventType.STARTED); this.containerStatuses = containerReports; this.runningApplications = runningApplications; + this.nodeStatus = nodeStatus; } public List getNMContainerStatuses() { @@ -47,6 +51,10 @@ public List getRunningApplications() { return runningApplications; } + public NodeStatus getNodeStatus() { + return nodeStatus; + } + public List getLogAggregationReportsForApps() { return this.logAggregationReportsForApps; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java index 3543bc4707ec6..d433753701b25 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java @@ -18,6 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -187,6 +190,17 @@ public RegisterNodeManagerResponse registerNode( req.setNodeLabels(nodeLabels); } + NodeStatus status = Records.newRecord(NodeStatus.class); + status.setResponseId(0); + status.setNodeId(nodeId); + status.setContainersStatuses(new ArrayList<>(containerStats.values())); + NodeHealthStatus healthStatus = Records.newRecord(NodeHealthStatus.class); + 
healthStatus.setHealthReport(""); + healthStatus.setIsNodeHealthy(true); + healthStatus.setLastHealthReportTime(1); + status.setNodeHealthStatus(healthStatus); + req.setNodeStatus(status); + RegisterNodeManagerResponse registrationResponse = resourceTracker.registerNodeManager(req); this.currentContainerTokenMasterKey = @@ -364,6 +378,14 @@ public NodeHeartbeatResponse nodeHeartbeat(List updatedStats, return heartbeatResponse; } + public static NodeStatus createMockNodeStatus() { + NodeStatus mockNodeStatus = mock(NodeStatus.class); + NodeHealthStatus mockNodeHealthStatus = mock(NodeHealthStatus.class); + when(mockNodeStatus.getNodeHealthStatus()).thenReturn(mockNodeHealthStatus); + when(mockNodeHealthStatus.getIsNodeHealthy()).thenReturn(true); + return mockNodeStatus; + } + public long getMemory() { return capability.getMemorySize(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index b3888c3cd6679..90c554361c03b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.token.Token; @@ -54,6 +56,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; @@ -543,7 +546,9 @@ public MockNM registerNode(String nodeIdStr, Resource nodeCapability) public void sendNodeStarted(MockNM nm) throws Exception { RMNodeImpl node = (RMNodeImpl) getRMContext().getRMNodes().get( nm.getNodeId()); - node.handle(new RMNodeStartedEvent(nm.getNodeId(), null, null)); + NodeStatus mockNodeStatus = createMockNodeStatus(); + node.handle(new RMNodeStartedEvent(nm.getNodeId(), null, null, + mockNodeStatus)); drainEventsImplicitly(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java index 1e4b050816138..06c4527e5ba73 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java +++ 
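
Illustrative sketch, not part of the patch: MockNM#createMockNodeStatus() above builds a healthy mock for the common test path. A test that wants to drive the new UNHEALTHY-at-registration path could mock the opposite in the same style; the class and helper name below are invented for the example, only the Mockito calls and the NodeStatus/NodeHealthStatus accessors come from the patch and its tests.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;

public final class MockNodeStatusSketch {
  private MockNodeStatusSketch() {
  }

  /** Hypothetical counterpart to MockNM#createMockNodeStatus(). */
  public static NodeStatus createUnhealthyMockNodeStatus(String report) {
    NodeStatus status = mock(NodeStatus.class);
    NodeHealthStatus health = mock(NodeHealthStatus.class);
    when(status.getNodeHealthStatus()).thenReturn(health);
    when(health.getIsNodeHealthy()).thenReturn(false);
    when(health.getHealthReport()).thenReturn(report);
    return status;
  }
}
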
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java @@ -98,7 +98,7 @@ public class NodeManager implements ContainerManagementProtocol { public NodeManager(String hostName, int containerManagerPort, int httpPort, String rackName, Resource capability, - ResourceManager resourceManager) + ResourceManager resourceManager, NodeStatus nodestatus) throws IOException, YarnException { this.containerManagerAddress = hostName + ":" + containerManagerPort; this.nodeHttpAddress = hostName + ":" + httpPort; @@ -113,6 +113,7 @@ public NodeManager(String hostName, int containerManagerPort, int httpPort, request.setResource(capability); request.setNodeId(this.nodeId); request.setNMVersion(YarnVersionInfo.getVersion()); + request.setNodeStatus(nodestatus); resourceTrackerService.registerNodeManager(request); this.resourceManager = resourceManager; resourceManager.getResourceScheduler().getNodeReport(this.nodeId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index 1f1e164cf5b04..c907cb778ebe6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.yarn.server.resourcemanager; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; @@ -216,8 +217,9 @@ private RMNodeStatusEvent getMockRMNodeStatusEventWithoutRunningApps() { @Test (timeout = 5000) public void testExpiredContainer() { + NodeStatus mockNodeStatus = createMockNodeStatus(); // Start the node - node.handle(new RMNodeStartedEvent(null, null, null)); + node.handle(new RMNodeStartedEvent(null, null, null, mockNodeStatus)); verify(scheduler).handle(any(NodeAddedSchedulerEvent.class)); // Expire a container @@ -280,12 +282,13 @@ public void testRecommissionNode() { @Test (timeout = 5000) public void testContainerUpdate() throws InterruptedException{ + NodeStatus mockNodeStatus = createMockNodeStatus(); //Start the node - node.handle(new RMNodeStartedEvent(null, null, null)); + node.handle(new RMNodeStartedEvent(null, null, null, mockNodeStatus)); NodeId nodeId = BuilderUtils.newNodeId("localhost:1", 1); RMNodeImpl node2 = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, null, null); - node2.handle(new RMNodeStartedEvent(null, null, null)); + node2.handle(new RMNodeStartedEvent(null, null, null, mockNodeStatus)); ApplicationId app0 = BuilderUtils.newApplicationId(0, 0); ApplicationId app1 = BuilderUtils.newApplicationId(1, 1); @@ -341,8 +344,9 @@ public void testContainerUpdate() throws InterruptedException{ @Test (timeout = 5000) public void testStatusChange(){ + NodeStatus mockNodeStatus = createMockNodeStatus(); //Start the node - node.handle(new RMNodeStartedEvent(null, null, null)); + node.handle(new 
RMNodeStartedEvent(null, null, null, mockNodeStatus)); //Add info to the queue first node.setNextHeartBeat(false); @@ -607,6 +611,33 @@ public void testUnhealthyRebooting() { Assert.assertEquals(NodeState.REBOOTED, node.getState()); } + @Test + public void testAddUnhealthyNode() { + ClusterMetrics cm = ClusterMetrics.getMetrics(); + int initialUnhealthy = cm.getUnhealthyNMs(); + int initialActive = cm.getNumActiveNMs(); + int initialLost = cm.getNumLostNMs(); + int initialDecommissioned = cm.getNumDecommisionedNMs(); + int initialRebooted = cm.getNumRebootedNMs(); + + NodeHealthStatus status = NodeHealthStatus.newInstance(false, "sick", + System.currentTimeMillis()); + NodeStatus nodeStatus = NodeStatus.newInstance(node.getNodeID(), 0, + new ArrayList<>(), null, status, null, null, null); + node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null, + nodeStatus)); + + Assert.assertEquals("Unhealthy Nodes", + initialUnhealthy + 1, cm.getUnhealthyNMs()); + Assert.assertEquals("Active Nodes", initialActive, cm.getNumActiveNMs()); + Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs()); + Assert.assertEquals("Decommissioned Nodes", + initialDecommissioned, cm.getNumDecommisionedNMs()); + Assert.assertEquals("Rebooted Nodes", + initialRebooted, cm.getNumRebootedNMs()); + Assert.assertEquals(NodeState.UNHEALTHY, node.getState()); + } + @Test public void testNMShutdown() { RMNodeImpl node = getRunningNode(); @@ -712,7 +743,9 @@ private RMNodeImpl getRunningNode(String nmVersion, int port) { Resource capability = Resource.newInstance(4096, 4); RMNodeImpl node = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, capability, nmVersion); - node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null)); + NodeStatus mockNodeStatus = createMockNodeStatus(); + node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null, + mockNodeStatus)); Assert.assertEquals(NodeState.RUNNING, node.getState()); return node; } @@ -763,7 +796,10 @@ private RMNodeImpl getRebootedNode() { Resource capability = Resource.newInstance(4096, 4); RMNodeImpl node = new RMNodeImpl(nodeId, rmContext,null, 0, 0, null, capability, null); - node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null)); + NodeStatus mockNodeStatus = createMockNodeStatus(); + + node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null, + mockNodeStatus)); Assert.assertEquals(NodeState.RUNNING, node.getState()); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.REBOOTING)); Assert.assertEquals(NodeState.REBOOTED, node.getState()); @@ -779,7 +815,9 @@ public void testAdd() { int initialUnhealthy = cm.getUnhealthyNMs(); int initialDecommissioned = cm.getNumDecommisionedNMs(); int initialRebooted = cm.getNumRebootedNMs(); - node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null)); + NodeStatus mockNodeStatus = createMockNodeStatus(); + node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null, + mockNodeStatus)); Assert.assertEquals("Active Nodes", initialActive + 1, cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes", @@ -1075,8 +1113,9 @@ public void testDisappearingContainer() { @Test public void testForHandlingDuplicatedCompltedContainers() { + NodeStatus mockNodeStatus = createMockNodeStatus(); // Start the node - node.handle(new RMNodeStartedEvent(null, null, null)); + node.handle(new RMNodeStartedEvent(null, null, null, mockNodeStatus)); // Add info to the queue first node.setNextHeartBeat(false); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java index 411b8482170a1..1cb5e1d0e7633 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; @@ -37,6 +38,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; @@ -88,12 +90,12 @@ public void tearDown() throws Exception { private org.apache.hadoop.yarn.server.resourcemanager.NodeManager registerNode(String hostName, int containerManagerPort, int httpPort, - String rackName, Resource capability) throws IOException, - YarnException { + String rackName, Resource capability, NodeStatus nodeStatus) + throws IOException, YarnException { org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm = new org.apache.hadoop.yarn.server.resourcemanager.NodeManager( hostName, containerManagerPort, httpPort, rackName, capability, - resourceManager); + resourceManager, nodeStatus); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(resourceManager.getRMContext() .getRMNodes().get(nm.getNodeId())); @@ -109,26 +111,30 @@ public void testResourceAllocation() final int memory = 4 * 1024; final int vcores = 4; - + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host1 = "host1"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm1 = registerNode(host1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(memory, vcores)); + Resources.createResource(memory, vcores), mockNodeStatus); // Register node2 String host2 = "host2"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm2 = registerNode(host2, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(memory/2, vcores/2)); + Resources.createResource(memory/2, vcores/2), mockNodeStatus); // nodes should be in RUNNING state RMNodeImpl node1 = (RMNodeImpl) resourceManager.getRMContext().getRMNodes().get( nm1.getNodeId()); RMNodeImpl node2 = (RMNodeImpl) resourceManager.getRMContext().getRMNodes().get( nm2.getNodeId()); - node1.handle(new RMNodeStartedEvent(nm1.getNodeId(), null, null)); - node2.handle(new RMNodeStartedEvent(nm2.getNodeId(), null, null)); + node1.handle(new RMNodeStartedEvent(nm1.getNodeId(), null, null, + mockNodeStatus)); + node2.handle(new RMNodeStartedEvent(nm2.getNodeId(), null, null, + mockNodeStatus)); // Submit an application Application application = new 
Application("user1", resourceManager); @@ -216,9 +222,12 @@ private void nodeUpdate( public void testNodeHealthReportIsNotNull() throws Exception{ String host1 = "host1"; final int memory = 4 * 1024; + + NodeStatus mockNodeStatus = createMockNodeStatus(); + org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm1 = - registerNode(host1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(memory, 1)); + registerNode(host1, 1234, 2345, NetworkTopology.DEFAULT_RACK, + Resources.createResource(memory, 1), mockNodeStatus); nm1.heartbeat(); nm1.heartbeat(); Collection values = resourceManager.getRMContext().getRMNodes().values(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java index 6690339d892b6..066e39456168d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.NodeEventDispatcher; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.FileSystemNodeAttributeStore; + +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -2712,10 +2714,14 @@ protected Dispatcher createDispatcher() { RegisterNodeManagerRequest.class); NodeId nodeId = NodeId.newInstance("host2", 1234); Resource capability = BuilderUtils.newResource(1024, 1); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + req.setResource(capability); req.setNodeId(nodeId); req.setHttpPort(1234); req.setNMVersion(YarnVersionInfo.getVersion()); + req.setNodeStatus(mockNodeStatus); ContainerId c1 = ContainerId.newContainerId(appAttemptId, 1); ContainerId c2 = ContainerId.newContainerId(appAttemptId, 2); ContainerId c3 = ContainerId.newContainerId(appAttemptId, 3); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java index 8d31fe1a8ba83..6836288ed1cd1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.logaggregationstatus; +import static 
org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -139,13 +140,15 @@ public void testLogAggregationStatus() throws Exception { Resource capability = Resource.newInstance(4096, 4); RMNodeImpl node1 = new RMNodeImpl(nodeId1, rmContext, null, 0, 0, null, capability, null); - node1.handle(new RMNodeStartedEvent(nodeId1, null, null)); + NodeStatus mockNodeStatus = createMockNodeStatus(); + node1.handle(new RMNodeStartedEvent(nodeId1, null, null, mockNodeStatus)); rmApp.handle(new RMAppRunningOnNodeEvent(this.appId, nodeId1)); NodeId nodeId2 = NodeId.newInstance("localhost", 2345); RMNodeImpl node2 = new RMNodeImpl(nodeId2, rmContext, null, 0, 0, null, capability, null); - node2.handle(new RMNodeStartedEvent(node2.getNodeID(), null, null)); + node2.handle(new RMNodeStartedEvent(node2.getNodeID(), null, null, + mockNodeStatus)); rmApp.handle(new RMAppRunningOnNodeEvent(this.appId, nodeId2)); // The initial log aggregation status for these two nodes diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java index f69faf4ea557e..017a1e021d7cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java @@ -18,6 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; + +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.junit.Assert; import org.slf4j.Logger; @@ -135,12 +138,15 @@ public void testNMExpiry() throws Exception { String hostname3 = "localhost3"; Resource capability = BuilderUtils.newResource(1024, 1); + NodeStatus mockNodeStatus = createMockNodeStatus(); + RegisterNodeManagerRequest request1 = recordFactory .newRecordInstance(RegisterNodeManagerRequest.class); NodeId nodeId1 = NodeId.newInstance(hostname1, 0); request1.setNodeId(nodeId1); request1.setHttpPort(0); request1.setResource(capability); + request1.setNodeStatus(mockNodeStatus); resourceTrackerService.registerNodeManager(request1); RegisterNodeManagerRequest request2 = recordFactory @@ -149,6 +155,7 @@ public void testNMExpiry() throws Exception { request2.setNodeId(nodeId2); request2.setHttpPort(0); request2.setResource(capability); + request2.setNodeStatus(mockNodeStatus); resourceTrackerService.registerNodeManager(request2); int waitCount = 0; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java index 3c4e6b424de0a..817fb9dfc3398 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -36,6 +38,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse; import org.apache.hadoop.yarn.server.api.records.NodeAction; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase; @@ -178,9 +181,13 @@ public void testCompareRMNodeAfterReconnect() throws Exception { RegisterNodeManagerRequest request1 = recordFactory .newRecordInstance(RegisterNodeManagerRequest.class); NodeId nodeId1 = NodeId.newInstance(hostname1, 0); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + request1.setNodeId(nodeId1); request1.setHttpPort(0); request1.setResource(capability); + request1.setNodeStatus(mockNodeStatus); resourceTrackerService.registerNodeManager(request1); Assert.assertNotNull(context.getRMNodes().get(nodeId1)); // verify Scheduler and RMContext use same RMNode reference. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java index e67deb5245fbb..2860335d8ac7a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -49,6 +50,7 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; @@ -1051,9 +1053,12 @@ public void testNodemanagerReconnect() throws Exception { RegisterNodeManagerRequest request1 = recordFactory.newRecordInstance(RegisterNodeManagerRequest.class); 
NodeId nodeId1 = NodeId.newInstance(hostname1, 0); + NodeStatus mockNodeStatus = createMockNodeStatus(); + request1.setNodeId(nodeId1); request1.setHttpPort(0); request1.setResource(capability); + request1.setNodeStatus(mockNodeStatus); privateResourceTrackerService.registerNodeManager(request1); privateDispatcher.await(); Resource clusterResource = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java index 83a354de5a244..a75be7745fb88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java @@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.NodeManager; import org.apache.hadoop.yarn.server.resourcemanager.Application; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; @@ -43,6 +44,7 @@ import java.io.IOException; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.junit.Assume.assumeTrue; public class TestSchedulerHealth { @@ -170,11 +172,11 @@ public void testResourceUpdate() { } private NodeManager registerNode(String hostName, int containerManagerPort, - int httpPort, String rackName, Resource capability) throws IOException, - YarnException { + int httpPort, String rackName, Resource capability, NodeStatus nodeStatus) + throws IOException, YarnException { NodeManager nm = new NodeManager(hostName, containerManagerPort, httpPort, rackName, - capability, resourceManager); + capability, resourceManager, nodeStatus); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(resourceManager.getRMContext().getRMNodes() .get(nm.getNodeId())); @@ -200,11 +202,13 @@ public void testCapacitySchedulerAllocation() throws Exception { assumeTrue("This test is only supported on Capacity Scheduler", isCapacityScheduler); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(5 * 1024, 1)); + Resources.createResource(5 * 1024, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -275,15 +279,17 @@ public void testCapacitySchedulerReservation() throws Exception { assumeTrue("This test is only supported on Capacity Scheduler", isCapacityScheduler); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register nodes String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(2 * 1024, 1)); + Resources.createResource(2 * 1024, 1), mockNodeStatus); String host_1 = "host_1"; NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - 
Resources.createResource(5 * 1024, 1)); + Resources.createResource(5 * 1024, 1), mockNodeStatus); nodeUpdate(nm_0); nodeUpdate(nm_1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index a746f06f2746c..1fe7a53107a38 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.assertj.core.api.Assertions.assertThat; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.MAXIMUM_ALLOCATION; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.MAXIMUM_ALLOCATION_MB; @@ -54,6 +55,7 @@ import com.google.common.collect.Sets; import org.apache.hadoop.service.ServiceStateException; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -242,9 +244,10 @@ public void tearDown() throws Exception { private NodeManager registerNode(ResourceManager rm, String hostName, int containerManagerPort, int httpPort, String rackName, - Resource capability) throws IOException, YarnException { + Resource capability, NodeStatus nodeStatus) + throws IOException, YarnException { NodeManager nm = new NodeManager(hostName, - containerManagerPort, httpPort, rackName, capability, rm); + containerManagerPort, httpPort, rackName, capability, rm, nodeStatus); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(rm.getRMContext().getRMNodes() .get(nm.getNodeId())); @@ -286,11 +289,11 @@ public void testConfValidation() throws Exception { } private NodeManager registerNode(String hostName, int containerManagerPort, - int httpPort, String rackName, - Resource capability) - throws IOException, YarnException { + int httpPort, String rackName, + Resource capability, NodeStatus nodeStatus) + throws IOException, YarnException { NodeManager nm = new NodeManager(hostName, containerManagerPort, httpPort, - rackName, capability, resourceManager); + rackName, capability, resourceManager, nodeStatus); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(resourceManager.getRMContext() .getRMNodes().get(nm.getNodeId())); @@ -303,17 +306,19 @@ public void testCapacityScheduler() throws Exception { LOG.info("--- START: testCapacityScheduler ---"); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(4 * GB, 1)); + Resources.createResource(4 * GB, 1), mockNodeStatus); // Register node2 String host_1 = "host_1"; NodeManager nm_1 = registerNode(host_1, 1234, 2345, 
NetworkTopology.DEFAULT_RACK, - Resources.createResource(2 * GB, 1)); + Resources.createResource(2 * GB, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -443,11 +448,13 @@ protected RMNodeLabelsManager createNodeLabelManager() { when(mC.getConfigurationProvider()).thenReturn( new LocalConfigurationProvider()); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host0 = "host_0"; NodeManager nm0 = registerNode(rm, host0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(10 * GB, 10)); + Resources.createResource(10 * GB, 10), mockNodeStatus); // ResourceRequest priorities Priority priority0 = Priority.newInstance(0); @@ -545,11 +552,13 @@ protected RMNodeLabelsManager createNodeLabelManager() { when(mC.getConfigurationProvider()).thenReturn( new LocalConfigurationProvider()); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host0 = "host_0"; NodeManager nm0 = registerNode(rm, host0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(10 * GB, 10)); + Resources.createResource(10 * GB, 10), mockNodeStatus); // ResourceRequest priorities Priority priority0 = Priority.newInstance(0); @@ -2097,17 +2106,20 @@ public void testMoveAppSameParent() throws Exception { public void testMoveAppForMoveToQueueWithFreeCap() throws Exception { ResourceScheduler scheduler = resourceManager.getResourceScheduler(); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(4 * GB, 1)); + Resources.createResource(4 * GB, 1), mockNodeStatus); // Register node2 String host_1 = "host_1"; NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(2 * GB, 1)); + Resources.createResource(2 * GB, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -2213,17 +2225,19 @@ public void testMoveAppSuccess() throws Exception { ResourceScheduler scheduler = resourceManager.getResourceScheduler(); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(5 * GB, 1)); + Resources.createResource(5 * GB, 1), mockNodeStatus); // Register node2 String host_1 = "host_1"; NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(5 * GB, 1)); + Resources.createResource(5 * GB, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -2335,11 +2349,13 @@ protected RMNodeLabelsManager createNodeLabelManager() { ResourceScheduler scheduler = resourceManager.getResourceScheduler(); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(6 * GB, 1)); + Resources.createResource(6 * GB, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -2383,17 +2399,19 @@ protected RMNodeLabelsManager createNodeLabelManager() { public void testMoveAppQueueMetricsCheck() throws Exception { ResourceScheduler scheduler = resourceManager.getResourceScheduler(); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register 
node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(5 * GB, 1)); + Resources.createResource(5 * GB, 1), mockNodeStatus); // Register node2 String host_1 = "host_1"; NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(5 * GB, 1)); + Resources.createResource(5 * GB, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -4594,9 +4612,12 @@ private void sentRMContainerLaunched(MockRM rm, ContainerId containerId) { } @Test public void testRemovedNodeDecomissioningNode() throws Exception { + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register nodemanager NodeManager nm = registerNode("host_decom", 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); RMNode node = resourceManager.getRMContext().getRMNodes().get(nm.getNodeId()); @@ -4639,10 +4660,14 @@ public void handle(Event event) { ((CapacityScheduler) resourceManager.getResourceScheduler()) .setRMContext(spyContext); ((AsyncDispatcher) mockDispatcher).start(); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 2e043fb048128..05ec09e66741f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.YarnAuthorizationProvider; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; @@ -124,6 +125,7 @@ import java.util.Set; import java.util.stream.Collectors; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.assertj.core.api.Assertions.assertThat; import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES; import static org.junit.Assert.assertEquals; @@ -4862,9 +4864,12 @@ public void testUserAsDefaultQueueWithLeadingTrailingSpaceUserName() @Test public void testRemovedNodeDecomissioningNode() throws Exception { + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register nodemanager NodeManager nm = 
registerNode("host_decom", 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); RMNode node = resourceManager.getRMContext().getRMNodes().get(nm.getNodeId()); @@ -4907,10 +4912,14 @@ public void handle(Event event) { ((FairScheduler) resourceManager.getResourceScheduler()) .setRMContext(spyContext); ((AsyncDispatcher) mockDispatcher).start(); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); RMNode node = resourceManager.getRMContext().getRMNodes().get(nm_0.getNodeId()); @@ -4949,11 +4958,13 @@ public void handle(Event event) { } private NodeManager registerNode(String hostName, int containerManagerPort, - int httpPort, String rackName, - Resource capability) + int httpPort, String rackName, + Resource capability, NodeStatus nodeStatus) throws IOException, YarnException { + NodeStatus mockNodeStatus = createMockNodeStatus(); + NodeManager nm = new NodeManager(hostName, containerManagerPort, httpPort, - rackName, capability, resourceManager); + rackName, capability, resourceManager, mockNodeStatus); // after YARN-5375, scheduler event is processed in rm main dispatcher, // wait it processed, or may lead dead lock diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 01fb6a79b4a15..9b3657e00d5a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -33,6 +34,7 @@ import java.util.Map; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -143,10 +145,10 @@ public void tearDown() throws Exception { private NodeManager registerNode(String hostName, int containerManagerPort, int nmHttpPort, String rackName, - Resource capability) + Resource capability, NodeStatus nodeStatus) throws IOException, YarnException { NodeManager nm = new NodeManager(hostName, containerManagerPort, - nmHttpPort, rackName, capability, resourceManager); + nmHttpPort, rackName, capability, resourceManager, nodeStatus); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(resourceManager.getRMContext().getRMNodes() .get(nm.getNodeId())); @@ -406,19 +408,21 @@ public void testFifoScheduler() throws Exception { 
LOG.info("--- START: testFifoScheduler ---"); final int GB = 1024; - + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(4 * GB, 1)); + Resources.createResource(4 * GB, 1), mockNodeStatus); nm_0.heartbeat(); // Register node2 String host_1 = "host_1"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(2 * GB, 1)); + Resources.createResource(2 * GB, 1), mockNodeStatus); nm_1.heartbeat(); // ResourceRequest priorities @@ -1197,9 +1201,12 @@ public void testResourceOverCommit() throws Exception { @Test public void testRemovedNodeDecomissioningNode() throws Exception { + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register nodemanager NodeManager nm = registerNode("host_decom", 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); RMNode node = resourceManager.getRMContext().getRMNodes().get(nm.getNodeId()); @@ -1242,10 +1249,14 @@ public void handle(Event event) { ((FifoScheduler) resourceManager.getResourceScheduler()) .setRMContext(spyContext); ((AsyncDispatcher) mockDispatcher).start(); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java index c3f41f62f6f4c..dc028fe743cb3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.assertj.core.api.Assertions.assertThat; import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.assertResponseStatusCode; import static org.junit.Assert.assertEquals; @@ -241,8 +242,10 @@ private RMNode getRunningRMNode(String host, int port, int memory) { } private void sendStartedEvent(RMNode node) { + NodeStatus mockNodeStatus = createMockNodeStatus(); ((RMNodeImpl) node) - .handle(new RMNodeStartedEvent(node.getNodeID(), null, null)); + .handle(new RMNodeStartedEvent(node.getNodeID(), null, null, + mockNodeStatus)); } private void sendLostEvent(RMNode node) { From 9ac498e30057de1291c3e3128bceaa1af9547c67 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Wed, 1 Jul 2020 12:30:10 +0800 Subject: [PATCH 069/131] HDFS-15416. 
Improve DataStorage#addStorageLocations() for empty locations. Contibuted by jianghua zhu. --- .../hdfs/server/datanode/DataStorage.java | 5 ++++ .../hdfs/server/datanode/TestDataStorage.java | 28 +++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 2447fd7137236..b7faecb1ad599 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -388,6 +388,11 @@ synchronized List addStorageLocations(DataNode datanode, try { final List successLocations = loadDataStorage( datanode, nsInfo, dataDirs, startOpt, executor); + + if (successLocations.isEmpty()) { + return Lists.newArrayList(); + } + return loadBlockPoolSliceStorage( datanode, nsInfo, successLocations, startOpt, executor); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java index 6c494519672b1..f82462a384f39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java @@ -44,6 +44,7 @@ public class TestDataStorage { private final static String DEFAULT_BPID = "bp-0"; private final static String CLUSTER_ID = "cluster0"; + private final static String CLUSTER_ID2 = "cluster1"; private final static String BUILD_VERSION = "2.0"; private final static String SOFTWARE_VERSION = "2.0"; private final static long CTIME = 1; @@ -165,6 +166,33 @@ public void testAddStorageDirectories() throws IOException, assertEquals(6, storage.getNumStorageDirs()); } + @Test + public void testAddStorageDirectoriesFailure() throws IOException { + final int numLocations = 1; + List locations = createStorageLocations(numLocations); + assertEquals(numLocations, locations.size()); + + NamespaceInfo namespaceInfo = new NamespaceInfo(0, CLUSTER_ID, + DEFAULT_BPID, CTIME, BUILD_VERSION, SOFTWARE_VERSION); + List successLocations = storage.addStorageLocations( + mockDN, namespaceInfo, locations, START_OPT); + assertEquals(1, successLocations.size()); + + // After the DataNode restarts, the value of the clusterId is different + // from the value before the restart. + storage.unlockAll(); + DataNode newMockDN = Mockito.mock(DataNode.class); + Mockito.when(newMockDN.getConf()).thenReturn(new HdfsConfiguration()); + DataStorage newStorage = new DataStorage(); + NamespaceInfo newNamespaceInfo = new NamespaceInfo(0, CLUSTER_ID2, + DEFAULT_BPID, CTIME, BUILD_VERSION, SOFTWARE_VERSION); + successLocations = newStorage.addStorageLocations( + newMockDN, newNamespaceInfo, locations, START_OPT); + assertEquals(0, successLocations.size()); + newStorage.unlockAll(); + newMockDN.shutdown(); + } + @Test public void testMissingVersion() throws IOException, URISyntaxException { From de2cb8626016f22b388da7796082b2e160059cf6 Mon Sep 17 00:00:00 2001 From: Yiqun Lin Date: Wed, 1 Jul 2020 14:06:27 +0800 Subject: [PATCH 070/131] HDFS-15410. Add separated config file hdfs-fedbalance-default.xml for fedbalance tool. Contributed by Jinglun. 
--- .../hadoop/tools/fedbalance/FedBalance.java | 48 ++++++++++++------- .../tools/fedbalance/FedBalanceConfigs.java | 17 ++++--- ...nceOptions.java => FedBalanceOptions.java} | 16 +++---- .../procedure/BalanceProcedureScheduler.java | 7 +-- .../resources/hdfs-fedbalance-default.xml | 41 ++++++++++++++++ .../tools/fedbalance/TestFedBalance.java | 34 +++++++++++++ 6 files changed, 125 insertions(+), 38 deletions(-) rename hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/{DistCpBalanceOptions.java => FedBalanceOptions.java} (86%) create mode 100644 hadoop-tools/hadoop-federation-balance/src/main/resources/hdfs-fedbalance-default.xml create mode 100644 hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestFedBalance.java diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalance.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalance.java index adfb40bf74b38..8252957be4db5 100644 --- a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalance.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalance.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.tools.fedbalance; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; @@ -25,7 +26,6 @@ import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedure; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; @@ -34,7 +34,6 @@ import org.apache.hadoop.tools.fedbalance.procedure.BalanceJob; import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedureScheduler; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.slf4j.Logger; @@ -45,14 +44,13 @@ import java.util.Collection; import java.util.concurrent.TimeUnit; -import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.ROUTER; -import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.FORCE_CLOSE_OPEN; -import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.MAP; -import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.BANDWIDTH; -import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.TRASH; -import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.DELAY_DURATION; -import static org.apache.hadoop.tools.fedbalance.DistCpBalanceOptions.CLI_OPTIONS; -import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.FEDERATION_BALANCE_CLASS; +import static org.apache.hadoop.tools.fedbalance.FedBalanceOptions.ROUTER; +import static org.apache.hadoop.tools.fedbalance.FedBalanceOptions.FORCE_CLOSE_OPEN; +import static org.apache.hadoop.tools.fedbalance.FedBalanceOptions.MAP; +import static org.apache.hadoop.tools.fedbalance.FedBalanceOptions.BANDWIDTH; +import static org.apache.hadoop.tools.fedbalance.FedBalanceOptions.TRASH; +import static org.apache.hadoop.tools.fedbalance.FedBalanceOptions.DELAY_DURATION; +import static 
org.apache.hadoop.tools.fedbalance.FedBalanceOptions.CLI_OPTIONS; import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.TrashOption; /** @@ -73,6 +71,10 @@ public class FedBalance extends Configured implements Tool { private static final String MOUNT_TABLE_PROCEDURE = "mount-table-procedure"; private static final String TRASH_PROCEDURE = "trash-procedure"; + private static final String FED_BALANCE_DEFAULT_XML = + "hdfs-fedbalance-default.xml"; + private static final String FED_BALANCE_SITE_XML = "hdfs-fedbalance-site.xml"; + /** * This class helps building the balance job. */ @@ -210,7 +212,7 @@ public FedBalance() { public int run(String[] args) throws Exception { CommandLineParser parser = new GnuParser(); CommandLine command = - parser.parse(DistCpBalanceOptions.CLI_OPTIONS, args, true); + parser.parse(FedBalanceOptions.CLI_OPTIONS, args, true); String[] leftOverArgs = command.getArgs(); if (leftOverArgs == null || leftOverArgs.length < 1) { printUsage(); @@ -355,19 +357,33 @@ private void printUsage() { CLI_OPTIONS); } + /** + * Loads properties from hdfs-fedbalance-default.xml into configuration + * object. + * + * @return Configuration which includes properties from + * hdfs-fedbalance-default.xml and hdfs-fedbalance-site.xml + */ + @VisibleForTesting + static Configuration getDefaultConf() { + Configuration config = new Configuration(); + config.addResource(FED_BALANCE_DEFAULT_XML); + config.addResource(FED_BALANCE_SITE_XML); + return config; + } + /** * Main function of the FedBalance program. Parses the input arguments and * invokes the FedBalance::run() method, via the ToolRunner. * @param argv Command-line arguments sent to FedBalance. */ public static void main(String[] argv) { - Configuration conf = new HdfsConfiguration(); - Class balanceClazz = (Class) conf - .getClass(FEDERATION_BALANCE_CLASS, FedBalance.class); - Tool balancer = ReflectionUtils.newInstance(balanceClazz, conf); + Configuration conf = getDefaultConf(); + FedBalance fedBalance = new FedBalance(); + fedBalance.setConf(conf); int exitCode; try { - exitCode = ToolRunner.run(balancer, argv); + exitCode = ToolRunner.run(fedBalance, argv); } catch (Exception e) { LOG.warn("Couldn't complete FedBalance operation.", e); exitCode = -1; diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceConfigs.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceConfigs.java index 952aef20d9048..efe906bbc7801 100644 --- a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceConfigs.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceConfigs.java @@ -24,9 +24,7 @@ */ @InterfaceAudience.Private public final class FedBalanceConfigs { - /* The class used for federation balance */ - public static final String FEDERATION_BALANCE_CLASS = - "federation.balance.class"; + public static final String LAST_SNAPSHOT_NAME = "DISTCP-BALANCE-CURRENT"; public static final String CURRENT_SNAPSHOT_NAME = "DISTCP-BALANCE-NEXT"; /* Specify the behaviour of trash. */ @@ -34,17 +32,18 @@ public enum TrashOption { TRASH, DELETE, SKIP } - /* The worker threads number of the BalanceProcedureScheduler */ + /* The worker threads number of the BalanceProcedureScheduler. + BalanceProcedureScheduler is responsible for scheduling a balance job, + including submit, run, delay and recover. 
*/ public static final String WORK_THREAD_NUM = - "hadoop.hdfs.procedure.work.thread.num"; + "hdfs.fedbalance.procedure.work.thread.num"; public static final int WORK_THREAD_NUM_DEFAULT = 10; - /* The uri of the journal */ + /* The uri of the journal, the journal file is used for handling the job + persistence and recover. */ public static final String SCHEDULER_JOURNAL_URI = - "hadoop.hdfs.procedure.scheduler.journal.uri"; + "hdfs.fedbalance.procedure.scheduler.journal.uri"; public static final String JOB_PREFIX = "JOB-"; public static final String TMP_TAIL = ".tmp"; - public static final String JOURNAL_CLASS = - "hadoop.hdfs.procedure.journal.class"; private FedBalanceConfigs(){} } diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/DistCpBalanceOptions.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceOptions.java similarity index 86% rename from hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/DistCpBalanceOptions.java rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceOptions.java index 704ffd9dccf26..71a7d9db00b0e 100644 --- a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/DistCpBalanceOptions.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceOptions.java @@ -23,23 +23,23 @@ /** * Command line options of FedBalance. */ -public final class DistCpBalanceOptions { +public final class FedBalanceOptions { /** * The private construct protects this class from being instantiated. */ - private DistCpBalanceOptions() {} + private FedBalanceOptions() {} /** * Run in router-based federation mode. */ final static Option ROUTER = new Option("router", false, - "If `true` the command runs in router mode. The source path is " - + "taken as a mount point. It will disable write by setting the mount" - + " point readonly. Otherwise the command works in normal federation" - + " mode. The source path is taken as the full path. It will disable" - + " write by cancelling all permissions of the source path. The" - + " default value is `true`."); + "If this option is set then the command runs in router mode." + + " The source path is taken as a mount point. It will disable write" + + " by setting the mount point readonly. Otherwise the command works" + + " in normal federation mode. The source path is taken as the full" + + " path. 
It will disable write by cancelling all permissions of the" + + " source path."); /** * If true, in DIFF_DISTCP stage it will force close all open files when diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java index a27db10673396..d7e1c7cca5967 100644 --- a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java @@ -35,14 +35,12 @@ import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.WORK_THREAD_NUM; import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.WORK_THREAD_NUM_DEFAULT; -import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.JOURNAL_CLASS; /** *
  * The state machine framework consist of:
@@ -115,9 +113,8 @@ public synchronized void init(boolean recoverJobs) throws IOException {
     this.readerThread.start();
 
     // init journal.
-    Class clazz = (Class) conf
-        .getClass(JOURNAL_CLASS, BalanceJournalInfoHDFS.class);
-    journal = ReflectionUtils.newInstance(clazz, conf);
+    journal = new BalanceJournalInfoHDFS();
+    journal.setConf(conf);
 
     if (recoverJobs) {
       recoverAllJobs();
diff --git a/hadoop-tools/hadoop-federation-balance/src/main/resources/hdfs-fedbalance-default.xml b/hadoop-tools/hadoop-federation-balance/src/main/resources/hdfs-fedbalance-default.xml
new file mode 100644
index 0000000000000..d769832273378
--- /dev/null
+++ b/hadoop-tools/hadoop-federation-balance/src/main/resources/hdfs-fedbalance-default.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration>
+    <property>
+        <name>hdfs.fedbalance.procedure.scheduler.journal.uri</name>
+        <value>hdfs://localhost:8020/tmp/procedure</value>
+        <description>
+            The uri of the journal, the journal file is used for handling the
+            job persistence and recover.
+        </description>
+    </property>
+
+    <property>
+        <name>hdfs.fedbalance.procedure.work.thread.num</name>
+        <value>10</value>
+        <description>
+            The worker threads number of the BalanceProcedureScheduler.
+            BalanceProcedureScheduler is responsible for scheduling a balance
+            job, including submit, run, delay and recover.
+        </description>
+    </property>
+
+</configuration>
diff --git a/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestFedBalance.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestFedBalance.java
new file mode 100644
index 0000000000000..0d29e6ffad933
--- /dev/null
+++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestFedBalance.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools.fedbalance;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import static junit.framework.TestCase.assertNotNull;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.WORK_THREAD_NUM;
+
+public class TestFedBalance {
+  @Test
+  public void testLoadFedBalanceDefaultConf() {
+    Configuration conf = FedBalance.getDefaultConf();
+    assertNotNull(conf.get(SCHEDULER_JOURNAL_URI));
+    assertNotNull(conf.get(WORK_THREAD_NUM));
+  }
+}
\ No newline at end of file

From ff8bb672000980f3de7391e5d268e789d5cbe974 Mon Sep 17 00:00:00 2001
From: Yiqun Lin 
Date: Wed, 1 Jul 2020 14:18:18 +0800
Subject: [PATCH 071/131] HDFS-15374. Add documentation for fedbalance tool.
 Contributed by Jinglun.

---
 hadoop-project/src/site/site.xml              |   1 +
 .../site/markdown/HDFSFederationBalance.md    | 171 ++++++++++++++++++
 .../src/site/resources/css/site.css           |  30 +++
 .../images/BalanceProcedureScheduler.png      | Bin 0 -> 48275 bytes
 4 files changed, 202 insertions(+)
 create mode 100644 hadoop-tools/hadoop-federation-balance/src/site/markdown/HDFSFederationBalance.md
 create mode 100644 hadoop-tools/hadoop-federation-balance/src/site/resources/css/site.css
 create mode 100644 hadoop-tools/hadoop-federation-balance/src/site/resources/images/BalanceProcedureScheduler.png

diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index c3c0f19319f11..4c9d356e3e95b 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -198,6 +198,7 @@
       
       
       
+      
       
       
       
diff --git a/hadoop-tools/hadoop-federation-balance/src/site/markdown/HDFSFederationBalance.md b/hadoop-tools/hadoop-federation-balance/src/site/markdown/HDFSFederationBalance.md
new file mode 100644
index 0000000000000..ff42eaf552e2b
--- /dev/null
+++ b/hadoop-tools/hadoop-federation-balance/src/site/markdown/HDFSFederationBalance.md
@@ -0,0 +1,171 @@
+
+
+HDFS Federation Balance Guide
+=====================
+
+
+
+Overview
+--------
+
+  HDFS Federation Balance is a tool balancing data across different federation
+  namespaces. It uses [DistCp](../hadoop-distcp/DistCp.html) to copy data from
+  the source path to the target path. First it creates a snapshot at the source
+  path and submits the initial distcp. Second it uses distcp diff to do the
+  incremental copy until the source and the target are the same. Then, if it's
+  working in RBF mode, it updates the mount table in Router. Finally it moves the
+  source to trash.
+
+  This document aims to describe the usage and design of the HDFS Federation
+  Balance.
+
+Usage
+-----
+
+### Basic Usage
+
+  The hdfs federation balance tool supports both normal federation clusters and
+  router-based federation clusters. Take rbf as an example. Suppose we have a
+  mount entry in Router:
+
+    Source       Destination
+    /foo/src     hdfs://namespace-0/foo/src
+
+  The command below runs an hdfs federation balance job. The first parameter is
+  the mount entry. The second one is the target path which must include the
+  target cluster. The option `-router` indicates this is in router-based
+  federation mode.
+
+    bash$ /bin/hadoop fedbalance -router submit /foo/src hdfs://namespace-1/foo/dst
+
+  It copies data from hdfs://namespace-0/foo/src to hdfs://namespace-1/foo/dst
+  incrementally and finally updates the mount entry to:
+
+    Source       Destination
+    /foo/src     hdfs://namespace-1/foo/dst
+
+  If the hadoop shell process exits unexpectedly, we can use the command below
+  to continue the unfinished job:
+
+    bash$ /bin/hadoop fedbalance continue
+
+  This will scan the journal to find all the unfinished jobs, recover and
+  continue to execute them.
+
+  If we want to balance in a normal federation cluster, use the command below.
+
+    bash$ /bin/hadoop fedbalance submit hdfs://namespace-0/foo/src hdfs://namespace-1/foo/dst
+
+  In normal federation mode the source path must include the path scheme.
+
+### RBF Mode And Normal Federation Mode
+
+  The hdfs federation balance tool has 2 modes:
+
+  * the router-based federation mode (RBF mode).
+  * the normal federation mode.
+
+  By default the command runs in the normal federation mode. You can specify the
+  rbf mode by using the option `-router`.
+
+  In the rbf mode the first parameter is taken as the mount point. It disables
+  write by setting the mount point readonly.
+
+  In the normal federation mode the first parameter is taken as the full path of
+  the source. The first parameter must include the source cluster. It disables
+  write by cancelling all the permissions of the source path.
+
+  For details about disabling write, see [HDFS FedBalance](#HDFS_FedBalance).
+
+### Command Options
+
+Command `submit` has the following options (an example invocation follows the table):
+
+| Option key                     | Description                          | Default |
+| ------------------------------ | ------------------------------------ | ------- |
+| -router | Run in router-based federation mode. | Normal federation mode. |
+| -forceCloseOpen | Force close all open files when there is no diff in the DIFF_DISTCP stage. | Wait until there is no open files. |
+| -map | Max number of concurrent maps to use for copy. | 10 |
+| -bandwidth | Specify bandwidth per map in MB. | 10 |
+| -delay | Specify the delay duration (in milliseconds) when the job needs to retry. | 1000 |
+| -moveToTrash | This option has 3 values: `trash` (move the source path to trash), `delete` (delete the source path directly) and `skip` (skip both trash and deletion). By default the server side trash interval is used. If the trash is disabled on the server side, the default trash interval of 60 minutes is used. | trash |
+
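+For example, a `submit` command that combines several of these options for the
+mount entry from the Basic Usage section might look like this (the map and
+bandwidth values are only illustrative):
+
+    bash$ /bin/hadoop fedbalance -router -forceCloseOpen -map 20 -bandwidth 30 submit /foo/src hdfs://namespace-1/foo/dst
+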
+### Configuration Options
+
+
+Set configuration options in hdfs-fedbalance-site.xml (an example site file follows the table).
+
+| Configuration key              | Description                          | Default |
+| ------------------------------ | ------------------------------------ | ------- |
+| hdfs.fedbalance.procedure.work.thread.num | The worker threads number of the BalanceProcedureScheduler. BalanceProcedureScheduler is responsible for scheduling a balance job, including submit, run, delay and recover. | 10 |
+| hdfs.fedbalance.procedure.scheduler.journal.uri | The uri of the journal, the journal file is used for handling the job persistence and recover. | hdfs://localhost:8020/tmp/procedure |
+
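+For example, a minimal hdfs-fedbalance-site.xml that overrides both values
+might look like this (the journal uri and thread count are only placeholders
+for your own cluster):
+
+    <configuration>
+      <property>
+        <name>hdfs.fedbalance.procedure.scheduler.journal.uri</name>
+        <value>hdfs://nn1.example.com:8020/tmp/fedbalance</value>
+      </property>
+      <property>
+        <name>hdfs.fedbalance.procedure.work.thread.num</name>
+        <value>20</value>
+      </property>
+    </configuration>
+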
+Architecture of HDFS Federation Balance
+----------------------
+
+  The components of the HDFS Federation Balance can be classified into the
+  following categories:
+
+  * Balance Procedure Scheduler
+  * HDFS FedBalance
+
+### Balance Procedure Scheduler
+
+  The Balance Procedure Scheduler implements a state machine. It's responsible
+  for scheduling a balance job, including submit, run, delay and recover.
+  The model is shown below:
+
+  ![Balance Procedure Scheduler](images/BalanceProcedureScheduler.png)
+
+  * After a job is submitted, the job is added to the pendingQueue.
+  * The worker threads take jobs and run them. Journals are written to storage.
+  * If writing the journal fails, the job is added to the recoverQueue for later
+    recovery. If a worker thread catches a RetryTaskException, it adds the job to
+    the delayQueue.
+  * The rooster thread takes a job from the delayQueue and adds it back to the
+    pendingQueue.
+  * When a scheduler starts, it scans all the unfinished jobs from the journal
+    and adds them to the recoverQueue. The recover thread will recover them from
+    the journal and add them back to the pendingQueue.
+
+### HDFS FedBalance
+
+  HDFS FedBalance is implemented as a job of the state machine. All the distcp
+  balance logic is implemented here. An HDFS FedBalance job consists of 3
+  procedures:
+
+  * DistCpProcedure: This is the first procedure. It handles all the data copy
+    work. There are 6 stages:
+    * PRE_CHECK: Do the pre-check of the src and dst path.
+    * INIT_DISTCP: Create a snapshot of the source path and distcp it to the
+      target.
+    * DIFF_DISTCP: Submit distcp with `-diff` round by round to sync source and
+      target paths. If `-forceCloseOpen` is set, this stage will finish when
+      there is no diff between src and dst. Otherwise this stage only finishes
+      when there is no diff and no open files.
+    * DISABLE_WRITE: Disable write operations so the src won't be changed. When
+      working in router mode, it is done by making the mount point readonly.
+      In normal federation mode it is done by cancelling all the permissions of
+      the source path.
+    * FINAL_DISTCP: Force close all the open files and submit the final distcp.
+    * FINISH: Do the cleanup work. In normal federation mode the finish stage
+      also restores the permission of the dst path.
+
+  * MountTableProcedure: This procedure updates the mount entry in Router. The
+    readonly flag of the mount point is unset and its destination is updated.
+    This procedure is activated only when the option `-router` is specified.
+
+  * TrashProcedure: This procedure moves the source path to trash.
+
+  After all 3 procedures finish, the balance job is done.
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-federation-balance/src/site/resources/css/site.css b/hadoop-tools/hadoop-federation-balance/src/site/resources/css/site.css
new file mode 100644
index 0000000000000..f830baafa8cc8
--- /dev/null
+++ b/hadoop-tools/hadoop-federation-balance/src/site/resources/css/site.css
@@ -0,0 +1,30 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
+
diff --git a/hadoop-tools/hadoop-federation-balance/src/site/resources/images/BalanceProcedureScheduler.png b/hadoop-tools/hadoop-federation-balance/src/site/resources/images/BalanceProcedureScheduler.png
new file mode 100644
index 0000000000000000000000000000000000000000..0a070027665dc3addbac7b8b45fdedff3078523d
GIT binary patch
literal 48275
zcmZ^KXIxWF6E7Vp(tB5WCxjA;^qLSLA#|ieXo1iPB1J`-^o~gHB1J@`BOR$CN>z#o
zs3=uH>ODNq^ZxGraz7+FXV0G5nc3Od*_nUhj16^flCzQH;o;rX*V97c;So&W;o-C1
zAOTupR7(Z$@W|Q%v@HTKA+GLTE_nQ4&3~`>rKQ|_`~&#GTKv+|4p^*&v%3S@&jI5v
z;q4LtGy%^sKF;p0?k>*%_K}v7mJ*ke5to)jfx!G=b!mCv3oI)kC8uEZZ+{20i}!y7
zfo6b!kOc_D57q##4BWi~LxA59h@73Aj4*Jej>USpn7cT_+=0<_WWn+hV7cp8V0tLH
z0l%~+aP8&p=>q)dx;T0IT)(2}=I4U}T67>%5>gUi1&F)^NJas8VB+BF;OGATkh$jG
zCBOmwALphBgdxHu#M2QH;Ad{`Wrslq{>yQYi=V%{kN3abKwy9>=-&qcIIPRRt!N(~
zv==a_G(T7yP~Lx90_eN{r;d
z4{5Nryjg&svz)$+uCWZr)i+pAQ^Qpj2J>+C4-E{JmXebP6sI8zeB47c%?vDkPzVJJ
zSp_8A6JhG14roOK<^>17gRn-H<`x)uP^huDHx3F2uOW@r3UCIS!)3g|I3rgL6KO-Z
znQMTlW)RjK3Yr$dI7

ITohsuX|nVIVtW3lQMRz8k88peJ|IK~NJT?c6u?1VHyD(L#kIeF8|PpS*77m3@&KzF7{LRe2tzHDo}Mq%)zn)7B`t-5qrq5j zQ=cF=kPiHs8DCd7tfz(xM!~~d#{;-`M`HpsP}blT~DaA4%ose5bdL< zhtUo)@YmOtN5L@8?)vf|Z7C$eB*+1&Ck-;tw*=Vr0nA$iVW{VT#9*x)13*}RDHzII z(>cHl9gI`=kq!;?bF!2Q_Sc1k=pxjEy>UiXezL*lp%7h|p=F4hv^Pu(=A+=L9;k1K z0a-YLgADbJJf+<|-F+=|Je@+_F#e&I##oq~lcuSgvpNV`T2;<7ed(EN>RE-3Gr}8V7x+%9DU3k)qO3b0{yVE3eM7o&PcOBke8kVECB4J z8|-Lmg>&`>Bb-qvPY@al!dQ4ColQ{~h!t8v4h=8>NP@neXdfxpAa7~M>#HDFv@FaY zBZV~b#>hCjnS1D%8~OR`%i&A|+yJZc3p9WMj=&6LY-thz7^Q}(ysj_G%PB}!$65B; z1OqkIWneyX7EW?#H%~Ye<%9~*gz7*c`oX$zh=~@+(A&`w=8r?@_{hL@gK;6UaCHR* zU5u|M!dTWH6KsfvgS?@RNFP~CgqD|{kCl$Mg`SqPE=CFhDAN*W z=7EEUA`OE408umoTy^E-<)K>A9=>w2?v@@HuoKeH7-Z@aYGA4BrjNxs$b#IBoppQy zv^BgTt}+@L(!K!4QocBICchfl5&uR8yY)!%3FHkTtHH2 zD=8dE+R4Qmt7QU~cZB))>SzY|!c5^{S*)K41mFOmCId%@U_cNZ6HAOH?ApZv&Dv5X z7G44J>H$IMpkNu49u(`P0YR8bx#DEya8kk05R@qfgOPJKkO_e4m}1^#=w|5 z0SF&Apu2?>U;tivjtbglZb+D%fw8oLiwxl6HA0bKIX@o@?`tD4*VclW`asc^2n%0* zh#Aye9d_-Y+)Y41hQ4MnDKnUhluxjNt3FCy-Xy@;Tt-3G(Zs~vH3*5+ax?dqu{1S@ zqU1d>NNsPVyDwBq#@!*%)KSVR&;T3)0spH32*gAx*jd^;1mx)&WGLh66o|#a9Hp@s zBnA=`Anye7(M9W9g?i{(n7O(cYJyzQ&U#*&A;I1zLAtUSSuZz`nU=hPW{5vX!QI6| z77=JAW#xc?IOqU2t6>5TMrj92V-U_p0mcEw!7`eDZYT{6A6cA^zC6-J!3h1%hse7qgdmI|dO%nKu=c;m;-4@Bxc@Iq18d)>8H&Ked*)Yv)qLFcrfyvO7OT0=umHm?)a{$@M!)tV?o56! z+nP~$c0aUW_Br$SFju&{zxGa*65Pc*kMVvzb(21-S zlEQov<4_4>A*P}lZfqj-_(b&Tm9q;j%}zH|k%kt(ITBCkb9>C**P*XFciZ{8ss16O z#%RAXW=r%xKH3NhRU>p}3&S;X>LjMmj_a2i#(v67e<%*zyDgTU?t3uJpOc%bf3Y-2 zkVB9O$0wP{{MA1np8F3Alp<3-U$}O}#3EJhJ7WW-U-KCm%}`ShS=-sMTC-SFnW*gn zqt%|E$AX3#?8&8wovrX4Cm~!kkpaxixwe&J;)bIi8J)Jbb1<4&RM9WWkwQH^tw;{= zBHzDSf)fvG_Xr#v4cm6+&4Y;)N)7|6B<+&2ySlb}i+I`^(YXW~B0W8pdia*X|4CH! zhG5jZwr3DMnF4k*9w4nQyYo?5xuMDhR%_c_lP1#hdYmDpgMc!hzh2>KDliq-e33I2 zeQ^~O>=}IQe!Gf5UA=1O*LcUogCBzhX!0Ayc!cO%{Z4oPkr~fK4MJSAwZ)UJWgzPR zt~DN-Q$RJ&D4LR*XZ4=eLKWabO^uiZLLtO*FaJO4T9uJ$X><)fgtz7#h{6BNFa2W|MQNgdD8tN=QB5fXFMk{0zhwX(=h$w$|0fzvF~D{-L;C;s zxf*_b?2Q0HmnNJH$pk^HJszZ9aNhW)el~ThN<_3YKA!wYNzu{m z*MjyWz{q}}GIp^;|0|<$WR}3)>R(!(N^;p8PSl?;}w&&jy=gr5bSH26uYn7i^o81cXtV zF_6i<#iMQf?O#!Hn3G?(E;m^q33g6$geFBffEMQe(08X$t2631_t~*QBv#F9VykP~Jh$*q{-Q6B ztc>xtKU@1^1upb)z6g1ei4wDn#32SD6Tmra6FdB)ZNECa)(qzUzF;p@Ie8*iwrf4l z_tmn`{R9(STxw8qDQ^z+J-!8-774YI_ocf2mtJ3WdfulJ&USU-Mg8nhHq^>fc5)=x z{&F+tDPb}fzSAIH@J=}4W+dLqjIjNlqZ&N%;Eb9ZkCCwm#$03~IHBynf!7&|KU+`u zbC$4t&hn8WAuxj@LyEu*ngTOe6yLb{?+h|92pu2`;>o>2dVEbyO@(W-u7n2&CU(By z4<-5Do>WlinJ({n>C*EW(^F%rX2v8Hq0->ZsV03tMU{7wg`M#DH~#UCzsl8_505Nk zj#4(^kj?YQnT!oiq`RY%k-4ncQ<;=3k|*5MWtD`UpYXu0cx8^ab1OAT?RV{&d^fI| z?Tw^~oy`DUN#B$4asXD9nu|%%7SdMu{nsZQ9o)BvY^Yy$Rh8z@G1!W&7}$q=QC>M- zj(P)G*??26*IB&9pRW??`TXqQ??cj_gU$TeTKHyAj`--8CC`+iHpUfHHbhJ#x$Z!Cyqx;*(T<}m ziVSc3h)?#T>BTo*TQ)(A;bPoG8LgLI1QY8exAsIOt@`8S(&g^Sp{X1{tn|w7A?e=; ztaLb&3Fe6>4fa@a)Vu`c5-9((HBtTJ?RZFt<0ZZU!`D53Ze7!?k&(UM-@xBUDJ+NW z@+IVHE!FSG%}E)q&ke;8cBQ7~yEQ!H6kOZhB&D$s5LbG7aL;i75}I*fFqD*EPm6us zfOmKZ2}cw3zkAoS|E*`HI=}w@vNL6VXGh&_RXegf_l_7waWB<#3F7MjN2#sq@cJK5 z?nSPZ3dJpn;FcMQRs)5T=R{G?E69R5En2L7W7NUsWWE>+(WAKRTs&{b+d~gh^T%2_ zs_G{GHeFpLe~l7AIQaNXxV$B%fH20dIHDj}K~G>eSQY4a$_FN&5n8ou z^t4e%`GrTyQrgTPx3Ip1*7(MGkHjauE6z zU^7jZeTXMNQbSAEi@!OTs33RK@pH;8?3X4qHP=$-1Nt-b-YZj@C1vlo;}?pQH*c

vC;?f|65_VQ0CUd6C1* zejWj=*6n;^Yce1L;Nd5J*O@WYXLV~bd%ukD5A}z5#(Wo-^fj9pJTCN*A!p-C_N|lZ zV27HH-as7gtmhX;)YfzC_BviRZK<1#RiZPUaQVgYazu{257&dZU&MU+`bHZ2tg)%kp z4ZasAo7OGf1JBLt-3q(zpCLs@N3$hUb93WTQfSsUHhMpQ=4|%(NYL2!gi|RiN%N9W z?eajaWQQ`JagROWxWJh!|IVE|jXqcSOWncLWP84uKLh5xa|yykihS&2lc=b26mfK? zhu{0qk^^D2C*>VQJ7GiG+G<^+RtQ$fZ~AA4I}v{$^hHHMUb#>V365C}Fs#t$o7bHa z3j1<51b(*Z+K4-)BW>}QU-=PE_#U)}4f3rBqN?L=GpRJ3xvd@l*`)?D^ zl$>U&5GTJ{mQ!+aZa7l2N;}_hbacdCoCcz*k?|}>JU4q$ zB6YXYkn{Y{tR0V2SNH{+fPesx6B`SQ{dk`23DBCFk@4suDfMj`8HSr|GDTSU(O@lG zSf?5JUW&oPgY^*;--C%eha8HG;|oC#3)F8$(>%jG5a#h9ru$mDx3+iFMyZ|pE+=P0 z#FYxd=$6UX%wXZ<4S5Ova`yHN!Ou=(Ba@X41}bU(eo|isNfPu1$q6QFO%3o>-nOdW zXXltdU|)FU!MCeCLw)}@Q=$$#D{Jp&oTOR3EnNNY$==tkxkldCKFfvzp${j zyV^H3Jq<)eyVm^OoX$-7WHO}D0}dNI@i+1I&weyyj@A&gU*(gt!!%oCHXqg2`ml<2 z!M+bld>i?(XnxveJ2>>!*qDZRFCc-^X<>=*+-kC`LZ%HLLIXbVp>h^9|TU$&c0^bg$vW?bKePlL@#K_ z5Cx6{!hoG_)i6W6JPTDvCnw=0dKhV3N%}=YY|~VI))FO8`ij9eYxmgQ$UiYV?-EnI ziVg5Bk@h1eh9{n9uDLCkj5itC`A^n9#fu?@leLmyaa>I>+{?q)d$tLldN-oKMfPhM zG&Gm@oOg=r0MUS-6!WR2dsag44S~DDdnW17h&3_jekWg<^D%b>c3(B2Z4Z9}TwxRy z9Vd`A!7Z3KAGoc4K1Gz&;x(^vr-#o2me<@Yne{MV)-o9fe>_~IV~IEOqWke$wt{kl7#)C1hoxt z{^?9UoI&s<+i2xrQR&T!ui=lMYnw=0_fO;|;^Niy&&1WR#3~gPO!FYNt?vk>!PwA0 zAyj-!SP7NIwSoLKz5$r+LHh0A_g0u};(hLvQBH(@3|tM~fp}PF7J4MrV}8srlam#p#S0Ef<#|?+W0E zh~s)G$S3&dhnqcTw-oiYLN~^8vHt$0wr)Vg_j4Q(8w2RzCS_v7(G+IG-7MDp2}XmB zoM!UiCD}7tZf4QO?_^SpjD&wDtlmm|)8A$(lS@Tgun=!}($Z?b|5ial@~uwSV`ZsZ z6}w4Q$pSt8t+w+;&4Q)Uif6>dU|)uwCG1bUN$At{5IJdr!6f20D2 zVV`|jy3bTM@NDkJQ4_m;AmK5kjJUchLW^GG6mZ{So44$9Nuke=IgG6FaB$!CGm04^ zj9xLo%d{!rm7^d?FQYBuc z)^RZ0vZbg8s=D{XwZ7u^8Jy9zC$+UDTQ8IH*qR+vsQ;{eiuKD9mBo60B**&VJ32x# z3H7z#&RGBlC@Q>k3Rhuaj)Z~UXdqnw%c@#AOcvFeEvdjr5clgLo^-Y0+;e6%JiqVA z36`=SJW>8gj_9zvP_vtU#fd!?PZxzl?kzt)Ncn>0+J3}!cfoAiMTBTGKG3x@hhYB& z5Ip`>QGFd+foDW4LOv08_nqh=r%8hLYD`v*h*Bc7L=7R*+S7B}B1j5UJ@vbsWTIaM z5u#3eu;GiBlQC}F&NSDYK-ibeO=whr*M#;z%E}~832zVf?=mcyomvS?v=QK?4MKK> zUW8J`re~!Sp6^p^PL>f(3{VS5OTWBP;^b7~PPBl-n#*{4^ z8JJk%9z>BCRv>%djqLjxceFzCe7W)wNoIvw9LjHgo*`!>T9ptEFD6Q7gD`h<4F6H4 zS0ZV(&?}iJyW35b0`nPAemy+AqE$r(X>TV)Ao7=oh6oJ}`E*&ayF8>ltw2U9^5nh? 
zUVZ{MQ=?3Z-l4m^ISc#uw>V>F2=#X=uu195$&9Eb;~nLnV$0-G*sPrcr{`$$T~tr2 z$<2<$WJ&y?rCP>DR@P-N#$}fm_SyS$FjE`+H0W=vF%4B_8q|;hIwAu(JHhMf>UqMd z>Yb^TL+0jxe|#tj|MBDeSTMeicYm%{wSK>q&h3egi6>>IU*{BTR@dj4CLZRmNS7(0 z22RrX(zq8gvrfB6czK;5?N1_`nkdHNhVo@ABA;AmlXR#bizf*4)Dlem9O?ti9eIDK z7Fp2nLnQoat$O{%va%|=_Q@b(zEZ%XMz+RW9mw@;@acRdhDXy+I=nRgnnq@b3Z}ZGaXu_W z`IXF78$1Nh^T$;)tf@9M ze7obiS+r5I-*}UodYqVogg#Q=sS?eTrR|{>Us-Q$#bN!sspP4Bmf9{@CMNn;wx!ct zm+cdUc_^-@E!xE1)1Ew5Ju2RoilGlVmm*z<2j^#Ic;vadN>H9gKk&MrYNEuVOf319 z4pyQIvlgRr|5Rql#^~*^9<{{zX>s(_?}KcMH?7N$7F*e4li-mFP2lJ)*L784;J8y( z1_&)oHd*099%~I8PS?yVx`qVjP2Rl!7GIG@AAOw$Lm@O)Y{D`*R9}hy(NYTZAl!VL z4~zD2Jk;zCVI%?qMt7OKuQ6Quq6B%VGEo3gzAAtyS^Ri~Be@AJ=Blt`Cm809 zB)0^WzRIGZA~OE2=A##d3AJOCW45i-n^&LJ3ky52;QJLPn;l2=9J`eFK8=;U$4+-$ zoiDw|yb1m#b1~GCdSCF#vp<5Uk2CWv!K|5t2kCUFoGh_p?O&$2q&aE=e@J&Sr~z&> z=_G3;x|`(V^AB75H`3(bIt!IEDDtsAOUt=Wb(~fyjXnUnJ{CCLBqAq{tQtQbLbtbg^4OHcnv-+p!H`?5^l$40jC?u2Zs zJ>S%}wti$!Z8q=R*|t$2zOo6m7JwWqOb!)gE5H3a-zf@d_PJ?x@rw-pqGs-Nk!Y@Y zEA~sD0Brk-Gw!x`e=Cm?g+f%+aQH!9_vP$>``>b+sE3a4T~2FLFXXWKWq!7 z^#?Mi%_b4`I#A>i^~zY&)jQ#aMo&|siZgj}wQySr&g7oc5ha9b2VrXAgGT!&god9l zdg=2?6Q7Oxt9p3+P=1TsX`7!RkhY)kQmkLrl(bQ7W{=oblss&&xTHB<<_k!y6n zsH`~Z!Pr;{CUx>T6~_Y+s-*aakI)uUOMQOxf?dS-?=tTUO2!u#T6C?E_jlR{ds>wl zNB_2QJn-M+%-q#vH;Xw~sbeS`hbKN_cpMyD$Gz}p>1IE4nEK+MEXT#qVR7R=@tu2) zi|p(c=j6LLC-4gi8@A7-JL1ELv&|&1FMS_8%jGl(fEU%4iS0sfZnEFL^i=*ebbF$F zRJTV#=0vtxBV{mAOwuZXqZ|I+bnP~GoR*PQ1M5cf4~Jj%6LUZJ(vEIrU%e@;&y}a- zY@=4+RFOFJWH!2tu4*_|jr6y?Q2N706;sgX7Ln|{RWe18^b_7JdCGbB-M1DX?%Rigo2BP-$Ky_rhmJ`Xv8l+@rjAv%WK8nLnIxh z!o^`wj0^31JTp1={JJ=Cvd)p8uyZ+dK4pmIS|arDsL_b;Z$C@OR)00q^7KrWh24K& zGogv?IrsIu8B;Kodav8P-d^Wb> zn`c@t*lU=Yi*$JM!YwogwuXj_9*pqHFOG~)N{XP;@}I#wM_;Buld>cCa&sAS2wi_JX5q_}vTZ79cE@7gT-q=weEO zSLf~gJZtZ1ac5`e-p>xZmEau~F6E=4l=lg=3>*(UKGuxvgBqImjEiir0;wBjp4O{l zGvKYoH}!gtoGoQf(o;k8sWH8$eJ%E09{TF3Ww@tO5qGi-w&DwQCbVL_AEu@;B=at2 zJG@(a5t~Tb7SiCwWjwuHSpRx~7=B+sh}SUphkc0MC2z9IM)q8~XaWoBP+n0ViWqQ_1+M zgUXj{Du3>qCC-$_qfpdoGNNlZC&(k%PE(>F^}-cF z@h&tpUNnpHw}F|-NG5-#HIO;k0Dby2H}zO`Cyemky$*jr0xvJ;T?Xec{eOzPi7j1S z(p^*WlF28<3e~?1p;Oe)D9nQ;-#lw4-f34!s;gE09~J=1rIKIq`X^AOik6ecEXd5r znGCJrzPepmQnaXPYjQ6JpPLr%SqOd?F_wT(&w=rhC2&5VxU=JYKjJ}3Ou(uOeaBYiWYm1p`>gS<}DQO%=lA>ukJtxYIK3ew) zM*Q?%+C0gD?nC6u>;&g!<-}PZ&a8*9e5#7eGkdLlf%_G6$eZfBOgd}t?4e(LUUL?{ zjlHii#21vRWNzcWzBuzzqC)aB*&R*#OCqz~N+?V>+xG*7d-epq9mSKN-`}d$!u^o6 zTKlPpCG;rSX>#zZGudpLZ zg@+`2i_v$?t0MDaNST_w;y&^@+L2kMaxeU1ctRP=otJU?`&N+ZD^G{;t+_cZNIRr& z-m>9*Dm3)t-uY#B!^`!#nzS^e!{;z5iHps+*O-B)re$;;OmxeRxhn<|1d2PsgE6xC z39e}#V>NFPc^j6OCv`Ur_{$?K4;a=rneXZ;`@8YN(efTTBWuJuen znL`R^a6G-k)`fmynXOL zO6Nf9#Nc-{G(kJ7nv^wnpIU*5Fi}xaZjL_5+aI6j-qobp15gxQVG6dVr@f!Du6CT% zD^k9pyT8Q*Jv6B_ z!7_0bB>R3Iwa#*J^%T{Pl75RDh-RFL=?Ge}ii;oWX=Q;hn;oTZ;qT@=vEaSD_(|;r z@na_@{%ZkxA^#X|^W@Vrs8xubGj1cr6TRzchR6tMaXPod{{&~QhBj6cu{s@hmR zoLvd|t+uHjw3lQek9B4Q;TO|P9M5CYYEC4MQO=)VPH&!}{QBFM38TuqjHb{=RXY%wXcS>kR!ennOjxWjCYSr>g_)oy5GPl z0N8`v_NPxj$oQ97DE(2iw!pi8s^5(qZp2nhI3;>>^%c_L`1fa8c6JJXe}6vh5Y6ZD8`Rcq{>#**0vCpo?x65O5}T-5t(-r6g2Uxk1)Q`{#J0YFZ~RrK zymelgL0P;&xjaqur*#xw)6h^dLk_8>W!!fAV>P4yi{3%MPMpqw8^wGYNN6xH92|#b zTabO1{-RQSTKu__hQ07n=$~4IDg7>_v0xKgIk^~F)8;zY;nF_bl|84nW?E!`{ghuF zci25Gno(();{yI4l;U$NSoM_I+2wu$G{*mYT1K2pUgWOZo-v5dj+Q9=lkhf}FOwrt zf|U4{f+%bQaKg=Yjw3l&ySEl*s-k}VT2Qx!!!2;oUq)W+F+j;H>LH=u_I6~Rtf;(J zWhEgktue)o$mz~|EUMvQ>1ZILQA#;eovOlGdp7{xga;^ijtN0}w&Yx6V)HR7X9yPrUZ@+0Zb6^N~<^YK+gw zLyK_xTF9zILEo`QKvw^4_hmknT)}+{#U;P2AB`k%(aF!O5CLJc zqBI?0_T(rne1eJ6CrP&zPVUR+ZtRG z#&}2I<4^|YuW#PR*Y2$N+$wwrS7u za(4sLq49j`VSC?_fgdBv=L;@A4m>-R@ZiUfAAaJ2K&88rB{wA{vcF%etfJz!xcJTQ 
z-@gyPd#7P&NTqjJICGDOiV&HLfWvRHv9TSdmA!r~7Z4CoR9Q)S|NeazR@S#_k6+#d z6!&vBUA#cJ#~T0lH?`xNlGxB+6lImpA7V^DW`KM>tq;%2Q+tPx#l^WQEht80$MqAA zUp#^6CknoM6;iOeI@28{uG}%)E+$#M{!Z~^2n3Rm^cUyk^ca_NX}MtBID3RC3nPv{ zE9T_1dRyKw91ppeS~7(fG&7M11U80W4DK_F3V{@aFQuhZo~FjtSG()MpgKqmg0714;>U4hV`Jlp=VszF&rL`Je}2;L4E$b4t%M9uTnowi%Ond3m_M+<*GV#X;ZerU8DnkZ~J# z8%1akV_~znh$7Xeagwi>7o={VYI9LX7Z+tjv4zUG2k6Oy)#`dl+s@FqrF@&5*~SM5 zxc^peUQf>q(W$`T6j)j#P*bz2Do({G!q!kCZEY<@@AkZ%BMtW-;y8pI4LfbzaB{JX z$LBY`^sipFRA!3-RcScBH3wYa?R?MF4NvONkAX_5?{;qD2!nA5v7YcB7kWir6^X3h zvSK{6VD>`%zmk`8fiK?sRB+mu|6QG~n`J0?7U5a9n^zW=UI;2rN@6 zcj)5p^xg1XCT2-oyz{RIRCJu2H|YY8fpYze#V>kr_(M$kpj4eXd)DhuRql7QdmC$I z2|m)ky>)|vQfKTe`9|1Zaz(|3t?79(p!_6LAC$;e;sDJh+QffI(pMYBn}3>=*4*Uq z6IiX$|4((S&aE>$sB5s8I2q&%vp#$}(CDs3B=8oK$^GR{d;h{kfeSpdjJ#;{!6=ia_PLaxophwI(mUy3wwk; z{+GFq>00+GwO9Nj!uBH)H>tVa3tbjP5rW*NcsvfO+|u>2M~8>~lK#y07pv`zpP7FS zF;vZvi?J?~MPQbns4Sa0Jar39vnA(CvFYgS_`_QF{30bgoqF@z`1_Jd?edR1fm%cz zh024QJ!%0XlbU_?)l*}VLWi)3=aE9x?tAYP`)A3>WZu;lmuPg3eQImVJQ|xH%q3t% zL;`yq7zj^IZYv+C#KtcAlxJQkMPr$Ea zbO1S$na5;gO6(CGZS^LbCi7eWTEoRhQSx!3oJ~1z8~2GEOolKI74iQ5epTkzlf6>4 zaH^%`7aAXHdcO^EtKMfEd$HXR`iCu5LP1)Zu042Xu%Pqa`<^a^yx^S&8%Z4$uVMrI zSlPD!K06Ef9Z2DGztCy$@OzGezkqo1gi*RBcBA)TT{L{p(#BMtMnV4IMV*8=O1i}MPS;T$ zGxO-k3hfb@Li9&!eHHgK^E@*(u}2>IQ@6|_u{eO%0<^H~#t!r9guIx21R)(C0 zr}JAN?eC#k@;mk+&N7`sdJ^AcJ@~>%$}%z-YVWo)0f-l^WrT#=VmI{@aS!rNLmW4c zfHFa{Wa!#HnYozNs~e+P;+oBVemkQwkaWUZuM_W|QLkKf-qVXsPCo`a{zQ|r~_RNf=hc}>60lgV-46JawgR4B`lMk8YUThgl&l=ePPTwfOoWdh3D>8n|=UKMk zeu2D?TVAW_3jk?uGOq$dD(o(Q9a{af91+6K@q_^x)Ry+z!^+Cd9+UX{;?&_V2UP1< zMA=RHj6>1>{kFr_?5KK`X`9`fxF+V`D~GMmLk@RUtXgO#(udwYBy9|smwxqon)R6u zWpqJfs_{>g{)sqH;$GLISOqB zjn8y85$D>@E|}zLD8FFRtB?n7XTq|X3({*%FRNrrqba|{N_G`0fYEEiseLRT8t~h! zbNVKF@Y>k+@6u?b9GqUxuh)7J^$+!lD$@-ucMMFbCJ!ZjZjWY7`@{HGCVt|l19`e0TeDYWhh!9^rYU@rQ!4Fwl6~DXh zn}=cD$C7qm-*ss=G&K$s+_~pQo3+DEh{h?vUmI3LW-#t{vKjtj|B)xcP{gj3tB=z@ zhfDecDO2}7Hzw+&kEKH5T*7l2;#^tA;{4)V5lWXwGf}}iLNb!x3k;*y867ktTNZ2K z1sQ5wOO@1KnNf^Kzw~WF?!9G4PcA>oG{sPI4o109P^?O#uhXo&I*A$&k6yfwSm5iL?dMt-!tU)5 zhMet^89swVnZ2;eOo;}Yc<8Em3A2D6#+=1w!Eh%_P=$N2mI#tKTNxQ-z}kt zbtsI?&!=~2TtR<)3b3D|CAgyL)y4fOkLCR zv&W0Qgy}WG*Uw$|*$*}@A8Z~cWi2R4ozPDdbVoHYd-MMCG($MgfBDiy(3JUkU}%W+ z*|U79%Vd{KY3s*T#QgK}R98OY-uLd6!r3R5jVSZAUr4hj3meBV=$+&nmQGSHsTzm; zR<%~-QUod+J^MeA$PwXb-zgwse;JaptZB+fRMZ3?-!rme^A-|zauL*Rt@hHBAb9Ql zo}-KRS?(Rp$n4BHxoxAi)wpHmYrx|B^B9Q(lWnXgSbZ4%|@XqQhU$m72ETjl3w^P^EMp?lf& zYkggnP8@08r@EN&r*a@{UD*!aqMV7E82O3KiIynM4>tGd8vzfGWVKW#Gk398RqIwE zA*sQwxj^k>Gc^X^#NV$fdtOj64>%?>3hYYeG9^158-t`pTn_+gJn%(Ckjnyla*{8c z;*MM^kah_j;w!70LbYc!gB=q{zKMB5i_}~G708kVuf*t>Mn=1w9DF#%nLt}l; zE|0lZ#{Vo*<_`#h*mD6iDi66Q%36-QVRa&?_11FHkQTdsthmb%%IzuM3D4w93g)yz-R|#uTlQ!+S7M z^LpE2S-?P3ytJPIVB5;Vl0!2D;_a7vw#IaO5fzNu+=%T`9#!rSoNYjcB09&5@)|N8 zLHxV$uiC|=Kw!=L8?WkZ-?gCpqJy3&eyrJ3Eqb8(osm|(bnKVMOwm;B3xSKUc^6>h zlL`g37qu_${{Ak7l`t=n<_1hHKRkZ=X&5&x)(l~4njt>2q_Xel&k7k`s%{_#5sSnT z@MSJN&OF}zUZ_`;&h+^F6D9p(_FdoSX?iA(PZREkVU-~{1Wo>*+oZ+ufkoXKU=e~3 zIyY&26~ZTi(RZ-YFD?7_II0Od1OSL37Yya{e^so4u#&ZG@pS~nC2qVm_aAlPVw&kI zn_`=)h+Mv2|9Rz`s2YGPNC-ElFd0ddsZWq^*YtJY{Mk$KXnd5rj855CP$b?8ynT4s z(0TvfmZ#hBx4OUj3FgfOvbQUZ(nc>vdP@KadWH2OZW86UQ*je);=1)BK;&%%EtrVa z4fkM@3G&b`FKYw*T*M0Cs@6-EAMGGVw21@|XM^p!?-`2P6I(KK|G+|Ti5A@H`vSyC z8Mr6S&1IF$XJJ4#=tcPO-@s>Q3SwUTr_+6o%dt-ku5mO}8t+DVR&2D}MO-nqF`;mF z7)~}`{^H)1`CPNTu=I47G}f%+tBVp)d45d5sLy}A{|TKJ#VaXTcv@(E_$noFT+f~{ zxwI50r(`8jMY{sG8ZzkWm69p-3-J78Jne@nme$}kir7ssbO&H#>A9}wD z8gGBMSg=)zxG|7~OOOgVb>=uOr;i*CSwOrkN{8L1A#@khmcgif0 zv7PRdh^ngB{&8iGa%1?neIe=OChT%CfHnDGga}RFDUIO5eVu5f*gZMJ3%BiJ8CMKM 
zW&F~|a7+IFGL-s}UCM2N4?@KiDf}y+1p@meGuLl`PmEkK)zCl`r`2t2$n5xa$1`mR zRV1A0M0l)jfvQy!)qb)FnMt6#-$2-0Qgb2~Ahb9?c zfNIYf?$0hLmb*pW^B!fM9IKV$H*^bvfm5qF`EW)i(#n~6O`~t{jX?#~iWg0AQ^wFY+>Jo%QYZbTKTR~oYq!STwe!0#1`n6{a z>&;OK@=_Z1jYcxW1MuItnE5Y+{LsdqRnLo`m-By83Zj9DsLlqfW2=35n|T2&uk3dh zkF~WG1E#EJxp4<)RI8T*qg%2;d~L}rB(i~lxmHbL5(0us?6#E?D?b|3x8}MfNerLi z{c#*a)c8I zwf+xX?;M<0+;)w|Mq?+9ZQFKZtFar~wr$(C(>RUOGLd8!-E*j0;qL@-BuH2E+Z77( zXIEg?5#vCwthuujPGsi2uN)}N?KYq74aCM{Z|w6sZlRkjZThLx2g&!Ce`J9QA=N<* zT@TF2n$I3#$uhzxuTSCx**hN^Zm_9o1OzbwMGx>7eZ;Z{M0R-$xHqkA)o#C>&g_!Lgk@ zo1>J51Ao23XQzrk8?=^NF+8=GL|pX+4n4O97RoL5G4_jMDqvf=T?SR_blP8t(K%i` z9o{QAS)6XKsvh-*i_P;ZGijDxPyN*?*soYPkeuxP2!s3Zi}}chjrIGIcv0eTMaju@ z5Qbv`*bxjg!icc6y6x*0ymC7We@ul~i{%RNXzR?$``!>OZ#gkC{S6-w!AoAL$SBF* zxqs$*?d`F3u9n402Qq}5ZlDdaUs|G=I_rDo0%Xei!|B`db;k6`u5`6&I<7f5&}oYQ zbR$9jf@#*1M6=pqYOB%wX)b+EO2wT4=A@ucc+ZmbHdXD2{)Lk(Dg1Wdnsa_;3SD-+ zxftYq#mP?>cacbKQI&b<3{$S9%Gl%g^0D(N!eN_Lz`*kTm-P5>feWlF=V*9BG2hX7 z|2D3LK1Mh=xT_;kqPq%{5TFb-zdsHuCYL4k`0M_T`)#9~c$BR=?ufYo`^W=vb~IW{ z>Km$OSKgQ_r6fW#rY$iZfV$$1c=ZuufWBGK7rY7h>q!RR^Jz*LPm#`$zRM`C z$75<^juFzBPwnpfMB{B*d^hyRna-NCbq^CaTp}gHKV2eFu)BC-GUfX#t9{vY#5~rEbH~Qce6TKW zZ=h65Wp$l1ObvYC404=~3z=XQ*{8aZxd57WfYzRoR;SK} z%Hh}(gimzI@!`#n9Mk!52s*mk1wPI5L5wjfkvq`|9*&AGDGuO?!cC&t*cxe`8wcB* zUyP}dc`KC7A3>Y{C;li)SWJ`Y-Y$MI@K(ocF>7XGOMc1x?g+XIVqQDO&A|`;SFv-z zXP<39EY$CzJ2t4N(`e#8>k5yzQK~fIQ?Df9U>@;o@7p*u zzUMtuo$(%1zCJ;C(J!KY{ufs|S7R$yvJ=;se%s&2{ zR@~7%xSE792%=(k6t*$x8Jz`(zbYZ z=e^wmsC-!||JxDTt51*Q@wkwHrg8F=Nln1Bcrdm+pFWV;c+j4!Hmqn zB#3)z<2obWDa#umuP;@J>a^ijsxJs**xFjC*TBiw(6G^lbRHbQe)&IGfKVO)a92=R z3WtG#K}Aiwkar#r-wOHsJU%%o3S_l^I5j5Zq{ue~elIoudV8kvy|`q@T88FVqDp*f za&U667}*%5kgN`#=cvRSCv#{(1#I-xbh&v~XbE}r)W|K`H{g`tfT8Q=(xx#o>aeK@ zsiP~McICV!a^vKPCj3Y#kHT-E) zq(Kk`+>+b8E4|#ItBXTt?d}L2S47?tT>$@-DUqeAKA*je?WtSG&9JB}Y~J zh9T@0R8$fYGDD;9oKHz9zQ`G-3o!?nfc=C>{n@7gkqaw^mQJB0znv+>5n#L9S6f&T zLc_B@_iMgDNr7*b=fx9!S#62Te$r%{4tfj0CPFSClBOT>sIH@<-px(Y@COTljJOt# znYK2nJh{d^Kg{o0r%OECfL5UT4%!)-5?e2NbM8mSmF}5gzPe_lHkoRiB{>g;$;pM* z^YGj^SkV^He!v$zF*aQ({INrk2DRdBqd1V3r55C##;&H;(CCB5rM(F8?ENtP+Z` z-XWEre7K7QKs7I=#mu0`SAyv}xOO|k3NSN|PfQk_DQl=p5Xs8A-yIV#-Ah~DWS{f3 z*LcP*Sie#6{U@GGkXQVcmO$ksWK0snGI>falVGbk3?6}+E#A%6fgP*GzQ3D3+$#k& zW1N<*0NfHRJWX2ZVBeJe0m75{)7Ti6FsmE!($doW#@rVT&&Sk=#UF`Zs9sEHYJ`s6 z=mTH!ycE%Twe$Y*j}9CDgQDqkmPlN!;+r1m*J@t?e7^sLNC8dX6%v7mvsn`pWN{%- zHp~Ec5=KBgAYu_#c_y;RV3H{flA&tTzkg z&;L6=052kSBJjKOuP9!9Ac7pa;$Z`;u`g9Sm5Gqd2DzR72X`2zoXNZb0PvU!qQKjo zlu`BIaLFE$0S^gt4tNzI8sM`-gNNuvd~KnFUo$HqSTI>>$KF7wE#lBADE7>kF(Mbd@z^eQWaIy{Q~<85)0Sz$p*%x%|3EbG2W7~RA{5RB zC}jT#W)Xx1<^Uh%|4sye?GBtIL`fI>i)T+HF=Xli@0?>OTlJZPj5xCF`j=^ZLgq?X zD~<)@3p5v2Uj0b|T61=5&(F5(^;YP(6!@7f@HVC~V3M)#H4S_Ly@{zfnL)v!el2&z zH5eco1i14~BXNf%kLKrkA263_iAFs0=w64;L9OVCe>J=T&qhUiySrFk7T*)YKmE>G z&VD%h-fgB^Ac0mKn24Wjr4wk5T>`5Aoc(N;+l z+99~OpFEM0z?@;MxHQBe$3jD!xXRfEf1m}d%OjSD3{^JxjzF98%c=n|48HMBhGmyi z|M+4uu3R2jS<^6zvi!DJ4;!~1!$N1B=#{+Ld@-^?uSLDR0uSBp5tPaX4#jTBy72E} z0WYIh1I;g5ajb;tCR_OLXG_AjMU?eLvgX>Uuj6N&nm-;v-$rA-+vnHE{G;}_gRMVvu2iyk#;0b6i0eRXxR0VJe@pbYmgb5*Oi=770^@i) zQGKi6RaRb{+|X2v^USzsXFE>&!}+2DFZn2aZwVD^1(g|yJmWcz&Xsqv1uY-V^|{l= z?T`%S;Aw~Cnav$HN4;lx(6` zdZu5(QJtewvl9g`^Ix2{~W!wcm;3~M}hZ5QFaQ@HG7u$nHU-q#; za`0A$1GxXx?~jAWH+*JZh;b&Qr$XK`zoNK0lkm`S#c+dX8cA%Gl5=obhGG3``r-IP z*}huv7QJAe`sp>T_AX;L?^b(d&rSKsU_>|lQSe?Ds?z=g%93thmiLnLuAG9PXn6* zbvKwe32s&gv$~$ue+n-gmjA8~?d#db#~az~?pvhIv(Q~@ zGoVQqmi9fY2rzJv*56vAraIAeCbBR_v%I=Jg2r^l?tt*WRPY1YU@Aa33ukA=r* zeDN`s$=IH<*7LnIaSyx=86(zoy#pn*a}9OHaOw0dqrgo6)0goio_dRe4FDSb;|hx` zlhVW_@fvi4edbBdx8x!s 
zjE=;|aV;N>-Q1B4$`$?`zLjfqox1IXhbOSTtI~n?i4@P4PVRgV4AUCU==n7MC)%NT zK#!ZP#ZFe#q-o~3wjIIIN|DZ44@uy6k^mhpuP}B=`nzza*+hz)H|T>Zp;_HY(dbqT z`dEF53ShJL*XV`Znv7O-*%hWXAf=x(736Kv3ms>BdRu% zjSBqeM$suHkBH;>RmCt@v(NXLAd-K_i#>atbCFGrN{G{u1+84rE4|2{F38Eo6N;xJ zci>tL zNX2Bc>N!n7_Rbi~=(#ZlkUCA+BlH|>|BL`#s+K36U|2%V+1`;I;>lCih7SsMg1@Ct zX8vDqjYYOa>dZ8do6QV8rP*pv6s|D}bHkkhFL%6LkinH<{p(4^>H3Wl_7A>;p!jmK z^PII5i3~x)O#f;C!x6g~m!70zRIJ0Yd;ZRXzt^fSxtb6G;L^X%O7N|yEhuGCay{yJ z1ZZWPySy+9us4c0{+d+E-YG|OH zM*j`p^>-T38%z|&y~>vk2vX8a9Vm_lrYjQ?vSeAS;^9MV<>asKWZBF%uS&UlvR0TR z^48VUea<*OP7i~DZ1|h&B?_F9KM*`Ezz3#E3UO5^pjt;dr{>HhI9)MC&q;zGk+m$`Qc7O= za0-4uO8$BAbv+y}8$-|UUtV+Y?QdKFs~->SD>}Hj5k|7HU!wTlM$lt>hbv6tLR0zJ zT0;W&J=gD9fe1*^bLPQ$OtMBk^DWyq)+)SBNpcMF>jsRA`@ndC2|C} z`hk~gf|J_t9#RTw0_#ziehbQ(xO&Auu)|aQZB#aowrY4D&yaV&HY4}q9NdPOtSY|d z_Z;2GZfvpPGwBGsxcO-Gom4LyczR$U^H?!sUZ3%Ya&AKG`k{K&7&}lD~OB8solwiSZ&PpFOFaL#&-Sm#i z-r)1}xc<^nUT&a9*GcmWI7NLqhk#6%1*8zFY3}d#hSfN3Ai!?19}X^6ew7F_a`_MZ zFdmC2E;ywx;3GtsWGXXL>6=O!h~CINtPBlJcm^N_n8tW}1~~4wl4J>xz&wOc0d@x` z=imn>s@{Y96Jc4}1HH>T0E!0n8k;!ypcw+n1>gXLgKZ)^DNnHoKocfTvtKyvpiTD| zSQkr^&LOU^#8+t4edWX&pB5HY*Z~4Cs_~zoaDMc+XC2%^3Jh>kek=+|M=OONBfYt= z+^CQ^IOGqt#g%aPrPBqxMl685*d`nx(G*@Lr1#H-X8Ml{-H}C3&W}fcl{Ci^TO5(_Klt}JCWq@+HcL zKyt}y+QR6m2H*R7oQm_83`wasC@m-IB&~>!0t5#oEfy9L_VzC{lRJBYm^>abA$d{p z=>7?KZ+PO2{vSpYGLF5=YxdyrKMaZB8XNPAYmx3lUCUn6i1Z#wt0RumCL(zn5^#-xFv7#viyn(_}D1HnKq%SJ@2dgl>;-VusM)Z)H zQQ|*LNVqUy-fP8`KAkQt3n$NsQ?P2XrO{U2kUbn5lxkG4)Ls$&wf`mx764moqvHQ3 ztn&AF5{O8|6fyY;Lp!!I5il`m@O;;T`E2B|3K98g>IboWX$u0T4Yt@TF8ed?ts^zy zshjT{4ngd@I_4ppb!;hnv~0}2%)wW8xtE6ltoB;Z*u`Kut1XL#zEnDT#qZYAygqQJ zt}*#`8ZRZlk_<`%f!%)!Yk-0LUooRN?WK|OYpb}xB5LoF$fuPDiY7xkY937=;mWV> z#$EJnO{Fg_W>d{gLwWSv7!(H;e*|v>%(lyw%W!p5IVF2iC2pF%3s(Ns?O<{Kr!Hx> zO_z}bWG``Y*699WCBMq5BRax-$;*%q;Qq>-U^aF9-HImGl8obUgrLp>V%-RcR|1wf z7ZO)Der1OP!RRSa!TcAD(!)B$0>uP#*9&1W2N;0xWn^d-D7-*XU}UY(zq@l#^$&MY zD5ZTWfXzjFcilLx<_d|9ldUHzLd6!>)H?dYVT*oP&ic*C0yIUI*Ffcy71To9zEiDg z&~4x0*vfkntM1qfbLk_osnN*qhApwr7h@^j2A^9^2z9&L`w9CFn;ReL{q zX3^G$@&V0go6a?24MGm|ggV~>h|R7^rziQ^U2Wl}QwnA}JeVC?)_hkb`3}=<~sd$<#Du#Qn|2LR{TH*}$IX z5y5yD^}shv%fo|h!kd*P5zGn5PIYW0<>R%#+cVV&X?9hB#t7-KFJn2wWcUaGT3R(a zY+;vD=4LBjwv)Ly5IOm-y^wTtM&2WWK@wF@CCNc&$WFZ<-^ls8Jn)FXHwT9&0KNtx z_$xy!71B&YLf-XJy7a2YEz9(R0&c*8zKO)I@#}Tih3gf1MJkMSHkO5jf{Kxjh3nxn z3c?RJ3@106!H+GC0y|P(XB>fa-x#s_+uNHt`(3U#W@CKU&30Y3(M;F{CYxpFRW>a9 z2HFhBqXO{Pf;cq}in}jwU zr4=5NHuI}4;rMPONb>sd8tvtIWs%KlA-&HUzI9eBytT(G-?r}J&N^38@hig(D?ar7 zSf?~=zWEzHxU#w#g#>-JB(Z+ByxG9o!U9^9nc~1%&d_j&9|r*TF~YE5dqv>5c!TzN zxl5Sj#{@}#ydq}sei5tsL~LwKob(Adng2Aur0cKL&t&?jOb(9>4EYlQW{l$MW}L5& zhNdIfz*}EyXyjH?WK?GL?(hW>lR8e}{QoD&q>^n2Kn%ET_zli+t`@9*q1|H93hm-|7s*G)}N zl6P=X8rp2wx&9BItB(QGb8J0>AVOPxI5Z``fZ8YpQuByP%@(tsT}qXH1WFN6aVv=$ z?`N^(HGc-4od@UuNnsQcpS|mQ7~=f1^|Wu+GP8MtlCOu;${egr88?(26Qe}0(kD2q zj0OWanuc2mq)g1r%4huMM+cHZ^MlU^NoDB^USB;GagbScfYt+hn=NM~mmYVG7zqrn}qdht2jdV_>AWCzVABxtf5 z;THCJ2P~{(aJAY}+Ud0iqkQu0`M5(I-r-OMca1w<9Qcnuaz& zEmz+JWd@U{V9q%1u02jUMq6otPSZs>&NPV-L37m3Kv0#f-Toq_6L5y-xW}!ku^@EY z0c#Bj4ySTDN+e(Mko`6N*k7b54vA#jLnk7nQCqfY9Qlib!*0W$5M!c9c_*;k;(Ky| zI9tR)*3J&s^&L*@2Al+-JH^dU`sc&7Vpc2?b4F};c|`Wf{;x#0_nR2Z5g!GG#pWcT zS&J5oLgUO!}8Jw0c99wq$qBJDZY5Y~uIQxl4C(ZFQSd0!hUU62VS2-bi*nMUeF z66B`CG6gsjGjV?fsR|4dPwtRB`E|llMxoeft)3q))P`e3Lo%n;t04IBg%Z1LCh&2 z?;spI<{W|~*AH`d&3+b~I9CU`j;u?$wI3qge*$$*Y)6h0{z7v<@T-BJS%PExH&2yv=p`neGi(liCL1_F$; z4^7kewv>vMvr3yiy6Rg9Xay{&!6S4bm}D<1w-`icvne6_b@rd{V5hR%yCYE|RIg9& zLn~ug&o?*&t?H)To&*7KDK$)959g}%46>e zRBea$?rc!8f7oAh#pjIbCStdk)H={Dyt#Xl!`dtBk9)5t)#{wAtU(q 
z_O^X?2A|iYe|3E?dR+)(^y#uohQ^A3-ToD9R?rW#V@grb2h}CR z`fz&EkS)BwiT6_Mj(hd9?FnFr(` z(Z~L=EhOK3=1kAS|L4Wdb*~sRb=DdkQjv(_jKSm#Z~t_WOz<@ui%-=zfB%Xr_w{(n zDd7|UmR(&epFR^a_0@`g+Nz-26@FDqF0sp2p0%CV0W+|VV6>#_h|Sj0swEd7Jda2w z`}lP4?42Q>Wui+1v)0NY=?Z?Ll9OFZAEX~^oX+E76{)M=?H(W!0yprS&-(-WK%qg< zO$sjnc?Si1r4vm=R8)(r43Xf`3{37T9gWOB3v{0kZQ)c4Jh~wjPO$&V@)zZRBKL!h zW4<-85xIj~WVRCXjmOvM0CsirFNuwePD_x`$<510&4_EtrR#&QkC$l0`tHQryQZs} zY9Tck2UJ-@qZ0`C(eqvZ2MeHe@2q0WSQ0B4`)YI@`Of8W>MWx-a%3_ZJJJ&NXvk^i zUisowwurXc<3L@B&KME`k6G`az|V2<6C%BT=JiqBYE;m4(pdlg6oD)Vb@j|!e$n4< zu`N4X=!of8QYHieL^E4mEPD=-lcxkO)wgwtPd#zRlL>JFrgSJf%M)Bo%v7pE@Apyp zRc)>QQuUOlA#ZK%$Syvrn>}-EENg#8gTi=jr$cnT&xgNwH0m57E@Q3rR`YBl9~;Cj z5O2stAzQic?KsW^fL1SEwn|ux7jrC&wW^OopMSxj>;7CKKQ1N!1J0m-5<`M6$9i% z))3gMs6Sg58j?~6ne4UXvbmU-s`wmtA=UvPqylMZf2V1aYTRCI1nd*RP4H{qWW&}F zcnmIkqa`jhW?1A$_uHR)e4pb5M&HiYA6Rgbkmbn z5J}L;c>_lq7*9_I0Y?!|^_R5y2CMgr_gy+AbPN~UF zmJjn>2J>N(XuV~3g#t>-L!+%6HY0KLa`Gin-hDo=n1K52=yaFcoQ)7sa;2^``Eu2S z!UlXNsAAiFxn?PydbMVNZpTM;Q_aobGoVPUxz~y-(Ax_W2cu!Sd5(vsgbP}9 z_yanv<`AjYh87Hw4sVZViC#dQ61of)O{vFMZ}DP1!5&M_*!)BtxO5-3cP$yGa#*2# zULWLLKh?Ckc<;`TFBkNWf8`TY(`tk@6eJ81eg|y)VusLam%|gJ-tdeHVYx~lgL<7_ z(&0fi3?hb0M$OFuTH%@yuVq1!*%05?A=Gg953O+XK7Qn2cRT*K*>Eg)^EEkq zEG*#b(9kFp0*Z=?Y-T1roRyT6-fFzjm6sQm0&3KBN8nkLsnt|xSY^yFtmghaEy5Fh zT94=Q^V-;)!6e8DmdKR+)%9o7_kOwpbvm31Xli2BUUd_mUY<^9b0D^qvs_GHQ51Fj z*hazMY(?7}jTs7tMT;jL+hVLx*4mLP57}=dVgK;h5zo*m91q_{YEHVWRrR_4@o= zpga_s&eBT+W*=m7tL#we@7-Y8o9GZ=T)`k_o+V7Cnz9qq-2JcSkTg9(5aIJV=yrm-Zw__Au6ym zl$)gz#83_v;aEo8)0)*PpSuCYWt-J*98UWUf75rKazi!n6f0?&T1(?D#|tpKXM_Hd zh0Su!3RKU>^9X=+7r8!VX-Pw!{kz37YQkeOU*49XC^2<N4Z%|es(8-Y2if0t{z=XMM`XX~iZXhgUewHhG^z1+5|k0#vZe8Oe|O{Yh( z*(~C9=X^dt_~%VYdj)*UH4BoZdcwPG$l|W^Fc9}yJaD7EUon%)cSGX*zD`Y7l0{M- z&Cusn9W8994tDl>-ovRvzf5JYa{YCIN=P75Zf&H#Nu`M;F_kS+I!R<=Q~$t9-vaH1 zhZnUN_u=b63tV|5+*O!!`95ps|Kf6105Izdk(#=D=gNWoYw$;JGrO~E4ddai-iRXq z5Tf#-nsGvv0AMucDC7>0*>B}%kw-l(J+aWz-qC9)Olyo^Sc3&jWi)R454p)}Mt)oV zYQN+CaIVE>zli|4nWfx2RcEm`YWum<{%C8n)u2-8A|WaHi8?`2cphhknSv^wjELAI zYpRnQB;XhELY+)q>5b9*5m0Wb2Hx7*M>0W8K$uh}IMI6lbZw&B?T!Mp1rX^!m~8ju zhhb)Brqb-jzCou6*M+^-{Q7ZWO6et^n*sv13$hpyu4Z*ql{ z?oTFkmn`JLg}I}OA5VZ3ydR9I&RJ!Bjh2=!hluGeLL>EOn;wyXw~Xrb?qJCoV31_c zm1i>9tt*$am3iiu_S@{YMgb>B$dd4pkPig*{0NXiSei^5H!}no+UPJcOh;R5#1o>e zB6Myt3u&!oXl`yv(4vKBF`Yr6@sl|T0z|`6@NNm1w|aMUoY{cnjqp?&m3QP@c--aY zo}L(lfX`dF_w(&gv7s<=xNHg)@h|#v66uZ~2l0Y_F9%&s4l`7WJaw!?C|`HFDY!H@ zJ|yQ4w{#BWGMIX-)Z`lL#~p0k2P#E@SkuOfSaD~gMh6R>X?dh@Z79*XQtY6W%X{BF z(Ipgc)Y7Q#cW(}a0CzW~SU-Ho`>;?qyHUq|_*JX|YsEgee{>YLwT1j(hf0#WCTt|R zhe9n&kdF`Fo{9w@1++65XvK+oS8@(?#K_;`n;Q^|)LyU`JGm4TkBaE1t!22ozc(^8 z{3`P>e?XcHC8{PJ9j#i1&kS#_=C!*@MDdjpOzqPW+9=*D>kbw(8$`9Sh}ZN9L3b>`_DwJNJ^1j zQT-kyMdvlC@pnpKnHA8LQ!k@!8L=jGREyjUbs}_newpBGdaW2v;mAgSnx%o2V{H5CVHwVnsdN4~# zB-y0p-_m!@W^ZI?mzL|R0nt@Wtv9|YbEvb-O2;b0pFIF8rczVZtg9?0o z5gJ@X8Q$NX%w<8VoBRT^Jw1k>*gu8}ic&B>RrA9BXqfHEjbnZnp-`n>1@W<_4^=lK zaNdju&zZ`pf2?HXa7GTvO+7a-GsTzz`gw$vlt7v(oQE+G;C|jqx1-%)e|qF%%Pn;o6^AY-CmojMg@dZQon5 zf>ic=U|yflv%o0~h4K83E%6LDBVt9u+bY=*9TQVbDo^OdTKF=n2RmfVdC{r! 
zk&iMeU2Z2C-sW_L0`ZrTi4I=t?ii+8rD_7Sdg}m0;-}v-^2CfnsVQze ztl&oCyQYz52Y$}rT;mP`FiIz=bsKj5O1_ZWWK#KfHJxw2=nlQuete$}nmJ6GEA9s0 z^!5%U?xHo+xJ16TL@na0EKr;daVL0&`9OX&`j!N4Y~ws-u9U%77hl8mI1a9oNr-ys z_f8lRd)8=dUgvh@RSylSh-4&Ug1mtYKm3;DN+Lfz(uE9nNEd}<_K9Y~z|V~kmmiY; z=LX@N6Axa;^mc+#PY4s}MROj#m;35?lV3FdW(V zGt}+yCbJ9Lx=2`mAUuLN6%xuFVqf|Lx$xD!8exiUmQ#UZ&d_u+pYCItzS{7f#`@@oUB6Ak@3W0Z@EodR zTdg2-1J;Rx%Eg#k(Ln1zM zcDH_eW7&K}I~J;o`07NN&&La%mUj!~QqSA_3oqkr|GBz(aC>RD1x>32P$Oxp{1%%1-jloJ`+3$TEZxYNIt2>b4(NPj7 zv*%={#IeS`H?4MC?zdV!{{=x(rHb#|-M)CC2N!2oPa`TeIVVsAHN~F;$zjMgq#nT1 zJ)bB+f)O;9evtnoL6mheircN_w-h{MRa^-Ej|UxmGW0AD2e0GI)F25S>w9cf+o1YPr=`$N78} zA&phq7h?;>G`T2Z2M9AlAZ|hqU_|?i@aEQhkw0# zac=L6Gd9G+kx#%TbRG7aIR?$H5+*JzUJCjO-J;|;PMISAQyM?NkNWZ;0QSgIeJb0N=L;M0>@zElQ_M85`Fl-(ho7F?I4Gdn8E zd-XOgD|gKQEn52N<$7|C>|JGDqGEfPE?dn(g(Y7WI!%A9^co8VkG6xndRl5h3`|>p zkQ=-ZLsmhya?uQ@vmR9TIk54(CqIlk)C*W|RU_)@9E@3wdP2TzmB8%hB(d*qO`E4V%O z;{hXw@-C!lkBtx6D?&cZErQvTr!&3T3hCxxCg>%#6}!(H3WI^7foewgVIVP~ zY2Nk*#Mx>N_wQ+I|1`84ljjN9Tbl(mVV`^AcwW(wg30?p%__g!JCU_+hZ{IypLe8( zXek+Owx!aKv*#=ag&9FxpfQS;^|}z|OlE7*X;-eDMAjD^6nf13k|gc~|K-_&pdh3Z zgC1!gONs}+mcb0Wm!@i~0C6Y$`2rW84mv@am$SOFs`onM-Q*5G81u~V!kUALlp&yM zFnm8L8j3Igw1*mDpNx%-HAZLw5=;oVY*M-l)n#R2rjx1vf{*aXNRd1NpKYKuA<%DX zXmC(OcPm?W_}OhoZqN;AI_2QzhIOl2r7jQijL-ceA84X3l*WAp4XlP$tE# zMg)t9C|)2E!eqb62ejD1LMG(6}c(ETiHKL?UT^Ne<4E4%SF;PYiI6U!@nTb3sGsiUmO-GFjRXA>ggz zczSlSOu=!*>V+RvxeY9j(tNyFp+@#eW>e$JoZ$U#v&XuP!{`2{yV3*X4CaW%JM+JP ze!Lfc`~W)KFnd2a+?}tXq3h)c`0&_m^~R*oYD8W;5A6U6Q6)f5PE$*(M4Jof7p4+! zKtx1z4AfVM6-nGb-k$w1W+WseMDQ0hU#7zw8knIFu>IGX%=dsFD&@l9FVAHPIfFn$ zDaYp<)A>5j`%@8zB4mivGoF%u@L*iiO;p?L`#B zoopTjnKSW0kMTf9u1?h(d5_72KMuF6Fhl3udp<8u&u9-bTj3|xSouaRJQjLgmLz^* zTycOP?)~0Y8O%+3NzzV-Gbg64PHZ+5x8YWElGFlM_jhQ*9`{JUQe+h6llBL49yo;_ ziG23}l885Al)i_4%=XzoSI|XjuN;v>fviwmIXhhStkE$DB|oZ!Cxwsy z_-Phy=%ISZXR--DoF|Iz%+Jq{CsD?Qv+;6qg+EH!BvREX7lpd>yPj)e1AW3y7b~7_ z4x+~6W)jKa{}nb0#iMr5R_a9jzxkp5S(Hld^|+;28tjmaBNR!G!(o+(E=$Hh?bN8% zeXeq#DDXa>D|oW*Jh_iRqcpU$V>sWR$cV#fgy^Id zX-w8~?fRich0cE2JEyn{c&z;VxcYSSE4QCi)@yKj&`ze)q*L~Lr3N=#si$9Gw1YbH zopvzXQBR0^r>B5wdEppf#&Xkb_uyW;R~mjksWMMr^-8Yq@}A}Cj(hX(we6%l%bV(F z9|vl`i0g9f`V#{sob^9X0=d49bI;stjW@#_wXfO#>4elsBiC&6CB=t!9jFJ*0a9xL zY}{?`YaDzq?}4VUl|1nd3fPVU-cL-Shp|QuHl$k|D3}#@yWH%5(#PFF7V34mlDzNQ zSKyK*&*t!k{VE}MX**#rNl&IBr>4Yy_vw!sXbr7;MT zUv9QTh9KDUElx>0C+|j_(Zg;HV5&Nwb_e7g|1DE&jV|3MgDUA}U&x=}UQNL@Nbwsi z)aNnDx7es5FG@zwsN>5{X@)1lSAsufNW;|uXxB7v>>O`x1ZQRKB9c1L3X^8}>Cz|& zr*k^{W-Zuj7zdFlB73;^^UbMbk1=BQab`IqXKe|W^$uq&a_9WGjDz|Vs5gslGoj?| zo{v0cNX{Y|BqdinDrD;CdW=8n+NL!jYv{BsIBO|KKUwkFY1A=x{VIR+;k!6ut;|rg z`)9?E~(TMZ)bT#tb z9+w^PEy5xX$S_2|S%Ie67jD%*w_`f9_MTl1W(bt)b#VY4{0NBtlFxUT&k_QytjRM2 zzY!fb_4f-UGaBfJYe()UI#3wNpWu*6NLiVv>^}i6SVS=EyN(3;I#o*`$tJK`BcC6S zf{qTcBr~O@rP!Qy+1QsSC)8r};!!%y7Q}^zz|v#nY3b-hbz7sZ|Aal*3Souycmb=o z`@=)5eWFD2%n;*H%09=Vhxvgl`YQX2LFG48tgb&DPpD?Ev6@wwgCI5F*QJ7Kdm=sS zaV=~(s!1l;;X3?34OZ27kR+F9!GIxXcCwYzYP$sB~~lI*~Ev zEv(>M|DVdP?d@|1Jx*DnwC-Z< z`1oIjbZo($!FTa9e}qCcIaUI1P>da^T#sFRGEh%E5zmZ{d-*P#hMZH$1a<5!_BFg6 zvO0-;-5;`i$g8NGdeCG=v?A9jAGGRJ6i7d{(kCDWbB@s?Kj+o~0H-PTO)>4Go1_uYzsmVx7hY^&5BmC+TPSX`hQd*l@THe&u)F|-N z_NYv~P#V;0btzN`1ufS4K+j59?h}x(4UeqRDj%mIv=up@eXGMkcw0IoM8#WD56(!$ zNLw%N3qPlZxf1R0+l&&hF>c*e>o>}x<0<{1MMaRVtJu+OechcZ4p%RK(VUx`3s=Zo zh;;Aeb0@JDe3yBl?A93=^Cao?-EnP9l^%moKY`hL^@Ey+-`KyaXt86+Wwfzn0Gssv zbZ1@*=(@h|CKHcer1QIMZDW7Dy@5D#Bs^s~gSFC-Vv!w5Y;1QOPfYNB`2KKmW;usj zBuWIdi_Pe+6WLEx?m`J#!~B*@5ss69w>Frdr_EQ%VJ~U>xjMXi($eLODta`lvA~JV z{{Y|M-mKcJ4)?~*A3TleROtJdDvr0=X??)jsn<4JH&DzEJCXSPqKE+Q3gE~eXhaDf 
z4-(#5sq5e>S<(_J_qG98r`KQ=RZ({`A$ixmxM;YAm-3>l&xs^cIOwJo&Z|KGQ6Y)G z!nCV%D0HRO)slX-;+(3~eW}BrOi)m81BE_nBTPqtwL(_oxKufV-muxB)%UC{Qcw$9 zF&z8;C-W}QToUI|{zBv#@N410sqxa7Xm%Z`q|#?8SkEfs`M(F?yHiHfNsf$`9y+US znDCfHn9sGMgM(n01fA!ZJ5gM;?tu_EdOh3x-AXIXy}|I!`oL{_65jC)bKweLJ>XTn z+;#YDgzD)oHO37%l-0&3H_OKNmMz(9HiE9!B?b;_(FaBL%?um(SAv3~dOnO;n!KbT z88e3cc}+_9yOah&e$>}Lco$WL40Sm*$DjIZ%d@z%=w`C)vQ;)scXPv6%7W{1(Bq>nZZ=2$ z;5;a*hRwJuRzToiPW}v1vkw>4gLSSrkIcGhM>B;vxMbRDNq~7*MvZ%OlZ&GBb;7(^)g!b`s2-Xn*YH+2J7A~(dc{opvv1$CKjdG?^AOJvDEShh?h%k}!u z2GP-l_BGo#I!I$xqG-R&Xl_wRDS-O+C-M1vrZkfDKlh|eUKds9d=wuzsAUU-H`T5d zPr4dEuWku``@C1!iHyx&$#-(WAdA;QO5Wqg9AnVyTOMeS>yoAs-pA}_)twdkcTA;x z?ri4KQCHkWqb8lRvU#AD2(A`+Z~jOqSpC9MR>Ue&_mL!Jzp;6K5K95)I^x*H%xL^< zHj{O-r#EgnA0{OJiqvAT)DEx_lt&lkwMX?xTFBf1E=gK=^a|CgcYjF!g@!BZTaxO8 z@bY$Z#}@e$OObaI5ox(c6{RlmhS2X1wAemG9`nNScEf`1-ruLO9&wJDoU|QE;~Tl{^T3P`eV^Mr$rQ4%IXh z?yQoj2_&*XR7+=BF&ECZeD8UIo&mNle?(Cgv)br}@>%5Hj*K!{yY4v%RqM5o)zpk^ zI*VCd{E$aV&+50cGNsyyXxGKAx~;ax(<%HG{3gGCAr9WN-ygUW>Zs8vdftBBpYkX) z463aU@A`9!9e-GsxjKN)EUe71rHV2h<`DWi4QD%dH`49OKI(NieqXKm_=#5?o86nk zbpENAFQ3F#w>%^b&xx-1Rfe-E2VH<0(zQ8va(>^na*e>dY~wVJ1qb~`+dsg(6J?AM zj-#P4wQ=gXal4rl zle6R8cqGqB?!$jk*CJ1qV(x!J@p`6#JdhrAEiSk*(uNp=jGZe-O>}j=avvW>? zJAF>=pO*z=O+|t2P)s-#oW>4IbFw%XWHs@YFH^=kiajrssB>jbBtn(L*-72jy_`tn zLd@jm)88g=@C(fq@|TJJO0$34%I99&*A>aipOKYzs?<8eP)k^68(lu=5KAGw#h+GS56V zvQcAqz^3`stG^!U3t=vxguw7IoAswK2A!yhe>@yx7wF3w5*jMa$59(YsMk7(e}@k> z@}a(!Phum+6Pe5{TPi`?26Fpo3K{F|2w}V@;)`!xT&d}5*3@49(Xrvisu_6>1Y8() z0zJ3)6{MKL8|#xEH9s>%x0GQ(=gw8-M;&7#xZtK-F2j}8^U2h+`aS59nrB4rEZNn+ z`?R>eZd1DYmvM}*{7#|$)LR$(J!oUEO*T%LZdn7^J0r~kAmP2qGBE`PlJ{6=HeCsO z$IEe{$Zdu1h1Q3xy=2RY%n)p1xXU-+`SN{k@OV9JY9#}6rw?sWD+#gDpO%WUe)Br6b_XdDAok>3i~tI@xcutk;Iu4?m_j@+&8$s6cTIDx1==){aoN5?)8Md` zz*zm;t6`f`aVoKT1ES3LHr&{0Y|%3>XG`YqSO}km`K)-_We`jlc`}S)`AU%kyQjub zx7z;Ce7`TQ9Ho@Y>gzL!9#K)@NKYOnK`x&hP0Y$_mw4idmGX|s(kG&Nx;5j%6H0;F zK5jb9Qo>_FPsr-eQZe_YYdO+hsI_)hJ*$nzIxii}07}h#RY@Hai9cln|IC2zKQl14 z`!!g^_Ysipfa5fu!)2eA+NZOi)6533+hAf2v4VekrtS@}Tz)Ep2)vX$qof6G5PL;>q0 zA{C?^Z!|@(b+d$VTHy&>eD0O#<)OEF@Rw_4r^%TiMOZE{QMshCRy-s}wGDWACA>w8bd-)LMxyt4A%Idy!o2JQsWMS&^)`8kOgH9ih2Ub`%U4QHe z>v(FsyN%}Iw?!aD0Xr#&PEnPO%XJpZCavw<@*syjoedMk2V$!75FnVYM80li-ATbJ zmdgL=-x?MFOaIpI+h8Y~Vd34Dn)QM3mH8%V58V05Bx*MV9lHe1;|;z&AU=Z==46-) zzCc^Ua+RDDhDE)1#;NqObqBM@5j9EaV?=i3AEGzDq{XlTP-PI|w%SDy$-{rS9UjRy zqHW`J0O9m>1xg?W{#jaN^5O-*FPn-~7V>U)4mpx z{O#IB|F1KMGrCByZrJ%}VP9F!Gos1>uYtonh%_I?xVI(4LYR_T;Wz zhIo;tXitf!o!WV?aYBPdN885S7VZN}HtNb6Fzy${>V7LSQ_?^Q{KlHuXlBn3e$6M^ zub*%tX3SFNhW}4LEw1RqN7EQf9Ze-06^{(JNmP>bF{eI2nT2cqH zDqCG(Z_a+&~Yj&Wg%yUatO; z+!g`q3>@Y{n7>o0H<>Au2)#}Ir_KQ62wkq`=_qkb;#NBR-TfjEdDm=O%zchC?82#8 zhK$3bZQO=c?4_(O+2-!_#sXDI7xt?TB%T#qX2!Np-fudCi$5wS-(FF5f z&Y3ZpIC`w0fcEjrZ}&K!S4-sxI&SgC+^GDMY}p=FBtJhrl?HvR!A8XwGPHo$D@vti zQ3#T}{^R-S9EPX)E_7s6F4}vj^m5;tf6X{%riD05!l>))|>4{d_JzuKJW;xUve|R2wU1A`;t^)D@ zH`#1lrBlV+iM2J9ght|8)^Q=yqlK6!}O0lp37!@C!jf*r&do z?BjHw7I;o{!BAsR10YSywyuI{r=S8@QIJap-i4l%^IndS^HW(_C?^+JeehinFsBbe z`nCrg5Sa-tDH z9|&6)H}8dl&hdSKZYa^5uF}N^kXhyp9@X*nua}gPK}L}WTVPZl-FMX16A}{efli{I z3@LLq2mGrvJjJK{uLnI45AD%&^{=d$=ro7lg0)2(2MtQg;(t?h#tP&MOmU(q0;!P@TiU`H0%&M=4@fLvaL_OtKsb#=M<%ZVte=+yv%ONuiS z$_!yA_$|Jn@+z(9qsJuoVK;4*m5fZ^Z@*H;cv0^|PWtzZ_B4_~h!<=-8sG=$<%&VfOMgFXyXfiBSGs=X4DKjlg{8}*yGssNVr<*J zXA+;}n$aMTPfLY`h0<`XL$4)U98%8inJV3SAm7lLBf1C$n_8Rbw~oy?h%QQb_*<|! 
zW_`Q+J+pq_VF)oNCL#?KH84<-EQ#80pZ;8Ek~48Ndf%*FsX5nZn-EFQ;=q{6#Lt31c+3EXISyY>MZ&xW` zJAglrmVkls5Xn!9-qFvsKHUJyf<9X1E>rW<{PZ*l5N;C7h>45$-(>KZlJ;hOgWFHD zp3HxWB9@~Tn%@`zH5q;cYkcl|_0O``n1HdElQTghP9zR33FH6%P%8}1$Os)67?3s# z`g`+`L$^j&MJ1M?oW!xYu8v?M6)ITj$Z6cJjir4}N2v1ZJ&@GWXDhdTNli}T(B*Dz zYl9{E?|?fPXqZVVuTUWQoh*|?5FD6oGUR4eL6)tivZ-MwCzspNcCCm5+*}Gz? z&3%1mhh?Q0%lGQ^dp{+}pxb7j)^9qfGMdvRGykElzD?qsMMSFg*}~9pPqh1Tl_VB4@n@mR@o>GpeV|-@Xj?v zpmN!j8l||P27=HvPuSh&<4$V(lQe!--E4<Bf0q`x3Xe_Sd88jf1=mf-g_MjgsZTdmLT1s6t=ai^e$qh6X7AH` z#!?U~Z~@$0$<%-Que&2JDJ4ti;_%M8b02X2T$>(+$+?rwRQx)z$dr1@a9lNNB-kpFklsGJlsaEk7dZtwo}&Yg-jZ8${bV_MhInI~;RQPpYd+EQ&9V^%17=N>GRkVJ6PoC^W-hz0tSFFE zwaD%2CTIs@dClmC)= zBLmIb3s|Gy81??(Awvn>S)#U}IBjvl`BCQ&ACJ0STFFhIyx>L6!g9$H z&N@C>^8nvTI9KZIG5G%#c%gy17ckUZWyuU6o!4#7+k+40fyyxjDGBRs3t3|u=evCu zSL1Q(ZN4@4RBC0Lbp&;)FCG}Fc5=7DYE!1S2lZRhckG(*QS6+`lKfMz=o-P*22UJ3 zs>PVZ#e`#FKBX0{I&opb$%&dbU%p2dTBqRcPIX`or*m5~T;g-c!^*JbM02!y^G%FY=Swtn$IhOL#|3@p@?WlpN(}F6lcapG zjz@JVdZ@D|Kj=Dhdh4e-opHF)18b|$?qH|^!U~RP&P8jmINxEt@PmoI)tvn3h*mH} zoQZ!sYN7uwPc5pEv8a~wD`p-^Ptt9i=}R@evkB~w3r%JTfBAO@0%q6 z->)Rj128E$6}~nha-~=?%;6%xUrY#dT3zw^y{;WY^}ZjC_D^h_OPF2EYb2^w{YnfM z27r;7dauZkWcf4jn=X4-8`wf~K8evF)T_3j=sg4*sK5X^-4RAb1$|um{9!JB(D(Cu zGk))_P9weudNqCPNee0P5a!(#3+9%l!B_`*5LVrKa+`5Pmi^u4@Mx~MKZcO=vg$AR z;Sf|i35gb$?q48RN*Y5!4buU$Y94fgWxS5^LiKxQgG>Wq>sC>mjog6ww^99uhf4js z5x9F6zeAcUY&1}Hst@WN|KL4#nB(nF0%k%+eYOnSh%ELAY2O}O{5qbBR2q^&`@U?x zirhrPkiF-<@oqUW=Us6SQua5=RLM!*F;Z)v$@_Oig0IhE!fspLJGz~dGc!Fkfh@d% zD^HDZ+VLD6e_H7#X=k;ikgXK`8)nh~r|=(`$wzc0aI+eG*B?hAR0qV=jE>LWYQuVZ zdRR?9!Vi}}u(bG=KAQd?;F)0qPf)Z4Q}Xp8{MjI~wvRwPgUD;%%_@AkhgI$CWb7dK z6g#W2rT0tmfa_zq!m6|5^D*zNjSfVDB5 z8zPcgHpYvoo3X-f(ks^H8AIVWDvSYYb00&gHYlURqx67XzS)SUQjX zr)rL%94L04ELA2u(eIr-7nU0a83saf@@OQ?1E_cIgEtSgm=A40Hm~`sxnhmtr|aFJ zXa#M1-`;TqeEsqTf@2#l(di}x#(tiK7uepox2Ofi;s7grV9 zg;w;LG)og=>ZoQAv@r9 zl;=RL@;c^Iyc#wGxK^Ia;x{grqlHe!j$A=!@pwdQB#0c$T)OZwCCs}`b>({7Ku9u_ zXY6Kd?cwqGbn7z%66)e@h!f>7M!%e{pfgFJ5+%jJzyRfh?4V~K{b_Nd)#q1aJSGJ% zriy-F|EeM=n9i3K5+EuCo*yK452uRX5NspS>n6C8IqVP!C;qY{6j^CRJg`8JL9)5~ zp9H)KfS&(H0{$`J#Bbyj1D7waFbPUx39JPA26VwJtbzwS7{ava01FfBKTe)(5?1AuY!dlZ5yl+!Vyyb2p% zCMVTP9({`1AI}wieVWe3_c7qgO+D&EX66I+K)dS{`ftnap2TeTr%5^WP}uu3m)52B zQg{cmK8ZyEjN>13l1lUiL^Mf3CoS5zM8in%{NqdMW;p*p8SALluRG`eU&{KgnuOmu z7KmH!n?3;K`R}<3ZA>6fY@#~22PP|9R_eYIn5=IlEPE48tOjtQmkU^U;yDGzReo-$ zS^q-9Q|+GnzhCe~je}a9?$J@sDMaeER6aTeMp3}ksr0laV@4Q144j|v;E_ex1DT{v z5Q;Dnji=hG2+yS&JRv+PmQUCKfd&n$PT0S*uxh91Jzu|SfebEb%Zn-L5~ok!-3M9R`-d>(9r=Y$L}X6hcZ4xoacN<3QAQUu7l1PU zKV&1(-gtB;4jis@P`K}m-+zPiL!4-Xaolz^lTK6NbbBmDztJWNn=D83CMGhByZOHVGKPh-;~_IfjCA@_FxKKQDjeIfpgj^c1dta>avJlI zvEa83<65}s2q=hsF&BHA`~*PYxKLS6*bZ>879dK%~0yFqhyc1{viDe_%-Z}!?^ri3@$EWIY4K!_lmSm^_~Z>^Z(Drh67{2S z3P3lxd=f)N;@~y1&~xs8eSQh>`QIjMu-1|e#O<#0yd-s zNWp5TuNd-yQW%ive;ybyrt!KknrK{`qD^r#!YAM?M1{oUIWPk*{L*-H=2o zv%gp-j1?$@ZLN<`CMJ;R9R=)Q-|_y%iBm4~jJ)0}Gt;%N6a^f1?^L>)f2}-PJC`l! 
z$1X7Ywbk-xjB9NgS}^i=T9RnLI%MvF;vG%BR9SutHANcwo*J4M73v>`$`%&oiQ^C5 zQLc7prs-ENs*A!JtExC&ar;}wRi#XGA1=95_6=}-D(I2vmt$JW3}Hs*o?#z`ULr?h zi8b^g%-9cSCxIvQ+CAh(C&=-0y+(JAS;5MA7tYSyQTMEZ)CBL(#jAC;wUvjv`*$xL zKHW~kcEVnX{^Y+SFI4Pp(;3;`HP^)ix=5w)t(kq7xyKJ9=iRrX_6Wh#%8*ZE6aBo8 zd|ENqP!;lNwtW^-HdNi)!O1^q={}ZIBo-5xtr*fH*cuM_$$np>4i(|(C7`KrG93-d zA)F6TdSyb6OhfStQKI$+#zUTjmb3^9uLn3VsyE}h?1V7#8Qb!ySCA6C$Bk^o3|mEw zo-Ueff_ahY_Bw_fLuZFBk*&3myyl-Xm5Q1hLWZ3Tj?Hz-!FMWRQmRI%Vf|qc)Z` zQLHuLYdEH`;9(;8P*T@S^Z8ksl>pocfJ@?-JeLkU6|k_eycy$i$?0!dK@exocGIETdY`Orx=iQWr?rfGD?gR zGi#@re1liTp%<5^2Nx*UZJ}SaLS|=YOyOVqY0tJ*!e`g-^W801FIr%Ucr=iaMYvc( zvXRG2Fdxv@0?Gk3j+s^&_Tij#e1?iiV-DL(ity#yv01kpt&8hC&pIVBuGLif7DF%| zT5}8ZI`>Ws{w5DIn={?vXI!q79$$}{4K zd%+(npvL#BYBs*$OFUpjQ=op!huos0&+8b#7Ud1?B2Fv=badF+AXzuDWWhJqH7V1A z7|i4RKA!b}yH?p&%{^$TnYwKIUB6`bS=GMOZ59#pwB-8!s$wbgps@LPV;vcEI-jZD z!y}B*pt_XD_jXe>aR%!}7rMf@snSoMJ6SiZr0mCUD69jw@4Lb$-)o{*n;wSQlis)T zd=~5N={~q}ceD2WCj*(kLl}%T1-XO2?6d*0$qb@Npr8yBwZ^jDZ0L>`4DqE|bBC)3 z4)Lt32v3SGLa3TiBl<(1)iM#iW*Ykh;Ep!CW5TXKnu(IVvh1bt=Y z&J%uoiPb1gKMo5!+idLqE+EArqzFV5GFl>0uHjp^fp-t!j|q0)Qw(KOz=f#`hRN;N z%31dLX^$fLjrjx5k4@kcl#t>jVe`}xg7)@>hW4l(o<1|18C+5CwwV1oP|xJa1YXbB zJN_1&p%4PQVLWaj@&d_LMp>Z0sVGV3)#-c5ek?{XIf6P(iE3oPr5IO7UM#k*BB8m5 zPDNcpLNx4MBZ@vbfyTJQL&f;yuFyl)f{{Csn+ZRddv z_tGnn(U(zH=D4LjQBjeKN2bCEnRCEF6o52HkYYJt zX(WNPh}hIKS)T{J{Sy zq<$dd=s=t~BxjQuas&I6QI{h?hUawJGC`{II^-?3?My+R2`}muG03b1mpLRMtx^FW zl+VukRUdkq>l>;heqgZNFow4?rLe)x&KP=;ZfDXAR54cf3gSasu)fSppt<#)|N*|_XX4%rb7xi>CI~WXCru9?}^FQ37S{sx)-uspAWn^G$ zv5t6dv6K%RdJ1mmaO3m%AKzFC8)8S~U!y&>Mhz@}&!v?XewLv3*>jbxnWj6PSt?)Qjj`b_y6b z0z4TAt|;qrj~h*2&B7SQp9L`eDz?L&$ANfOkTyR0$G@<&t$c{YRd+nD9V_fy;SbfF z4VlxU4ZdG+VFf(*T9Qy=6?}43XQOt)Xl);pHip|w9FMmi+Utb2!p|s#9~6YUYxPX* zu*Y@nn*>UI^2?9=V^EJqG?T9(L$)E6;_B@W6ZND0p8{LhZOZ3GCqwwV%H?sK7@2PNiD`n#lMPbIqYGh_2HSc5C)nH zmmYN1=lHyw*bXo8&VOZ`ug)5u_7|w&d?t(Oo(WktiK1OyrKsS4$U6TZ((m#mX_}6U%REwJ~ddglO1xzt6GKH1M+rSRgq~mgweBk}-@@o=!4EAD>KX{kLkrj`H-Ssypd8Yq z7@ev9T(mXQ5-&Isj*?6W4B4&;-YAcR8xyKc9lV+eQ)xw zM9k1G>>UhcaH|FB>K{?nn>{zeZ}T=OFBjf()OC?1a;?_>ePoJ4!LoL1sYourY?nO2O0N1! zunDM*vwvJWiT9Z{vx%q?9=s_J(S0IytFP#cBA4s@6QlPKg^3*Xc`;McdKjVYhRl8e zM_UwTY2bMmE8)0&1Y}t}g>=a(MN?gxN1@YWIuB~~j36NzD@it&4Fp=JrP*bF>CD#= ze$SUukSA=JhM#SrrJURjdXL(P%2T`7Bl$UCffQ^Zc7)?wU1rJ_0* zMbx4U8dH+FQfL9C*`9nHeYt@jQRwZShc5)q~r9gDi{k%Dd9m&ece&n$@p%o&wXtYB??~V%gPt*h%-5;yWtIzqZ)^;u8hfS z*-K{4jObg3=4(AL0KU4NQPoQa^G#L0oMr-*3Jw=ICc%p3Sa)4zd$Ib(YZ2=`GwnW& zhKT{ve|(7wtubtf9ziUkM4!s8B-@pdYRIt+so0~f%qFbUq*t%h*`tq=V99vGAqB%b%9LG`D*G#qBc;V zHWu{xXkXEH%>$kEe4&w}QWkCSsdge|K*@=GghD6uv1d|=n;1Uy+)bfVMcn`^P@i@x zUmgBcWMe=;pFL_#QjDu!^}fW=POD5tgTy_cg<%@n%#vEbDq%%9OLRrqyT9!T3=uWR zY*xteJOr)PkuFk+iI!)*dRp#y>oZVhjpAbxE0{vqux;ITj(_#-ZheC_E{tysxnCdR zPk>ArIASgefX1xj${RDlbacrb6hsgzqyZJT)|PMYUs&tk1Ce&^{a1e0dxb@ca$6Hw z`TJro=VS^$kIZ)kOWKTPh5~-3e$;;EtvSea0$%mEX~uu0@-xU2LPJrJ-z@kvFfZo6M?)KCQ#8v-z|-f-UA)cp|@;D&nq>VpqQ9A7?+33T5wF?KNA;! 
z)sSdFk`Aaq{DXZx-^$a@^a_uAitzdzFFg4zc|5qR>wUs}ii^k=HeU#q+{dh3A)jYp zro|G=KE~SK-iCV}WD<;mT9r&*a|k$F&E@ZnZ#d2XNKE$4%{>4w^*SYmIG$QOyToOs z!5XLf-Me}a-+K^_KaTo$v0anP_+zOE(brd3+MJ;yA+K{R%FXtNLAFcZ_rP~MR*`nB zSJ={GLkAo_p;fMCe!SL0pG`4aR8(|)`dyiX19TkQ9_s_t4xonFPh5LvNnAlVk04l{ z?gFf}UZV}JhlfWZNH30N3*b;wQ%n2&1x1u_k6i+%rB+l&M@Jt&zgIMN~8V>6f78E4aDpLvl@ZrO*vMC?(*dkEew&qOv zc>UqjCjpmF`o!M9X0w5y>EiF{o^}FQF*A$A7Ya|HfK1k?w~)K(>Nvx#JiXWVyQAV5 ze|!j^PyS6HuS#KCwXi4i1~jV%aj&d5eq5GBka#4xID#fUI+!39_j40@djYH=vA!Ok zqgQoqltfaSU`mAdw&0bXy$8vl=g5@S<{aUKulUX{KdHpak4I->mq)&mMmrmRc`2!> zjN#$o$p{1|9o@?=$3J}Q4vq8ES<7wi<(~!zt6s*oq8n{xXOrLV@9iZ=M`PcAmz0#0 zoSt4rFT}(1Opd#V+@*!biDD0e^zSSGK z9)D@5UsNS3a9LrLq6tRy&JU7Be&we^#kT#*#fD>$(7m-)ViUQJC8HocCtV<&*{K3& zVP;lxbbRNP5IJ~oP`zDDhXC|!Wf5hKm#=Rd*eLQxuUq4yqLPM(m0H5xrKCc-ebrP| z(?EX+Vb_gMZ(Sme<$@phgJRICco##%9HVv*#fF9kMse}LnTG>pQ253!rc+DR_5s}R z-SeopxU{!#**mDX{f&)tV`F1qbk)F*$IetCB;%cJ+Ub${SiJH zFqo7q!}jF-d?h#gUsIm9Nx8XPpq;f+QB#u+I6(!}SaPr<=#&UALr}ukJR+wX8u&b4 zxsDkR&eI{1!pBEPr{nzD*x1^ILAi?t5fM?|A{;IxR4yRqdn$YOm0-yw56Cvq1rFyM z=4iUv`5BU}2EH6h9Pwu$NU9e*e* zBG6eC&cH($k{PZXQ}Mc(i-$+q#ig#j^7i&NTcgv(+?r)3i{IAD&(DB=f*_Wlwp{Jq z4FQ3K%sHCHqG9jD&D8XC`{v&RaTMf7-Fk3zOBVW;quOHu+mc?(gsaifoU=w#8EaB#nWIDbRJWM5|5}&tKE`=qVIBOM5`b zCLHj)PGs)t>grmUZ>9n*UeL&9E19qsfjBf!aKfMaN?IPlhEwFK5=H=($Z7OiUpoiFB!yazR4x z>rAULj^Bk|$)!)o=oq(yF0?uadXEKsdbk^&0)$ZtGC=6VAz{GDbh*{k6qfAz+MThM zD6^Hz;?G4$?BBrIs?9ogBHB;^Fna+M3L#L1?oZJ1Am8UfO-(~dDPF|7ZQ{xwJcBYm zI#(vSo8~U&q38*~98giQcep~v=@gL{utFk#Qu_T-{rod{pfy5c688b-qd1y`qx7W0 zN6T2FPa%wE_kby|gu9av(V`N~@j>!)Ia4GD{kL*?wp$$VDCZiXf9!Ci{^PW)_HW8Lncc2 zSwzwaD)RQnl90{K&E%_fe|pj!%Ikoe9$NTbcMf$0A0&T}?4Xy+ zCw-J>en8~%n8+s{C7jz`5CKbn&5FY2dy_SYp-GvHm ry^c-zbRCsOfqHBE1Fn@q^c!D#Vfe)#n%iF};73tTO}0kb?A8AQr~R?c literal 0 HcmV?d00001 From 3b8d0f803f1c6277f2c17a73cf4803ab0bd9954b Mon Sep 17 00:00:00 2001 From: Abhishek Das Date: Wed, 1 Jul 2020 00:28:35 -0700 Subject: [PATCH 072/131] HADOOP-17032. Fix getContentSummary in ViewFileSystem to handle multiple children mountpoints pointing to different filesystems (#2060). Contributed by Abhishek Das. 
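As a rough illustration of the behaviour this change enables (a minimal sketch, not part of the patch: the class name ViewFsSummaryExample, the mount points /data and /logs, and the hdfs://nn1:8020 target are made up for the example), a content summary taken over a view that spans two different target file systems could be exercised like this:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsConstants;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.viewfs.ConfigUtil;

    public class ViewFsSummaryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Two child mount points backed by different file systems.
        ConfigUtil.addLink(conf, "/data", new URI("hdfs://nn1:8020/data"));
        ConfigUtil.addLink(conf, "/logs", new URI("file:///tmp/logs"));
        try (FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf)) {
          // With this patch the summary of the mount-table root aggregates the
          // length, file count and directory count of both mount targets.
          ContentSummary summary = viewFs.getContentSummary(new Path("/"));
          System.out.println("files=" + summary.getFileCount()
              + " dirs=" + summary.getDirectoryCount()
              + " bytes=" + summary.getLength());
        }
      }
    }

The same resolve-and-aggregate idea is what the getContentSummary and getStatus overrides below implement for ViewFileSystem.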
--- .../hadoop/fs/viewfs/ViewFileSystem.java | 37 +++++++++++ .../fs/viewfs/ViewFileSystemBaseTest.java | 61 +++++++++++++++++++ 2 files changed, 98 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 56448cb600b61..39d78cf65012d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -1328,6 +1328,43 @@ private FileStatus[] listStatusForFallbackLink() throws IOException { return new FileStatus[0]; } + @Override + public ContentSummary getContentSummary(Path f) throws IOException { + long[] summary = {0, 0, 1}; + for (FileStatus status : listStatus(f)) { + Path targetPath = + Path.getPathWithoutSchemeAndAuthority(status.getPath()); + InodeTree.ResolveResult res = + fsState.resolve(targetPath.toString(), true); + ContentSummary child = + res.targetFileSystem.getContentSummary(res.remainingPath); + summary[0] += child.getLength(); + summary[1] += child.getFileCount(); + summary[2] += child.getDirectoryCount(); + } + return new ContentSummary.Builder() + .length(summary[0]) + .fileCount(summary[1]) + .directoryCount(summary[2]) + .build(); + } + + @Override + public FsStatus getStatus(Path p) throws IOException { + long[] summary = {0, 0, 0}; + for (FileStatus status : listStatus(p)) { + Path targetPath = + Path.getPathWithoutSchemeAndAuthority(status.getPath()); + InodeTree.ResolveResult res = + fsState.resolve(targetPath.toString(), true); + FsStatus child = res.targetFileSystem.getStatus(res.remainingPath); + summary[0] += child.getCapacity(); + summary[1] += child.getUsed(); + summary[2] += child.getRemaining(); + } + return new FsStatus(summary[0], summary[1], summary[2]); + } + @Override public boolean mkdirs(Path dir, FsPermission permission) throws AccessControlException, FileAlreadyExistsException { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java index 59588a527f46e..05d7974395013 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.fs.viewfs; +import java.io.File; import java.io.FileNotFoundException; +import java.io.FileOutputStream; import java.io.IOException; import java.net.URI; import java.security.PrivilegedExceptionAction; @@ -32,6 +34,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.BlockStoragePolicySpi; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; @@ -57,6 +61,8 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assume; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.hadoop.fs.FileSystemTestHelper.*; @@ -109,6 +115,9 @@ protected 
FileSystemTestHelper createFileSystemHelper() { return new FileSystemTestHelper(); } + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + @Before public void setUp() throws Exception { initializeTargetTestRoot(); @@ -1369,4 +1378,56 @@ public void testDeleteOnExit() throws Exception { viewFs.close(); assertFalse(fsTarget.exists(realTestPath)); } + + @Test + public void testGetContentSummary() throws IOException { + ContentSummary summaryBefore = + fsView.getContentSummary(new Path("/internalDir")); + String expected = "GET CONTENT SUMMARY"; + Path filePath = + new Path("/internalDir/internalDir2/linkToDir3", "foo"); + + try (FSDataOutputStream outputStream = fsView.create(filePath)) { + outputStream.write(expected.getBytes()); + } + + Path newDirPath = new Path("/internalDir/linkToDir2", "bar"); + fsView.mkdirs(newDirPath); + + ContentSummary summaryAfter = + fsView.getContentSummary(new Path("/internalDir")); + assertEquals("The file count didn't match", + summaryBefore.getFileCount() + 1, + summaryAfter.getFileCount()); + assertEquals("The size didn't match", + summaryBefore.getLength() + expected.length(), + summaryAfter.getLength()); + assertEquals("The directory count didn't match", + summaryBefore.getDirectoryCount() + 1, + summaryAfter.getDirectoryCount()); + } + + @Test + public void testGetContentSummaryWithFileInLocalFS() throws Exception { + ContentSummary summaryBefore = + fsView.getContentSummary(new Path("/internalDir")); + String expected = "GET CONTENT SUMMARY"; + File localFile = temporaryFolder.newFile("localFile"); + try (FileOutputStream fos = new FileOutputStream(localFile)) { + fos.write(expected.getBytes()); + } + ConfigUtil.addLink(conf, + "/internalDir/internalDir2/linkToLocalFile", localFile.toURI()); + + try (FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, conf)) { + ContentSummary summaryAfter = + fs.getContentSummary(new Path("/internalDir")); + assertEquals("The file count didn't match", + summaryBefore.getFileCount() + 1, + summaryAfter.getFileCount()); + assertEquals("The size didn't match", + summaryBefore.getLength() + expected.length(), + summaryAfter.getLength()); + } + } } From 4e37ad59b865d95b63d72523a083fed9beabc72b Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Wed, 1 Jul 2020 16:52:25 +0900 Subject: [PATCH 073/131] HADOOP-17090. Increase precommit job timeout from 5 hours to 20 hours. (#2111). Contributed by Akira Ajisaka. Signed-off-by: Ayush Saxena --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 1d4c6fe4c3c97..0461c5727aff9 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -23,7 +23,7 @@ pipeline { options { buildDiscarder(logRotator(numToKeepStr: '5')) - timeout (time: 5, unit: 'HOURS') + timeout (time: 20, unit: 'HOURS') timestamps() checkoutToSubdirectory('src') } From 6c57be48973e182f8141d166102bdc513b944900 Mon Sep 17 00:00:00 2001 From: zhaorenhai Date: Wed, 1 Jul 2020 17:57:11 +0800 Subject: [PATCH 074/131] HADOOP-17084 Update Dockerfile_aarch64 to use Bionic (#2103). Contributed by zhaorenhai.
Signed-off-by: Ayush Saxena --- dev-support/docker/Dockerfile_aarch64 | 90 +++++---------------------- 1 file changed, 16 insertions(+), 74 deletions(-) diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 index 5628c60cf9fb3..ccc517dbf9fd2 100644 --- a/dev-support/docker/Dockerfile_aarch64 +++ b/dev-support/docker/Dockerfile_aarch64 @@ -17,7 +17,7 @@ # Dockerfile for installing the necessary dependencies for building Hadoop. # See BUILDING.txt. -FROM ubuntu:xenial +FROM ubuntu:bionic WORKDIR /root @@ -35,24 +35,26 @@ ENV DEBCONF_TERSE true ###### # Install common dependencies from packages. Versions here are either # sufficient or irrelevant. -# -# WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default -# Ubuntu Java. See Java section below! ###### # hadolint ignore=DL3008 RUN apt-get -q update \ && apt-get -q install -y --no-install-recommends \ + ant \ apt-utils \ + bats \ build-essential \ bzip2 \ clang \ + cmake \ curl \ doxygen \ + findbugs \ fuse \ g++ \ gcc \ git \ gnupg-agent \ + libbcprov-java \ libbz2-dev \ libcurl4-openssl-dev \ libfuse-dev \ @@ -65,6 +67,9 @@ RUN apt-get -q update \ libzstd1-dev \ locales \ make \ + maven \ + openjdk-11-jdk \ + openjdk-8-jdk \ pinentry-curses \ pkg-config \ python \ @@ -74,47 +79,24 @@ RUN apt-get -q update \ python-setuptools \ python-wheel \ rsync \ + shellcheck \ software-properties-common \ - snappy \ sudo \ valgrind \ zlib1g-dev \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* - -####### -# OpenJDK 8 -####### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends openjdk-8-jdk libbcprov-java \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - - ###### -# Install cmake 3.1.0 (3.5.1 ships with Xenial) -# There is no cmake binary available for aarch64. Build from source. 
+# Set env vars required to build Hadoop ###### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/cmake/src \ - && curl -L -s -S \ - https://cmake.org/files/v3.1/cmake-3.1.0-1-src.tar.bz2 \ - -o /opt/cmake/cmake-src.tar.bz2 \ - && tar xvjf /opt/cmake/cmake-src.tar.bz2 -C /opt/cmake/src \ - && cd /opt/cmake/src \ - && tar xvjf cmake-3.1.0.tar.bz2 \ - && cd cmake-3.1.0 && patch -p0 -i ../cmake-3.1.0-1.patch && mkdir .build && cd .build \ - && ../bootstrap --parallel=2 \ - && make -j2 && ./bin/cpack \ - && tar xzf cmake-3.1.0-Linux-aarch64.tar.gz --strip-components 1 -C /opt/cmake \ - && cd /opt/cmake && rm -rf /opt/cmake/src -ENV CMAKE_HOME /opt/cmake -ENV PATH "${PATH}:/opt/cmake/bin" +ENV MAVEN_HOME /usr +# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) +ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-arm64 +ENV FINDBUGS_HOME /usr ###### -# Install Google Protobuf 3.7.1 (2.6.0 ships with Xenial) +# Install Google Protobuf 3.7.1 (3.0.0 ships with Bionic) ###### # hadolint ignore=DL3003 RUN mkdir -p /opt/protobuf-src \ @@ -130,46 +112,6 @@ RUN mkdir -p /opt/protobuf-src \ ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" -###### -# Install Apache Maven 3.3.9 (3.3.9 ships with Xenial) -###### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends maven \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -ENV MAVEN_HOME /usr - -###### -# Install findbugs 3.0.1 (3.0.1 ships with Xenial) -# Ant is needed for findbugs -###### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends findbugs ant \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -ENV FINDBUGS_HOME /usr - -#### -# Install shellcheck (0.4.6, the latest as of 2017-09-26) -#### -# hadolint ignore=DL3008 -RUN add-apt-repository -y ppa:hvr/ghc \ - && apt-get -q update \ - && apt-get -q install -y --no-install-recommends shellcheck \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -#### -# Install bats (0.4.0, the latest as of 2017-09-26, ships with Xenial) -#### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends bats \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - #### # Install pylint at fixed version (2.0.0 removed python2 support) # https://github.com/PyCQA/pylint/issues/2294 From 9b5557a9e811f04b964aa3a31ba8846a907d26f9 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Wed, 1 Jul 2020 13:41:30 +0200 Subject: [PATCH 075/131] YARN-10325. Document max-parallel-apps for Capacity Scheduler. Contributed by Peter Bacsko --- .../src/site/markdown/CapacityScheduler.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md index d0e453b1b7e74..6a857e9f7b7ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md @@ -142,6 +142,23 @@ Configuration |:---- |:---- | | `yarn.scheduler.capacity.maximum-applications` / `yarn.scheduler.capacity..maximum-applications` | Maximum number of applications in the system which can be concurrently active both running and pending. Limits on each queue are directly proportional to their queue capacities and user limits. 
This is a hard limit and any applications submitted when this limit is reached will be rejected. Default is 10000. This can be set for all queues with `yarn.scheduler.capacity.maximum-applications` and can also be overridden on a per queue basis by setting `yarn.scheduler.capacity..maximum-applications`. Integer value expected. | | `yarn.scheduler.capacity.maximum-am-resource-percent` / `yarn.scheduler.capacity..maximum-am-resource-percent` | Maximum percent of resources in the cluster which can be used to run application masters - controls number of concurrent active applications. Limits on each queue are directly proportional to their queue capacities and user limits. Specified as a float - ie 0.5 = 50%. Default is 10%. This can be set for all queues with `yarn.scheduler.capacity.maximum-am-resource-percent` and can also be overridden on a per queue basis by setting `yarn.scheduler.capacity..maximum-am-resource-percent` | +| `yarn.scheduler.capacity.max-parallel-apps` / `yarn.scheduler.capacity..max-parallel-apps` | Maximum number of applications that can run at the same time. Unlike `maximum-applications`, application submissions are *not* rejected when this limit is reached. Instead they stay in `ACCEPTED` state until they are eligible to run. This can be set for all queues with `yarn.scheduler.capacity.max-parallel-apps` and can also be overridden on a per queue basis by setting `yarn.scheduler.capacity..max-parallel-apps`. Integer value is expected. By default, there is no limit. | + + You can also limit the number of parallel applications on a per user basis. + +| Property | Description | +|:---- |:---- | +| `yarn.scheduler.capacity.user.max-parallel-apps` | Maximum number of applications that can run at the same time for all users. Default value is unlimited. | +| `yarn.scheduler.capacity.user..max-parallel-apps` | Maximum number of applications that can run at the same time for a specific user. This overrides the global setting. | + + + The evaluation of these limits happens in the following order: + +1. `maximum-applications` check - if the limit is exceeded, the submission is rejected immediately. + +2. `max-parallel-apps` check - the submission is accepted, but the application will not transition to `RUNNING` state. It stays in `ACCEPTED` until the queue / user limits are satisfied. + +3. `maximum-am-resource-percent` check - if there are too many Application Masters running, the application stays in `ACCEPTED` state until there is enough room for it. * Queue Administration & Permissions From 04abd0eb17b58e321893e8651ec596e9f7ac786f Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Wed, 1 Jul 2020 14:10:55 +0200 Subject: [PATCH 076/131] YARN-10330. Add missing test scenarios to TestUserGroupMappingPlacementRule and TestAppNameMappingPlacementRule.
Contributed by Peter Bacsko --- .../TestAppNameMappingPlacementRule.java | 106 ++++++++--- .../TestUserGroupMappingPlacementRule.java | 169 +++++++++++++++++- .../scheduler/fair/SimpleGroupsMapping.java | 11 +- 3 files changed, 263 insertions(+), 23 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java index 1204213cf9162..29141aedf6c48 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java @@ -37,7 +37,15 @@ import static org.mockito.Mockito.when; public class TestAppNameMappingPlacementRule { + private static final String ROOT_QUEUE = "root"; + private static final String Q2_QUEUE = "q2"; + private static final String Q1_QUEUE = "q1"; + private static final String USER_NAME = "user"; + private static final String DEFAULT_QUEUE = "default"; + private static final String APPLICATION_PLACEHOLDER = "%application"; + private static final String AMBIGUOUS_QUEUE = "ambiguousQueue"; private static final String APP_NAME = "DistributedShell"; + private static final String MAPREDUCE_APP_NAME = "MAPREDUCE"; private YarnConfiguration conf = new YarnConfiguration(); @@ -62,16 +70,20 @@ private void verifyQueueMapping(QueueMapping queueMapping, CapacitySchedulerQueueManager qm = mock(CapacitySchedulerQueueManager.class); when(qm.isAmbiguous(Mockito.isA(String.class))).thenReturn(false); + when(qm.isAmbiguous(AMBIGUOUS_QUEUE)).thenReturn(true); + rule.queueManager = qm; ApplicationSubmissionContext asc = Records.newRecord( ApplicationSubmissionContext.class); - if (inputQueue.equals("%application")) { + if (inputQueue.equals(APPLICATION_PLACEHOLDER)) { inputQueue = APP_NAME; } asc.setQueue(inputQueue); String appName = queueMapping.getSource(); - if (appName.equals("%application")) { + // to create a scenario when source != appName + if (appName.equals(APPLICATION_PLACEHOLDER) + || appName.equals(MAPREDUCE_APP_NAME)) { appName = APP_NAME; } asc.setApplicationName(appName); @@ -81,31 +93,85 @@ private void verifyQueueMapping(QueueMapping queueMapping, ctx != null ? 
ctx.getQueue() : inputQueue); } - public QueueMapping queueMappingBuilder(String source, String queue) { + public QueueMapping getQueueMapping(String source, String queue) { + return getQueueMapping(source, null, queue); + } + + public QueueMapping getQueueMapping(String source, String parent, + String queue) { return QueueMapping.QueueMappingBuilder.create() .type(QueueMapping.MappingType.APPLICATION) .source(source) .queue(queue) + .parentQueue(parent) .build(); } @Test - public void testMapping() throws YarnException { - // simple base case for mapping user to queue - verifyQueueMapping(queueMappingBuilder(APP_NAME, - "q1"), "user_1", "q1"); - verifyQueueMapping(queueMappingBuilder("%application", "q2"), "user_1", - "q2"); - verifyQueueMapping(queueMappingBuilder("%application", "%application"), - "user_1", APP_NAME); - - // specify overwritten, and see if user specified a queue, and it will be - // overridden - verifyQueueMapping(queueMappingBuilder(APP_NAME, - "q1"), "1", "q2", "q1", true); - - // if overwritten not specified, it should be which user specified - verifyQueueMapping(queueMappingBuilder(APP_NAME, - "q1"), "1", "q2", "q2", false); + public void testSpecificAppNameMappedToDefinedQueue() throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, Q1_QUEUE), + USER_NAME, Q1_QUEUE); + } + + @Test + public void testPlaceholderAppSourceMappedToQueue() throws YarnException { + verifyQueueMapping(getQueueMapping(APPLICATION_PLACEHOLDER, Q2_QUEUE), + USER_NAME, Q2_QUEUE); + } + + @Test + public void testPlaceHolderAppSourceAndQueueMappedToAppNameQueue() + throws YarnException { + verifyQueueMapping(getQueueMapping(APPLICATION_PLACEHOLDER, + APPLICATION_PLACEHOLDER), USER_NAME, APP_NAME); + } + + @Test + public void testQueueInMappingOverridesSpecifiedQueue() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, + Q1_QUEUE), USER_NAME, Q2_QUEUE, Q1_QUEUE, true); + } + + @Test + public void testQueueInMappingDoesNotOverrideSpecifiedQueue() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, + Q1_QUEUE), USER_NAME, Q2_QUEUE, Q2_QUEUE, false); } + + @Test + public void testDefaultQueueInMappingIsNotUsedWithoutOverride() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, + DEFAULT_QUEUE), USER_NAME, Q2_QUEUE, Q2_QUEUE, false); + } + + @Test + public void testDefaultQueueInMappingEqualsToInputQueue() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, + DEFAULT_QUEUE), USER_NAME, DEFAULT_QUEUE, DEFAULT_QUEUE, false); + } + + @Test + public void testMappingSourceDiffersFromInputQueue() throws YarnException { + verifyQueueMapping(getQueueMapping(MAPREDUCE_APP_NAME, + Q1_QUEUE), USER_NAME, DEFAULT_QUEUE, DEFAULT_QUEUE, false); + } + + @Test(expected = YarnException.class) + public void testMappingContainsAmbiguousLeafQueueWithoutParent() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, AMBIGUOUS_QUEUE), + USER_NAME, DEFAULT_QUEUE, DEFAULT_QUEUE, false); + } + + @Test + public void testMappingContainsAmbiguousLeafQueueWithParent() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, ROOT_QUEUE, AMBIGUOUS_QUEUE), + USER_NAME, DEFAULT_QUEUE, AMBIGUOUS_QUEUE, false); + } + } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java index 5028ce6c1322b..98fba14787593 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java @@ -149,6 +149,7 @@ private AbstractCSQueue createRootQueue(String rootQueueName) { ParentQueue root = mock(ParentQueue.class); when(root.getQueuePath()).thenReturn(rootQueueName); when(queueManager.getQueue(rootQueueName)).thenReturn(root); + when(queueManager.getQueueByFullName(rootQueueName)).thenReturn(root); return root; } @@ -220,11 +221,13 @@ private void verifyQueueMapping(QueueMappingTestData queueMappingTestData) .withQueue("root.agroup.a") .withQueue("root.asubgroup2") .withQueue("root.bsubgroup2.b") + .withQueue("root.users.primarygrouponly") + .withQueue("root.admins.primarygrouponly") .withManagedParentQueue("root.managedParent") .build(); when(queueManager.getQueue(isNull())).thenReturn(null); - + when(queueManager.isAmbiguous("primarygrouponly")).thenReturn(true); rule.setQueueManager(queueManager); ApplicationSubmissionContext asc = Records.newRecord( ApplicationSubmissionContext.class); @@ -375,6 +378,170 @@ public void testUserMappingToQueueNamedAsUsernameWithPrimaryGroupAsParentQueue() .build()); } + @Test + public void testUserMappingToPrimaryGroupInvalidNestedPlaceholder() + throws YarnException { + // u:%user:%primary_group.%random, no matching queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%random") + .parentQueue("%primary_group") + .build()) + .inputUser("a") + .expectedQueue("default") + .build()); + } + + @Test + public void testUserMappingToSecondaryGroupInvalidNestedPlaceholder() + throws YarnException { + // u:%user:%secondary_group.%random, no matching queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%random") + .parentQueue("%secondary_group") + .build()) + .inputUser("a") + .expectedQueue("default") + .build()); + } + + @Test + public void testUserMappingDiffersFromSubmitterQueueDoesNotExist() + throws YarnException { + // u:a:%random, submitter: xyz, no matching queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("a") + .queue("%random") + .build()) + .inputUser("xyz") + .expectedQueue("default") + .build()); + } + + @Test + public void testSpecificUserMappingToPrimaryGroup() throws YarnException { + // u:a:%primary_group + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("a") + .queue("%primary_group") + .build()) + .inputUser("a") + .expectedQueue("agroup") + .build()); + } + + @Test + public void testSpecificUserMappingToSecondaryGroup() + throws YarnException { + // u:a:%secondary_group + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + 
.type(MappingType.USER) + .source("a") + .queue("%secondary_group") + .build()) + .inputUser("a") + .expectedQueue("asubgroup2") + .build()); + } + + @Test + public void testSpecificUserMappingWithNoSecondaryGroup() + throws YarnException { + // u:nosecondarygroupuser:%secondary_group, no matching queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("nosecondarygroupuser") + .queue("%secondary_group") + .build()) + .inputUser("nosecondarygroupuser") + .expectedQueue("default") + .build()); + } + + @Test + public void testGenericUserMappingWithNoSecondaryGroup() + throws YarnException { + // u:%user:%user, no matching queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%user") + .parentQueue("%secondary_group") + .build()) + .inputUser("nosecondarygroupuser") + .expectedQueue("default") + .build()); + } + + @Test(expected = YarnException.class) + public void testUserMappingToNestedUserPrimaryGroupWithAmbiguousQueues() + throws YarnException { + // u:%user:%user, submitter nosecondarygroupuser, queue is ambiguous + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%user") + .parentQueue("%primary_group") + .build()) + .inputUser("nosecondarygroupuser") + .build()); + } + + @Test(expected = YarnException.class) + public void testResolvedQueueIsNotManaged() + throws YarnException { + // u:%user:%primary_group.%user, "admins" group will be "root", + // resulting parent queue will be "root" which is not managed + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%user") + .parentQueue("%primary_group") + .build()) + .inputUser("admins") + .build()); + } + + @Test(expected = YarnException.class) + public void testUserMappingToPrimaryGroupWithAmbiguousQueues() + throws YarnException { + // u:%user:%primary_group, submitter nosecondarygroupuser, + // queue is ambiguous + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%primary_group") + .build()) + .inputUser("nosecondarygroupuser") + .expectedQueue("default") + .build()); + } + @Test public void testUserMappingToQueueNamedAsUsernameWithSecondaryGroupAsParentQueue() throws YarnException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java index f7648c86d4bb5..9c916e36418bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java @@ -25,10 +25,17 @@ import org.apache.hadoop.security.GroupMappingServiceProvider; public class SimpleGroupsMapping implements 
GroupMappingServiceProvider { - + @Override public List getGroups(String user) { - return Arrays.asList(user + "group", user + "subgroup1", user + "subgroup2"); + if ("admins".equals(user)) { + return Arrays.asList("root"); + } else if ("nosecondarygroupuser".equals(user)) { + return Arrays.asList("primarygrouponly"); + } else { + return Arrays.asList( + user + "group", user + "subgroup1", user + "subgroup2"); + } } @Override From 3b5c9a90c07e6360007f3f4aa357aa665b47ca3a Mon Sep 17 00:00:00 2001 From: Mehakmeet Singh Date: Fri, 3 Jul 2020 16:11:35 +0530 Subject: [PATCH 077/131] HADOOP-16961. ABFS: Adding metrics to AbfsInputStream (#2076) Contributed by Mehakmeet Singh. --- .../fs/azurebfs/AzureBlobFileSystemStore.java | 2 + .../fs/azurebfs/services/AbfsInputStream.java | 68 ++++ .../services/AbfsInputStreamContext.java | 12 + .../services/AbfsInputStreamStatistics.java | 93 ++++++ .../AbfsInputStreamStatisticsImpl.java | 205 ++++++++++++ .../ITestAbfsInputStreamStatistics.java | 297 ++++++++++++++++++ .../TestAbfsInputStreamStatistics.java | 55 ++++ 7 files changed, 732 insertions(+) create mode 100644 hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatistics.java create mode 100644 hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatisticsImpl.java create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index 397afc8efbb18..c310e29870a6d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -86,6 +86,7 @@ import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; import org.apache.hadoop.fs.azurebfs.services.AbfsInputStream; import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamContext; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamStatisticsImpl; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamContext; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamStatisticsImpl; @@ -511,6 +512,7 @@ private AbfsInputStreamContext populateAbfsInputStreamContext() { .withReadBufferSize(abfsConfiguration.getReadBufferSize()) .withReadAheadQueueDepth(abfsConfiguration.getReadAheadQueueDepth()) .withTolerateOobAppends(abfsConfiguration.getTolerateOobAppends()) + .withStreamStatistics(new AbfsInputStreamStatisticsImpl()) .build(); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java index 50380c9bb9f40..a809bde6c3035 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java @@ -68,6 +68,9 @@ public class AbfsInputStream extends FSInputStream implements CanUnbuffer, // of valid bytes in buffer) private boolean 
closed = false; + /** Stream statistics. */ + private final AbfsInputStreamStatistics streamStatistics; + public AbfsInputStream( final AbfsClient client, final Statistics statistics, @@ -86,6 +89,7 @@ public AbfsInputStream( this.readAheadEnabled = true; this.cachedSasToken = new CachedSASToken( abfsInputStreamContext.getSasTokenRenewPeriodForStreamsInSeconds()); + this.streamStatistics = abfsInputStreamContext.getStreamStatistics(); } public String getPath() { @@ -105,10 +109,21 @@ public int read() throws IOException { @Override public synchronized int read(final byte[] b, final int off, final int len) throws IOException { + // check if buffer is null before logging the length + if (b != null) { + LOG.debug("read requested b.length = {} offset = {} len = {}", b.length, + off, len); + } else { + LOG.debug("read requested b = null offset = {} len = {}", off, len); + } + int currentOff = off; int currentLen = len; int lastReadBytes; int totalReadBytes = 0; + if (streamStatistics != null) { + streamStatistics.readOperationStarted(off, len); + } incrementReadOps(); do { lastReadBytes = readOneBlock(b, currentOff, currentLen); @@ -130,6 +145,8 @@ private int readOneBlock(final byte[] b, final int off, final int len) throws IO } Preconditions.checkNotNull(b); + LOG.debug("read one block requested b.length = {} off {} len {}", b.length, + off, len); if (len == 0) { return 0; @@ -155,6 +172,7 @@ private int readOneBlock(final byte[] b, final int off, final int len) throws IO bCursor = 0; limit = 0; if (buffer == null) { + LOG.debug("created new buffer size {}", bufferSize); buffer = new byte[bufferSize]; } @@ -183,6 +201,11 @@ private int readOneBlock(final byte[] b, final int off, final int len) throws IO if (statistics != null) { statistics.incrementBytesRead(bytesToRead); } + if (streamStatistics != null) { + // Bytes read from the local buffer. + streamStatistics.bytesReadFromBuffer(bytesToRead); + streamStatistics.bytesRead(bytesToRead); + } return bytesToRead; } @@ -200,8 +223,11 @@ private int readInternal(final long position, final byte[] b, final int offset, int numReadAheads = this.readAheadQueueDepth; long nextSize; long nextOffset = position; + LOG.debug("read ahead enabled issuing readheads num = {}", numReadAheads); while (numReadAheads > 0 && nextOffset < contentLength) { nextSize = Math.min((long) bufferSize, contentLength - nextOffset); + LOG.debug("issuing read ahead requestedOffset = {} requested size {}", + nextOffset, nextSize); ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize); nextOffset = nextOffset + nextSize; numReadAheads--; @@ -211,6 +237,7 @@ private int readInternal(final long position, final byte[] b, final int offset, receivedBytes = ReadBufferManager.getBufferManager().getBlock(this, position, length, b); if (receivedBytes > 0) { incrementReadOps(); + LOG.debug("Received data from read ahead, not doing remote read"); return receivedBytes; } @@ -218,6 +245,7 @@ private int readInternal(final long position, final byte[] b, final int offset, receivedBytes = readRemote(position, b, offset, length); return receivedBytes; } else { + LOG.debug("read ahead disabled, reading remote"); return readRemote(position, b, offset, length); } } @@ -247,6 +275,11 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti LOG.trace("Trigger client.read for path={} position={} offset={} length={}", path, position, offset, length); op = client.read(path, position, b, offset, length, tolerateOobAppends ? 
"*" : eTag, cachedSasToken.get()); cachedSasToken.update(op.getSasToken()); + if (streamStatistics != null) { + streamStatistics.remoteReadOperation(); + } + LOG.debug("issuing HTTP GET request params position = {} b.length = {} " + + "offset = {} length = {}", position, b.length, offset, length); perfInfo.registerResult(op.getResult()).registerSuccess(true); incrementReadOps(); } catch (AzureBlobFileSystemException ex) { @@ -262,6 +295,7 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti if (bytesRead > Integer.MAX_VALUE) { throw new IOException("Unexpected Content-Length"); } + LOG.debug("HTTP request read bytes = {}", bytesRead); return (int) bytesRead; } @@ -282,6 +316,7 @@ private void incrementReadOps() { */ @Override public synchronized void seek(long n) throws IOException { + LOG.debug("requested seek to position {}", n); if (closed) { throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); } @@ -292,13 +327,21 @@ public synchronized void seek(long n) throws IOException { throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF); } + if (streamStatistics != null) { + streamStatistics.seek(n, fCursor); + } + if (n>=fCursor-limit && n<=fCursor) { // within buffer bCursor = (int) (n-(fCursor-limit)); + if (streamStatistics != null) { + streamStatistics.seekInBuffer(); + } return; } // next read will read from here fCursor = n; + LOG.debug("set fCursor to {}", fCursor); //invalidate buffer limit = 0; @@ -390,6 +433,7 @@ public boolean seekToNewSource(long l) throws IOException { public synchronized void close() throws IOException { closed = true; buffer = null; // de-reference the buffer so it can be GC'ed sooner + LOG.debug("Closing {}", this); } /** @@ -443,4 +487,28 @@ protected void setCachedSasToken(final CachedSASToken cachedSasToken) { this.cachedSasToken = cachedSasToken; } + /** + * Getter for AbfsInputStreamStatistics. + * + * @return an instance of AbfsInputStreamStatistics. + */ + @VisibleForTesting + public AbfsInputStreamStatistics getStreamStatistics() { + return streamStatistics; + } + + /** + * Get the statistics of the stream. + * @return a string value. 
+ */ + @Override + public String toString() { + final StringBuilder sb = new StringBuilder(super.toString()); + if (streamStatistics != null) { + sb.append("AbfsInputStream@(").append(this.hashCode()).append("){"); + sb.append(streamStatistics.toString()); + sb.append("}"); + } + return sb.toString(); + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamContext.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamContext.java index a847b56eabe3a..f8d3b2a599bfe 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamContext.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamContext.java @@ -29,6 +29,8 @@ public class AbfsInputStreamContext extends AbfsStreamContext { private boolean tolerateOobAppends; + private AbfsInputStreamStatistics streamStatistics; + public AbfsInputStreamContext(final long sasTokenRenewPeriodForStreamsInSeconds) { super(sasTokenRenewPeriodForStreamsInSeconds); } @@ -52,6 +54,12 @@ public AbfsInputStreamContext withTolerateOobAppends( return this; } + public AbfsInputStreamContext withStreamStatistics( + final AbfsInputStreamStatistics streamStatistics) { + this.streamStatistics = streamStatistics; + return this; + } + public AbfsInputStreamContext build() { // Validation of parameters to be done here. return this; @@ -68,4 +76,8 @@ public int getReadAheadQueueDepth() { public boolean isTolerateOobAppends() { return tolerateOobAppends; } + + public AbfsInputStreamStatistics getStreamStatistics() { + return streamStatistics; + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatistics.java new file mode 100644 index 0000000000000..2603394c9337f --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatistics.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Interface for statistics for the AbfsInputStream. + */ +@InterfaceStability.Unstable +public interface AbfsInputStreamStatistics { + /** + * Seek backwards, incrementing the seek and backward seek counters. + * + * @param negativeOffset how far was the seek? + * This is expected to be negative. + */ + void seekBackwards(long negativeOffset); + + /** + * Record a forward seek, adding a seek operation, a forward + * seek operation, and any bytes skipped. 
+ * + * @param skipped number of bytes skipped by reading from the stream. + * If the seek was implemented by a close + reopen, set this to zero. + */ + void seekForwards(long skipped); + + /** + * Record a forward or backward seek, adding a seek operation, a forward or + * a backward seek operation, and number of bytes skipped. + * + * @param seekTo seek to the position. + * @param currentPos current position. + */ + void seek(long seekTo, long currentPos); + + /** + * Increment the bytes read counter by the number of bytes; + * no-op if the argument is negative. + * + * @param bytes number of bytes read. + */ + void bytesRead(long bytes); + + /** + * Record the total bytes read from buffer. + * + * @param bytes number of bytes that are read from buffer. + */ + void bytesReadFromBuffer(long bytes); + + /** + * Records the total number of seeks done in the buffer. + */ + void seekInBuffer(); + + /** + * A {@code read(byte[] buf, int off, int len)} operation has started. + * + * @param pos starting position of the read. + * @param len length of bytes to read. + */ + void readOperationStarted(long pos, long len); + + /** + * Records a successful remote read operation. + */ + void remoteReadOperation(); + + /** + * Makes the string of all the AbfsInputStream statistics. + * @return the string with all the statistics. + */ + @Override + String toString(); +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatisticsImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatisticsImpl.java new file mode 100644 index 0000000000000..fd18910813d39 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatisticsImpl.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +/** + * Stats for the AbfsInputStream. + */ +public class AbfsInputStreamStatisticsImpl + implements AbfsInputStreamStatistics { + private long seekOperations; + private long forwardSeekOperations; + private long backwardSeekOperations; + private long bytesRead; + private long bytesSkippedOnSeek; + private long bytesBackwardsOnSeek; + private long seekInBuffer; + private long readOperations; + private long bytesReadFromBuffer; + private long remoteReadOperations; + + /** + * Seek backwards, incrementing the seek and backward seek counters. + * + * @param negativeOffset how far was the seek? + * This is expected to be negative. 
+ */ + @Override + public void seekBackwards(long negativeOffset) { + seekOperations++; + backwardSeekOperations++; + bytesBackwardsOnSeek -= negativeOffset; + } + + /** + * Record a forward seek, adding a seek operation, a forward + * seek operation, and any bytes skipped. + * + * @param skipped number of bytes skipped by reading from the stream. + * If the seek was implemented by a close + reopen, set this to zero. + */ + @Override + public void seekForwards(long skipped) { + seekOperations++; + forwardSeekOperations++; + if (skipped > 0) { + bytesSkippedOnSeek += skipped; + } + } + + /** + * Record a forward or backward seek, adding a seek operation, a forward or + * a backward seek operation, and number of bytes skipped. + * The seek direction will be calculated based on the parameters. + * + * @param seekTo seek to the position. + * @param currentPos current position. + */ + @Override + public void seek(long seekTo, long currentPos) { + if (seekTo >= currentPos) { + this.seekForwards(seekTo - currentPos); + } else { + this.seekBackwards(currentPos - seekTo); + } + } + + /** + * Increment the bytes read counter by the number of bytes; + * no-op if the argument is negative. + * + * @param bytes number of bytes read. + */ + @Override + public void bytesRead(long bytes) { + if (bytes > 0) { + bytesRead += bytes; + } + } + + /** + * {@inheritDoc} + * + * Total bytes read from the buffer. + * + * @param bytes number of bytes that are read from buffer. + */ + @Override + public void bytesReadFromBuffer(long bytes) { + if (bytes > 0) { + bytesReadFromBuffer += bytes; + } + } + + /** + * {@inheritDoc} + * + * Increment the number of seeks in the buffer. + */ + @Override + public void seekInBuffer() { + seekInBuffer++; + } + + /** + * A {@code read(byte[] buf, int off, int len)} operation has started. + * + * @param pos starting position of the read. + * @param len length of bytes to read. + */ + @Override + public void readOperationStarted(long pos, long len) { + readOperations++; + } + + /** + * {@inheritDoc} + * + * Increment the counter when a remote read operation occurs. + */ + @Override + public void remoteReadOperation() { + remoteReadOperations++; + } + + public long getSeekOperations() { + return seekOperations; + } + + public long getForwardSeekOperations() { + return forwardSeekOperations; + } + + public long getBackwardSeekOperations() { + return backwardSeekOperations; + } + + public long getBytesRead() { + return bytesRead; + } + + public long getBytesSkippedOnSeek() { + return bytesSkippedOnSeek; + } + + public long getBytesBackwardsOnSeek() { + return bytesBackwardsOnSeek; + } + + public long getSeekInBuffer() { + return seekInBuffer; + } + + public long getReadOperations() { + return readOperations; + } + + public long getBytesReadFromBuffer() { + return bytesReadFromBuffer; + } + + public long getRemoteReadOperations() { + return remoteReadOperations; + } + + /** + * String operator describes all the current statistics. + * Important: there are no guarantees as to the stability + * of this value. + * + * @return the current values of the stream statistics. 
+ */ + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "StreamStatistics{"); + sb.append(", SeekOperations=").append(seekOperations); + sb.append(", ForwardSeekOperations=").append(forwardSeekOperations); + sb.append(", BackwardSeekOperations=").append(backwardSeekOperations); + sb.append(", BytesSkippedOnSeek=").append(bytesSkippedOnSeek); + sb.append(", BytesBackwardsOnSeek=").append(bytesBackwardsOnSeek); + sb.append(", seekInBuffer=").append(seekInBuffer); + sb.append(", BytesRead=").append(bytesRead); + sb.append(", ReadOperations=").append(readOperations); + sb.append(", bytesReadFromBuffer=").append(bytesReadFromBuffer); + sb.append(", remoteReadOperations=").append(remoteReadOperations); + sb.append('}'); + return sb.toString(); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java new file mode 100644 index 0000000000000..7a62ecab7f4ea --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java @@ -0,0 +1,297 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; + +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamContext; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamStatisticsImpl; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; +import org.apache.hadoop.io.IOUtils; + +public class ITestAbfsInputStreamStatistics + extends AbstractAbfsIntegrationTest { + private static final int OPERATIONS = 10; + private static final Logger LOG = + LoggerFactory.getLogger(ITestAbfsInputStreamStatistics.class); + private static final int ONE_MB = 1024 * 1024; + private static final int ONE_KB = 1024; + private byte[] defBuffer = new byte[ONE_MB]; + + public ITestAbfsInputStreamStatistics() throws Exception { + } + + /** + * Test to check the initial values of the AbfsInputStream statistics. 
+ */ + @Test + public void testInitValues() throws IOException { + describe("Testing the initial values of AbfsInputStream Statistics"); + + AzureBlobFileSystem fs = getFileSystem(); + AzureBlobFileSystemStore abfss = fs.getAbfsStore(); + Path initValuesPath = path(getMethodName()); + AbfsOutputStream outputStream = null; + AbfsInputStream inputStream = null; + + try { + + outputStream = createAbfsOutputStreamWithFlushEnabled(fs, initValuesPath); + inputStream = abfss.openFileForRead(initValuesPath, fs.getFsStatistics()); + + AbfsInputStreamStatisticsImpl stats = + (AbfsInputStreamStatisticsImpl) inputStream.getStreamStatistics(); + + checkInitValue(stats.getSeekOperations(), "seekOps"); + checkInitValue(stats.getForwardSeekOperations(), "forwardSeekOps"); + checkInitValue(stats.getBackwardSeekOperations(), "backwardSeekOps"); + checkInitValue(stats.getBytesRead(), "bytesRead"); + checkInitValue(stats.getBytesSkippedOnSeek(), "bytesSkippedOnSeek"); + checkInitValue(stats.getBytesBackwardsOnSeek(), "bytesBackwardsOnSeek"); + checkInitValue(stats.getSeekInBuffer(), "seekInBuffer"); + checkInitValue(stats.getReadOperations(), "readOps"); + checkInitValue(stats.getBytesReadFromBuffer(), "bytesReadFromBuffer"); + checkInitValue(stats.getRemoteReadOperations(), "remoteReadOps"); + + } finally { + IOUtils.cleanupWithLogger(LOG, outputStream, inputStream); + } + } + + /** + * Test to check statistics from seek operation in AbfsInputStream. + */ + @Test + public void testSeekStatistics() throws IOException { + describe("Testing the values of statistics from seek operations in " + + "AbfsInputStream"); + + AzureBlobFileSystem fs = getFileSystem(); + AzureBlobFileSystemStore abfss = fs.getAbfsStore(); + Path seekStatPath = path(getMethodName()); + + AbfsOutputStream out = null; + AbfsInputStream in = null; + + try { + out = createAbfsOutputStreamWithFlushEnabled(fs, seekStatPath); + + //Writing a default buffer in a file. + out.write(defBuffer); + out.hflush(); + in = abfss.openFileForRead(seekStatPath, fs.getFsStatistics()); + + /* + * Writing 1MB buffer to the file, this would make the fCursor(Current + * position of cursor) to the end of file. + */ + int result = in.read(defBuffer, 0, ONE_MB); + LOG.info("Result of read : {}", result); + + /* + * Seeking to start of file and then back to end would result in a + * backward and a forward seek respectively 10 times. + */ + for (int i = 0; i < OPERATIONS; i++) { + in.seek(0); + in.seek(ONE_MB); + } + + AbfsInputStreamStatisticsImpl stats = + (AbfsInputStreamStatisticsImpl) in.getStreamStatistics(); + + LOG.info("STATISTICS: {}", stats.toString()); + + /* + * seekOps - Since we are doing backward and forward seek OPERATIONS + * times, total seeks would be 2 * OPERATIONS. + * + * backwardSeekOps - Since we are doing a backward seek inside a loop + * for OPERATION times, total backward seeks would be OPERATIONS. + * + * forwardSeekOps - Since we are doing a forward seek inside a loop + * for OPERATION times, total forward seeks would be OPERATIONS. + * + * bytesBackwardsOnSeek - Since we are doing backward seeks from end of + * file in a ONE_MB file each time, this would mean the bytes from + * backward seek would be OPERATIONS * ONE_MB. Since this is backward + * seek this value is expected be to be negative. + * + * bytesSkippedOnSeek - Since, we move from start to end in seek, but + * our fCursor(position of cursor) always remain at end of file, this + * would mean no bytes were skipped on seek. Since, all forward seeks + * are in buffer. 
+ * + * seekInBuffer - Since all seeks were in buffer, the seekInBuffer + * would be equal to 2 * OPERATIONS. + * + */ + assertEquals("Mismatch in seekOps value", 2 * OPERATIONS, + stats.getSeekOperations()); + assertEquals("Mismatch in backwardSeekOps value", OPERATIONS, + stats.getBackwardSeekOperations()); + assertEquals("Mismatch in forwardSeekOps value", OPERATIONS, + stats.getForwardSeekOperations()); + assertEquals("Mismatch in bytesBackwardsOnSeek value", + -1 * OPERATIONS * ONE_MB, stats.getBytesBackwardsOnSeek()); + assertEquals("Mismatch in bytesSkippedOnSeek value", + 0, stats.getBytesSkippedOnSeek()); + assertEquals("Mismatch in seekInBuffer value", 2 * OPERATIONS, + stats.getSeekInBuffer()); + + in.close(); + // Verifying whether stats are readable after stream is closed. + LOG.info("STATISTICS after closing: {}", stats.toString()); + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + } + + /** + * Test to check statistics value from read operation in AbfsInputStream. + */ + @Test + public void testReadStatistics() throws IOException { + describe("Testing the values of statistics from read operation in " + + "AbfsInputStream"); + + AzureBlobFileSystem fs = getFileSystem(); + AzureBlobFileSystemStore abfss = fs.getAbfsStore(); + Path readStatPath = path(getMethodName()); + + AbfsOutputStream out = null; + AbfsInputStream in = null; + + try { + out = createAbfsOutputStreamWithFlushEnabled(fs, readStatPath); + + /* + * Writing 1MB buffer to the file. + */ + out.write(defBuffer); + out.hflush(); + in = abfss.openFileForRead(readStatPath, fs.getFsStatistics()); + + /* + * Doing file read 10 times. + */ + for (int i = 0; i < OPERATIONS; i++) { + in.read(); + } + + AbfsInputStreamStatisticsImpl stats = + (AbfsInputStreamStatisticsImpl) in.getStreamStatistics(); + + LOG.info("STATISTICS: {}", stats.toString()); + + /* + * bytesRead - Since each time a single byte is read, total + * bytes read would be equal to OPERATIONS. + * + * readOps - Since each time read operation is performed OPERATIONS + * times, total number of read operations would be equal to OPERATIONS. + * + * remoteReadOps - Only a single remote read operation is done. Hence, + * total remote read ops is 1. + * + */ + assertEquals("Mismatch in bytesRead value", OPERATIONS, + stats.getBytesRead()); + assertEquals("Mismatch in readOps value", OPERATIONS, + stats.getReadOperations()); + assertEquals("Mismatch in remoteReadOps value", 1, + stats.getRemoteReadOperations()); + + in.close(); + // Verifying if stats are still readable after stream is closed. + LOG.info("STATISTICS after closing: {}", stats.toString()); + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + } + + /** + * Testing AbfsInputStream works with null Statistics. + */ + @Test + public void testWithNullStreamStatistics() throws IOException { + describe("Testing AbfsInputStream operations with statistics as null"); + + AzureBlobFileSystem fs = getFileSystem(); + Path nullStatFilePath = path(getMethodName()); + byte[] oneKbBuff = new byte[ONE_KB]; + + // Creating an AbfsInputStreamContext instance with null StreamStatistics. 
+ AbfsInputStreamContext abfsInputStreamContext = + new AbfsInputStreamContext( + getConfiguration().getSasTokenRenewPeriodForStreamsInSeconds()) + .withReadBufferSize(getConfiguration().getReadBufferSize()) + .withReadAheadQueueDepth(getConfiguration().getReadAheadQueueDepth()) + .withStreamStatistics(null) + .build(); + + AbfsOutputStream out = null; + AbfsInputStream in = null; + + try { + out = createAbfsOutputStreamWithFlushEnabled(fs, nullStatFilePath); + + // Writing a 1KB buffer in the file. + out.write(oneKbBuff); + out.hflush(); + + // AbfsRestOperation Instance required for eTag. + AbfsRestOperation abfsRestOperation = + fs.getAbfsClient().getPathStatus(nullStatFilePath.toUri().getPath(), false); + + // AbfsInputStream with no StreamStatistics. + in = new AbfsInputStream(fs.getAbfsClient(), null, + nullStatFilePath.toUri().getPath(), ONE_KB, + abfsInputStreamContext, + abfsRestOperation.getResult().getResponseHeader("ETag")); + + // Verifying that AbfsInputStream Operations works with null statistics. + assertNotEquals("AbfsInputStream read() with null statistics should " + + "work", -1, in.read()); + in.seek(ONE_KB); + + // Verifying toString() with no StreamStatistics. + LOG.info("AbfsInputStream: {}", in.toString()); + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + } + + /** + * Method to assert the initial values of the statistics. + * + * @param actualValue the actual value of the statistics. + * @param statistic the name of operation or statistic being asserted. + */ + private void checkInitValue(long actualValue, String statistic) { + assertEquals("Mismatch in " + statistic + " value", 0, actualValue); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java new file mode 100644 index 0000000000000..22c247f98af63 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import org.junit.Test; + +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamStatisticsImpl; + +public class TestAbfsInputStreamStatistics extends AbstractAbfsIntegrationTest { + + private static final int OPERATIONS = 100; + + public TestAbfsInputStreamStatistics() throws Exception { + } + + /** + * Test to check the bytesReadFromBuffer statistic value from AbfsInputStream. 
+ */ + @Test + public void testBytesReadFromBufferStatistic() { + describe("Testing bytesReadFromBuffer statistics value in AbfsInputStream"); + + AbfsInputStreamStatisticsImpl abfsInputStreamStatistics = + new AbfsInputStreamStatisticsImpl(); + + //Increment the bytesReadFromBuffer value. + for (int i = 0; i < OPERATIONS; i++) { + abfsInputStreamStatistics.bytesReadFromBuffer(1); + } + + /* + * Since we incremented the bytesReadFromBuffer OPERATIONS times, this + * should be the expected value. + */ + assertEquals("Mismatch in bytesReadFromBuffer value", OPERATIONS, + abfsInputStreamStatistics.getBytesReadFromBuffer()); + + } +} From e0cededfbd2f11919102f01f9bf3ce540ffd6e94 Mon Sep 17 00:00:00 2001 From: bilaharith <52483117+bilaharith@users.noreply.github.com> Date: Fri, 3 Jul 2020 23:30:22 +0530 Subject: [PATCH 078/131] HADOOP-17086. ABFS: Making the ListStatus response ignore unknown properties. (#2101) Contributed by Bilahari T H. --- .../services/ListResultEntrySchema.java | 2 + .../contracts/services/ListResultSchema.java | 2 + .../contract/ListResultSchemaTest.java | 157 ++++++++++++++++++ 3 files changed, 161 insertions(+) create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ListResultSchemaTest.java diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultEntrySchema.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultEntrySchema.java index 1de9dfaeeb910..cdf3decdc98bc 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultEntrySchema.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultEntrySchema.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.azurebfs.contracts.services; +import org.codehaus.jackson.annotate.JsonIgnoreProperties; import org.codehaus.jackson.annotate.JsonProperty; import org.apache.hadoop.classification.InterfaceStability; @@ -26,6 +27,7 @@ * The ListResultEntrySchema model. */ @InterfaceStability.Evolving +@JsonIgnoreProperties(ignoreUnknown = true) public class ListResultEntrySchema { /** * The name property. diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultSchema.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultSchema.java index 32597423c86ff..e3519fb429bff 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultSchema.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultSchema.java @@ -20,6 +20,7 @@ import java.util.List; +import org.codehaus.jackson.annotate.JsonIgnoreProperties; import org.codehaus.jackson.annotate.JsonProperty; import org.apache.hadoop.classification.InterfaceStability; @@ -28,6 +29,7 @@ * The ListResultSchema model. */ @InterfaceStability.Evolving +@JsonIgnoreProperties(ignoreUnknown = true) public class ListResultSchema { /** * The paths property. 
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ListResultSchemaTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ListResultSchemaTest.java new file mode 100644 index 0000000000000..8a33ea5de0641 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ListResultSchemaTest.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.contract; + +import java.io.IOException; + +import org.codehaus.jackson.map.ObjectMapper; +import org.junit.Test; + +import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultEntrySchema; +import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultSchema; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Tests the JSON parsing for the listfilestatus response to ListResultSchema + */ +public class ListResultSchemaTest { + + /** + * Test parsing a JSON which matches the properties in the ListResultSchema + * and ListResultEntrySchema + *

+ * { + * "paths": [ + * { + * "contentLength": "0", + * "etag": "0x8D8186452785ADA", + * "group": "$superuser", + * "lastModified": "Wed, 24 Jun 2020 17:30:43 GMT", + * "name": "dest/filename", + * "owner": "$superuser", + * "permissions": "rw-r--r--" + * } + * ] + * } + */ + @Test + public void testMatchingJSON() throws IOException { + + String matchingJson = + "{ \"paths\": [ { \"contentLength\": \"0\", \"etag\": " + + "\"0x8D8186452785ADA\", \"group\": \"$superuser\", " + + "\"lastModified\": \"Wed, 24 Jun 2020 17:30:43 GMT\", \"name\": " + + "\"dest/filename\", \"owner\": \"$superuser\", \"permissions\": " + + "\"rw-r--r--\" } ] } "; + + final ObjectMapper objectMapper = new ObjectMapper(); + final ListResultSchema listResultSchema = objectMapper + .readValue(matchingJson, ListResultSchema.class); + + assertThat(listResultSchema.paths().size()) + .describedAs("Only one path is expected as present in the input JSON") + .isEqualTo(1); + + ListResultEntrySchema path = listResultSchema.paths().get(0); + assertThat(path.contentLength()) + .describedAs("contentLength should match the value in the input JSON") + .isEqualTo(0L); + assertThat(path.eTag()) + .describedAs("eTag should match the value in the input JSON") + .isEqualTo("0x8D8186452785ADA"); + assertThat(path.group()) + .describedAs("group should match the value in the input JSON") + .isEqualTo("$superuser"); + assertThat(path.lastModified()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("Wed, 24 Jun 2020 17:30:43 GMT"); + assertThat(path.name()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("dest/filename"); + assertThat(path.owner()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("$superuser"); + assertThat(path.permissions()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("rw-r--r--"); + } + + /** + * Test parsing a JSON which matches the properties in the ListResultSchema + * and ListResultEntrySchema along with an unknown property + *

+ * { + * "paths": [ + * { + * "contentLength": "0", + * "unknownProperty": "132374934429527192", + * "etag": "0x8D8186452785ADA", + * "group": "$superuser", + * "lastModified": "Wed, 24 Jun 2020 17:30:43 GMT", + * "name": "dest/filename", + * "owner": "$superuser", + * "permissions": "rw-r--r--" + * } + * ] + * } + */ + @Test + public void testJSONWithUnknownFields() throws IOException { + + String matchingJson = "{ \"paths\": [ { \"contentLength\": \"0\", " + + "\"unknownProperty\": \"132374934429527192\", \"etag\": " + + "\"0x8D8186452785ADA\", \"group\": \"$superuser\", " + + "\"lastModified\": \"Wed, 24 Jun 2020 17:30:43 GMT\", \"name\": " + + "\"dest/filename\", \"owner\": \"$superuser\", \"permissions\": " + + "\"rw-r--r--\" } ] } "; + + final ObjectMapper objectMapper = new ObjectMapper(); + final ListResultSchema listResultSchema = objectMapper + .readValue(matchingJson, ListResultSchema.class); + + assertThat(listResultSchema.paths().size()) + .describedAs("Only one path is expected as present in the input JSON") + .isEqualTo(1); + + ListResultEntrySchema path = listResultSchema.paths().get(0); + assertThat(path.contentLength()) + .describedAs("contentLength should match the value in the input JSON") + .isEqualTo(0L); + assertThat(path.eTag()) + .describedAs("eTag should match the value in the input JSON") + .isEqualTo("0x8D8186452785ADA"); + assertThat(path.group()) + .describedAs("group should match the value in the input JSON") + .isEqualTo("$superuser"); + assertThat(path.lastModified()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("Wed, 24 Jun 2020 17:30:43 GMT"); + assertThat(path.name()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("dest/filename"); + assertThat(path.owner()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("$superuser"); + assertThat(path.permissions()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("rw-r--r--"); + } + +} From 1f2a80b5e5024aeb7fb1f8c31b8fdd0fdb88bb66 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Sat, 4 Jul 2020 00:12:10 -0700 Subject: [PATCH 079/131] HDFS-15430. create should work when parent dir is internalDir and fallback configured. Contributed by Uma Maheswara Rao G. 
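For illustration only, a minimal sketch of the behaviour this change enables, mirroring the setup used in the tests below (fsTarget, targetTestRoot and viewFsDefaultClusterUri are fixture names from those tests, not new API): with a fallback link configured, create() on a path whose parent is an internal mount-table directory is delegated to the fallback file system instead of failing with a read-only mount table error.

    // Sketch based on TestViewFileSystemLinkFallback; assumes fsTarget,
    // targetTestRoot and viewFsDefaultClusterUri exist as in that test.
    Configuration conf = new Configuration();
    ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0",
        new Path(targetTestRoot.toString()).toUri());
    Path fallbackTarget = new Path(targetTestRoot, "fallbackDir");
    fsTarget.mkdirs(new Path(fallbackTarget, "user1/hive/warehouse"));
    ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri());
    try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) {
      // /user1/hive/warehouse is an internal dir of the mount table; the new
      // file lands under <fallbackTarget>/user1/hive/warehouse/ in the
      // fallback file system rather than triggering readOnlyMountTable().
      vfs.create(new Path("/user1/hive/warehouse/test.file")).close();
    }
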
--- .../hadoop/fs/viewfs/ViewFileSystem.java | 37 ++++- .../org/apache/hadoop/fs/viewfs/ViewFs.java | 37 +++++ .../TestViewFileSystemLinkFallback.java | 148 +++++++++++++++++ ...ileSystemOverloadSchemeWithHdfsScheme.java | 28 ---- .../fs/viewfs/TestViewFsLinkFallback.java | 154 ++++++++++++++++++ 5 files changed, 375 insertions(+), 29 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 39d78cf65012d..cb3696507afd9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -41,6 +41,7 @@ import java.util.Objects; import java.util.Set; +import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -1180,7 +1181,41 @@ public FSDataOutputStream append(final Path f, final int bufferSize, public FSDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite, final int bufferSize, final short replication, final long blockSize, - final Progressable progress) throws AccessControlException { + final Progressable progress) throws IOException { + Preconditions.checkNotNull(f, "File cannot be null."); + if (InodeTree.SlashPath.equals(f)) { + throw new FileAlreadyExistsException( + "/ is not a file. The directory / already exist at: " + + theInternalDir.fullPath); + } + + if (this.fsState.getRootFallbackLink() != null) { + + if (theInternalDir.getChildren().containsKey(f.getName())) { + throw new FileAlreadyExistsException( + "A mount path(file/dir) already exist with the requested path: " + + theInternalDir.getChildren().get(f.getName()).fullPath); + } + + FileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leaf = f.getName(); + Path fileToCreate = new Path(parent, leaf); + + try { + return linkedFallbackFs + .create(fileToCreate, permission, overwrite, bufferSize, + replication, blockSize, progress); + } catch (IOException e) { + StringBuilder msg = + new StringBuilder("Failed to create file:").append(fileToCreate) + .append(" at fallback : ").append(linkedFallbackFs.getUri()); + LOG.error(msg.toString(), e); + throw e; + } + } throw readOnlyMountTable("create", f); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index c769003aacffa..a63960c55de0c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -33,6 +33,8 @@ import java.util.Map.Entry; import java.util.Set; + +import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -919,6 +921,41 @@ public FSDataOutputStream createInternal(final Path f, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, UnsupportedFileSystemException, 
UnresolvedLinkException, IOException { + Preconditions.checkNotNull(f, "File cannot be null."); + if (InodeTree.SlashPath.equals(f)) { + throw new FileAlreadyExistsException( + "/ is not a file. The directory / already exist at: " + + theInternalDir.fullPath); + } + + if (this.fsState.getRootFallbackLink() != null) { + if (theInternalDir.getChildren().containsKey(f.getName())) { + throw new FileAlreadyExistsException( + "A mount path(file/dir) already exist with the requested path: " + + theInternalDir.getChildren().get(f.getName()).fullPath); + } + + AbstractFileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leaf = f.getName(); + Path fileToCreate = new Path(parent, leaf); + + try { + return linkedFallbackFs + .createInternal(fileToCreate, flag, absolutePermission, + bufferSize, replication, blockSize, progress, checksumOpt, + true); + } catch (IOException e) { + StringBuilder msg = + new StringBuilder("Failed to create file:").append(fileToCreate) + .append(" at fallback : ").append(linkedFallbackFs.getUri()); + LOG.error(msg.toString(), e); + throw e; + } + } + throw readOnlyMountTable("create", f); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java index bec261cf3eb37..bd2b5af02ad87 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java @@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; @@ -765,4 +766,151 @@ public void testMkdirsShouldReturnFalseWhenFallbackFSNotAvailable() assertTrue(fsTarget.exists(test)); } } + + /** + * Tests that the create file should be successful when the parent directory + * is same as the existent fallback directory. The new file should be created + * in fallback. + */ + @Test + public void testCreateFileOnInternalMountDirWithSameDirTreeExistInFallback() + throws Exception { + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + Path dir1 = new Path(fallbackTarget, "user1/hive/warehouse/partition-0"); + fsTarget.mkdirs(dir1); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestFile = new Path("/user1/hive/warehouse/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + assertTrue(fsTarget.exists(testFileInFallback.getParent())); + vfs.create(vfsTestFile).close(); + assertTrue(fsTarget.exists(testFileInFallback)); + } + } + + /** + * Tests the making of a new directory which is not matching to any of + * internal directory. 
+ */ + @Test + public void testCreateNewFileWithOutMatchingToMountDirOrFallbackDirPath() + throws Exception { + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestFile = new Path("/user2/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + // user2 does not exist in fallback + assertFalse(fsTarget.exists(testFileInFallback.getParent())); + vfs.create(vfsTestFile).close(); + // /user2/test.file should be created in fallback + assertTrue(fsTarget.exists(testFileInFallback)); + } + } + + /** + * Tests the making of a new file on root which is not matching to any of + * fallback files on root. + */ + @Test + public void testCreateFileOnRootWithFallbackEnabled() throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestFile = new Path("/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + vfs.create(vfsTestFile).close(); + // /test.file should be created in fallback + assertTrue(fsTarget.exists(testFileInFallback)); + } + } + + /** + * Tests the create of a file on root where the path is matching to an + * existing file on fallback's file on root. + */ + @Test (expected = FileAlreadyExistsException.class) + public void testCreateFileOnRootWithFallbackWithFileAlreadyExist() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + Path testFile = new Path(fallbackTarget, "test.file"); + // pre-creating test file in fallback. + fsTarget.create(testFile).close(); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestFile = new Path("/test.file"); + assertTrue(fsTarget.exists(testFile)); + vfs.create(vfsTestFile, false).close(); + } + } + + /** + * Tests the creating of a file where the path is same as mount link path. 
+ */ + @Test(expected= FileAlreadyExistsException.class) + public void testCreateFileWhereThePathIsSameAsItsMountLinkPath() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestDir = new Path("/user1/hive"); + assertFalse(fsTarget.exists(Path.mergePaths(fallbackTarget, vfsTestDir))); + vfs.create(vfsTestDir).close(); + } + } + + /** + * Tests the create of a file where the path is same as one of of the internal + * dir path should fail. + */ + @Test + public void testCreateFileSameAsInternalDirPath() throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestDir = new Path("/user1"); + assertFalse(fsTarget.exists(Path.mergePaths(fallbackTarget, vfsTestDir))); + try { + vfs.create(vfsTestDir); + Assert.fail("Should fail to create file as this is an internal dir."); + } catch (NotInMountpointException e){ + // This tree is part of internal tree. The above exception will be + // thrown from getDefaultReplication, getDefaultBlockSize APIs which was + // called in create API. + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java index f0f3aae1ba6c0..a44af768bdcd5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.test.PathUtils; import org.junit.After; import org.junit.Assert; @@ -346,33 +345,6 @@ public void testCreateOnRootShouldFailWhenMountLinkConfigured() } } - /** - * Create mount links as follows - * hdfs://localhost:xxx/HDFSUser --> hdfs://localhost:xxx/HDFSUser/ - * hdfs://localhost:xxx/local --> file://TEST_ROOT_DIR/root/ - * fallback --> hdfs://localhost:xxx/HDFSUser/ - * - * It will find fallback link, but root is not accessible and read only. 
- */ - @Test(expected = AccessControlException.class, timeout = 30000) - public void testCreateOnRootShouldFailEvenFallBackMountLinkConfigured() - throws Exception { - final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); - addMountLinks(defaultFSURI.getAuthority(), - new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER, - Constants.CONFIG_VIEWFS_LINK_FALLBACK }, - new String[] {hdfsTargetPath.toUri().toString(), - localTargetDir.toURI().toString(), - hdfsTargetPath.toUri().toString() }, - conf); - try (FileSystem fs = FileSystem.get(conf)) { - fs.createNewFile(new Path("/onRootWhenFallBack")); - Assert.fail( - "It should fail as root is read only in viewFS, even when configured" - + " with fallback."); - } - } - /** * Create mount links as follows * hdfs://localhost:xxx/HDFSUser --> hdfs://localhost:xxx/HDFSUser/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java index 49c0957c446d1..04d26b983ed6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.fs.viewfs; +import static org.apache.hadoop.fs.CreateFlag.CREATE; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -24,12 +25,15 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import java.util.EnumSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -294,4 +298,154 @@ public void testMkdirShouldFailWhenFallbackFSNotAvailable() assertTrue(fsTarget.exists(test)); } + /** + * Tests that the create file should be successful when the parent directory + * is same as the existent fallback directory. The new file should be created + * in fallback. + */ + @Test + public void testCreateFileOnInternalMountDirWithSameDirTreeExistInFallback() + throws Exception { + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + Path dir1 = new Path(fallbackTarget, "user1/hive/warehouse/partition-0"); + fsTarget.mkdirs(dir1); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestFile = new Path("/user1/hive/warehouse/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + assertTrue(fsTarget.exists(testFileInFallback.getParent())); + vfs.create(vfsTestFile, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + assertTrue(fsTarget.exists(testFileInFallback)); + + } + + /** + * Tests the making of a new directory which is not matching to any of + * internal directory. 
+ */ + @Test + public void testCreateNewFileWithOutMatchingToMountDirOrFallbackDirPath() + throws Exception { + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestFile = new Path("/user2/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + // user2 does not exist in fallback + assertFalse(fsTarget.exists(testFileInFallback.getParent())); + vfs.create(vfsTestFile, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault()), + Options.CreateOpts.createParent()).close(); + // /user2/test.file should be created in fallback + assertTrue(fsTarget.exists(testFileInFallback)); + } + + /** + * Tests the making of a new file on root which is not matching to any of + * fallback files on root. + */ + @Test + public void testCreateFileOnRootWithFallbackEnabled() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestFile = new Path("/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + vfs.create(vfsTestFile, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + // /test.file should be created in fallback + assertTrue(fsTarget.exists(testFileInFallback)); + + } + + /** + * Tests the create of a file on root where the path is matching to an + * existing file on fallback's file on root. + */ + @Test (expected = FileAlreadyExistsException.class) + public void testCreateFileOnRootWithFallbackWithFileAlreadyExist() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + Path testFile = new Path(fallbackTarget, "test.file"); + // pre-creating test file in fallback. + fsTarget.create(testFile).close(); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestFile = new Path("/test.file"); + assertTrue(fsTarget.exists(testFile)); + vfs.create(vfsTestFile, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + } + + /** + * Tests the creating of a file where the path is same as mount link path. 
+ */ + @Test(expected= FileAlreadyExistsException.class) + public void testCreateFileWhereThePathIsSameAsItsMountLinkPath() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestDir = new Path("/user1/hive"); + assertFalse(fsTarget.exists(Path.mergePaths(fallbackTarget, vfsTestDir))); + vfs.create(vfsTestDir, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + } + + /** + * Tests the create of a file where the path is same as one of of the internal + * dir path should fail. + */ + @Test(expected = FileAlreadyExistsException.class) + public void testCreateFileSameAsInternalDirPath() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestDir = new Path("/user1"); + assertFalse(fsTarget.exists(Path.mergePaths(fallbackTarget, vfsTestDir))); + vfs.create(vfsTestDir, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + } } From f86f15cf2003a7c74d6a8dffa4c61236bc0a208a Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sat, 4 Jul 2020 12:24:49 +0530 Subject: [PATCH 080/131] HDFS-15446. CreateSnapshotOp fails during edit log loading for /.reserved/raw/path with error java.io.FileNotFoundException: Directory does not exist: /.reserved/raw/path. Contributed by Stephen O'Donnell. --- .../hdfs/server/namenode/FSDirectory.java | 20 +++++++ .../hdfs/server/namenode/FSEditLogLoader.java | 6 +-- .../namenode/snapshot/TestSnapshot.java | 53 +++++++++++++++++++ 3 files changed, 76 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 527ca241bc56e..fb5c9df8debdf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -734,6 +734,26 @@ public INodesInPath resolvePath(FSPermissionChecker pc, String src, return iip; } + /** + * This method should only be used from internal paths and not those provided + * directly by a user. It resolves a given path into an INodesInPath in a + * similar way to resolvePath(...), only traversal and permissions are not + * checked. + * @param src The path to resolve. + * @return if the path indicates an inode, return path after replacing up to + * {@code } with the corresponding path of the inode, else + * the path in {@code src} as is. If the path refers to a path in + * the "raw" directory, return the non-raw pathname. 
+ * @throws FileNotFoundException + */ + public INodesInPath unprotectedResolvePath(String src) + throws FileNotFoundException { + byte[][] components = INode.getPathComponents(src); + boolean isRaw = isReservedRawName(components); + components = resolveComponents(components, this); + return INodesInPath.resolve(rootDir, components, isRaw); + } + INodesInPath resolvePath(FSPermissionChecker pc, String src, long fileId) throws UnresolvedLinkException, FileNotFoundException, AccessControlException, ParentNotDirectoryException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index c390b652eeaeb..e3694ba4f4ca8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -798,7 +798,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, final String snapshotRoot = renameReservedPathsOnUpgrade(createSnapshotOp.snapshotRoot, logVersion); - INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE); + INodesInPath iip = fsDir.unprotectedResolvePath(snapshotRoot); String path = fsNamesys.getSnapshotManager().createSnapshot( fsDir.getFSNamesystem().getLeaseManager(), iip, snapshotRoot, createSnapshotOp.snapshotName, @@ -816,7 +816,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, final String snapshotRoot = renameReservedPathsOnUpgrade(deleteSnapshotOp.snapshotRoot, logVersion); - INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE); + INodesInPath iip = fsDir.unprotectedResolvePath(snapshotRoot); fsNamesys.getSnapshotManager().deleteSnapshot(iip, deleteSnapshotOp.snapshotName, new INode.ReclaimContext(fsNamesys.dir.getBlockStoragePolicySuite(), @@ -838,7 +838,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, final String snapshotRoot = renameReservedPathsOnUpgrade(renameSnapshotOp.snapshotRoot, logVersion); - INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE); + INodesInPath iip = fsDir.unprotectedResolvePath(snapshotRoot); fsNamesys.getSnapshotManager().renameSnapshot(iip, snapshotRoot, renameSnapshotOp.snapshotOldName, renameSnapshotOp.snapshotNewName, renameSnapshotOp.mtime); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java index d8e53bb2abb5e..0a262f899abf2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java @@ -513,6 +513,59 @@ public void testDeletionSnapshotMtime() throws Exception { newSnapshotStatus.getModificationTime()); } + /** + * HDFS-15446 - ensure that snapshot operations on /.reserved/raw + * paths work and the NN can load the resulting edits. 
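The key point of unprotectedResolvePath above is that edit-log replay must map a /.reserved/raw snapshot root to the underlying inode path without permission or traversal checks, because there is no caller to check against. The standalone sketch below is illustrative only (it is not the FSDirectory code; the names are invented) and shows the prefix handling the tests that follow depend on.

public final class ReservedRawPathSketch {
  private static final String RESERVED_RAW = "/.reserved/raw";

  // Maps a raw path to its non-raw form, e.g. "/.reserved/raw/nested/dir" -> "/nested/dir"
  // and "/.reserved/raw" -> "/"; non-raw paths are returned unchanged.
  static String toNonRawPath(String src) {
    if (src.equals(RESERVED_RAW)) {
      return "/";
    }
    if (src.startsWith(RESERVED_RAW + "/")) {
      return src.substring(RESERVED_RAW.length());
    }
    return src;
  }

  public static void main(String[] args) {
    System.out.println(toNonRawPath("/.reserved/raw/dir")); // /dir
    System.out.println(toNonRawPath("/.reserved/raw"));     // /
    System.out.println(toNonRawPath("/dir"));               // /dir
  }
}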
+   */
+  @Test(timeout = 60000)
+  public void testSnapshotOpsOnReservedPath() throws Exception {
+    Path dir = new Path("/dir");
+    Path nestedDir = new Path("/nested/dir");
+    Path sub = new Path(dir, "sub");
+    Path subFile = new Path(sub, "file");
+    Path nestedFile = new Path(nestedDir, "file");
+    DFSTestUtil.createFile(hdfs, subFile, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, nestedFile, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(dir);
+    hdfs.allowSnapshot(nestedDir);
+    Path reservedDir = new Path("/.reserved/raw/dir");
+    Path reservedNestedDir = new Path("/.reserved/raw/nested/dir");
+    hdfs.createSnapshot(reservedDir, "s1");
+    hdfs.createSnapshot(reservedNestedDir, "s1");
+    hdfs.renameSnapshot(reservedDir, "s1", "s2");
+    hdfs.renameSnapshot(reservedNestedDir, "s1", "s2");
+    hdfs.deleteSnapshot(reservedDir, "s2");
+    hdfs.deleteSnapshot(reservedNestedDir, "s2");
+    // The original problem with reserved paths was that the NN was unable to
+    // replay the edits, so restart the NN to ensure it starts cleanly and no
+    // exceptions are raised.
+    cluster.restartNameNode(true);
+  }
+
+  /**
+   * HDFS-15446 - ensure that snapshot operations on /.reserved/raw
+   * paths work and the NN can load the resulting edits. This test is for
+   * snapshots at the root level.
+   */
+  @Test(timeout = 60000)
+  public void testSnapshotOpsOnRootReservedPath() throws Exception {
+    Path dir = new Path("/");
+    Path sub = new Path(dir, "sub");
+    Path subFile = new Path(sub, "file");
+    DFSTestUtil.createFile(hdfs, subFile, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(dir);
+    Path reservedDir = new Path("/.reserved/raw");
+    hdfs.createSnapshot(reservedDir, "s1");
+    hdfs.renameSnapshot(reservedDir, "s1", "s2");
+    hdfs.deleteSnapshot(reservedDir, "s2");
+    // The original problem with reserved paths was that the NN was unable to
+    // replay the edits, so restart the NN to ensure it starts cleanly and no
+    // exceptions are raised.
+    cluster.restartNameNode(true);
+  }
+
   /**
    * Prepare a list of modifications. A modification may be a file creation,
    * file deletion, or a modification operation such as appending to an existing

From d20109c171460f3312a760c1309f95b2bf61e0d3 Mon Sep 17 00:00:00 2001
From: ishaniahuja <50942176+ishaniahuja@users.noreply.github.com>
Date: Sun, 5 Jul 2020 01:55:14 +0530
Subject: [PATCH 081/131] HADOOP-17058.
ABFS: Support for AppendBlob in Hadoop ABFS Driver - Contributed by Ishani Ahuja --- .../hadoop/fs/azurebfs/AbfsConfiguration.java | 8 + .../fs/azurebfs/AzureBlobFileSystemStore.java | 50 +- .../azurebfs/constants/AbfsHttpConstants.java | 1 + .../azurebfs/constants/ConfigurationKeys.java | 3 + .../constants/FileSystemConfigurations.java | 2 + .../azurebfs/constants/HttpQueryParams.java | 1 + .../fs/azurebfs/services/AbfsClient.java | 46 +- .../azurebfs/services/AbfsHttpOperation.java | 10 + .../azurebfs/services/AbfsOutputStream.java | 63 ++- .../services/AbfsOutputStreamContext.java | 12 + .../azurebfs/AbstractAbfsIntegrationTest.java | 4 + .../azurebfs/ITestAbfsNetworkStatistics.java | 52 ++- .../ITestAbfsOutputStreamStatistics.java | 4 + .../azurebfs/ITestAbfsReadWriteAndSeek.java | 3 + .../azurebfs/ITestAbfsStreamStatistics.java | 11 +- .../ITestAzureBlobFileSystemCreate.java | 20 +- .../azurebfs/ITestAzureBlobFileSystemE2E.java | 3 + .../ITestAzureBlobFileSystemFlush.java | 30 +- ...TestAbfsConfigurationFieldsValidation.java | 2 +- .../constants/TestConfigurationKeys.java | 1 + .../services/TestAbfsOutputStream.java | 430 ++++++++++++++++++ 21 files changed, 714 insertions(+), 42 deletions(-) create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index bbe2274addc86..43021c0fa8b87 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -153,6 +153,10 @@ public class AbfsConfiguration{ DefaultValue = DEFAULT_FS_AZURE_ATOMIC_RENAME_DIRECTORIES) private String azureAtomicDirs; + @StringConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_APPEND_BLOB_KEY, + DefaultValue = DEFAULT_FS_AZURE_APPEND_BLOB_DIRECTORIES) + private String azureAppendBlobDirs; + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, DefaultValue = DEFAULT_AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION) private boolean createRemoteFileSystemDuringInitialization; @@ -544,6 +548,10 @@ public String getAzureAtomicRenameDirs() { return this.azureAtomicDirs; } + public String getAppendBlobDirs() { + return this.azureAppendBlobDirs; + } + public boolean getCreateRemoteFileSystemDuringInitialization() { // we do not support creating the filesystem when AuthType is SAS return this.createRemoteFileSystemDuringInitialization diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index c310e29870a6d..74908dec1e59e 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -62,6 +62,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes; +import org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations; import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; import 
org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; @@ -145,6 +146,11 @@ public class AzureBlobFileSystemStore implements Closeable { private final IdentityTransformerInterface identityTransformer; private final AbfsPerfTracker abfsPerfTracker; + /** + * The set of directories where we should store files as append blobs. + */ + private Set appendBlobDirSet; + public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration configuration, AbfsCounters abfsCounters) throws IOException { @@ -196,6 +202,23 @@ public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, throw new IOException(e); } LOG.trace("IdentityTransformer init complete"); + + // Extract the directories that should contain append blobs + String appendBlobDirs = abfsConfiguration.getAppendBlobDirs(); + if (appendBlobDirs.trim().isEmpty()) { + this.appendBlobDirSet = new HashSet(); + } else { + this.appendBlobDirSet = new HashSet<>(Arrays.asList( + abfsConfiguration.getAppendBlobDirs().split(AbfsHttpConstants.COMMA))); + } + } + + /** + * Checks if the given key in Azure Storage should be stored as a page + * blob instead of block blob. + */ + public boolean isAppendBlobKey(String key) { + return isKeyForDirectorySet(key, appendBlobDirSet); } /** @@ -431,10 +454,15 @@ public OutputStream createFile(final Path path, isNamespaceEnabled); String relativePath = getRelativePath(path); + boolean isAppendBlob = false; + if (isAppendBlobKey(path.toString())) { + isAppendBlob = true; + } final AbfsRestOperation op = client.createPath(relativePath, true, overwrite, isNamespaceEnabled ? getOctalNotation(permission) : null, - isNamespaceEnabled ? getOctalNotation(umask) : null); + isNamespaceEnabled ? getOctalNotation(umask) : null, + isAppendBlob); perfInfo.registerResult(op.getResult()).registerSuccess(true); return new AbfsOutputStream( @@ -442,16 +470,21 @@ public OutputStream createFile(final Path path, statistics, relativePath, 0, - populateAbfsOutputStreamContext()); + populateAbfsOutputStreamContext(isAppendBlob)); } } - private AbfsOutputStreamContext populateAbfsOutputStreamContext() { + private AbfsOutputStreamContext populateAbfsOutputStreamContext(boolean isAppendBlob) { + int bufferSize = abfsConfiguration.getWriteBufferSize(); + if (isAppendBlob && bufferSize > FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE) { + bufferSize = FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE; + } return new AbfsOutputStreamContext(abfsConfiguration.getSasTokenRenewPeriodForStreamsInSeconds()) - .withWriteBufferSize(abfsConfiguration.getWriteBufferSize()) + .withWriteBufferSize(bufferSize) .enableFlush(abfsConfiguration.isFlushEnabled()) .disableOutputStreamFlush(abfsConfiguration.isOutputStreamFlushDisabled()) .withStreamStatistics(new AbfsOutputStreamStatisticsImpl()) + .withAppendBlob(isAppendBlob) .build(); } @@ -468,7 +501,7 @@ public void createDirectory(final Path path, final FsPermission permission, fina final AbfsRestOperation op = client.createPath(getRelativePath(path), false, true, isNamespaceEnabled ? getOctalNotation(permission) : null, - isNamespaceEnabled ? getOctalNotation(umask) : null); + isNamespaceEnabled ? 
getOctalNotation(umask) : null, false); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } @@ -544,12 +577,17 @@ public OutputStream openFileForWrite(final Path path, final FileSystem.Statistic perfInfo.registerSuccess(true); + boolean isAppendBlob = false; + if (isAppendBlobKey(path.toString())) { + isAppendBlob = true; + } + return new AbfsOutputStream( client, statistics, relativePath, offset, - populateAbfsOutputStreamContext()); + populateAbfsOutputStreamContext(isAppendBlob)); } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java index 8d45513da58da..38b79c9412f4c 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java @@ -40,6 +40,7 @@ public final class AbfsHttpConstants { public static final String CHECK_ACCESS = "checkAccess"; public static final String GET_STATUS = "getStatus"; public static final String DEFAULT_TIMEOUT = "90"; + public static final String APPEND_BLOB_TYPE = "appendblob"; public static final String TOKEN_VERSION = "2"; public static final String JAVA_VENDOR = "java.vendor"; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java index 67ce0f59167a6..b5feee64ab476 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java @@ -59,6 +59,9 @@ public final class ConfigurationKeys { public static final String FS_AZURE_ENABLE_AUTOTHROTTLING = "fs.azure.enable.autothrottling"; public static final String FS_AZURE_ALWAYS_USE_HTTPS = "fs.azure.always.use.https"; public static final String FS_AZURE_ATOMIC_RENAME_KEY = "fs.azure.atomic.rename.key"; + /** Provides a config to provide comma separated path prefixes on which Appendblob based files are created + * Default is empty. **/ + public static final String FS_AZURE_APPEND_BLOB_KEY = "fs.azure.appendblob.directories"; public static final String FS_AZURE_READ_AHEAD_QUEUE_DEPTH = "fs.azure.readaheadqueue.depth"; /** Provides a config control to enable or disable ABFS Flush operations - * HFlush and HSync. Default is true. 
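fs.azure.appendblob.directories holds a comma separated list of directory prefixes; a file created under any of them is stored as an append blob. Below is a minimal standalone sketch of that parsing and matching, illustrative only: it is not the isKeyForDirectorySet helper the store actually calls, and the class name is invented.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public final class AppendBlobDirMatcherSketch {
  private final Set<String> appendBlobDirs;

  AppendBlobDirMatcherSketch(String commaSeparatedDirs) {
    // An empty setting means no append blob directories at all.
    this.appendBlobDirs = commaSeparatedDirs.trim().isEmpty()
        ? new HashSet<>()
        : new HashSet<>(Arrays.asList(commaSeparatedDirs.split(",")));
  }

  // True when the key equals a configured directory or falls under it.
  boolean isAppendBlobKey(String key) {
    for (String dir : appendBlobDirs) {
      String normalized = dir.endsWith("/") ? dir.substring(0, dir.length() - 1) : dir;
      if (key.equals(normalized) || key.startsWith(normalized + "/")) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    AppendBlobDirMatcherSketch m = new AppendBlobDirMatcherSketch("/hbase/WALs,/logs");
    System.out.println(m.isAppendBlobKey("/hbase/WALs/region1")); // true
    System.out.println(m.isAppendBlobKey("/data/file"));          // false
  }
}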
**/ diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java index c12631d96db57..a367daf6ee564 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java @@ -47,6 +47,7 @@ public final class FileSystemConfigurations { // Default upload and download buffer size public static final int DEFAULT_WRITE_BUFFER_SIZE = 8 * ONE_MB; // 8 MB + public static final int APPENDBLOB_MAX_WRITE_BUFFER_SIZE = 4 * ONE_MB; // 4 MB public static final int DEFAULT_READ_BUFFER_SIZE = 4 * ONE_MB; // 4 MB public static final int MIN_BUFFER_SIZE = 16 * ONE_KB; // 16 KB public static final int MAX_BUFFER_SIZE = 100 * ONE_MB; // 100 MB @@ -61,6 +62,7 @@ public final class FileSystemConfigurations { public static final boolean DEFAULT_AZURE_SKIP_USER_GROUP_METADATA_DURING_INITIALIZATION = false; public static final String DEFAULT_FS_AZURE_ATOMIC_RENAME_DIRECTORIES = "/hbase"; + public static final String DEFAULT_FS_AZURE_APPEND_BLOB_DIRECTORIES = ""; public static final int DEFAULT_READ_AHEAD_QUEUE_DEPTH = -1; public static final boolean DEFAULT_ENABLE_FLUSH = true; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java index 9f735f729cb56..5a550ac783f20 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java @@ -38,6 +38,7 @@ public final class HttpQueryParams { public static final String QUERY_PARAM_RETAIN_UNCOMMITTED_DATA = "retainUncommittedData"; public static final String QUERY_PARAM_CLOSE = "close"; public static final String QUERY_PARAM_UPN = "upn"; + public static final String QUERY_PARAM_BLOBTYPE = "blobtype"; private HttpQueryParams() {} } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index f614bbd41d2ac..f747bd068ccae 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -272,7 +272,8 @@ public AbfsRestOperation deleteFilesystem() throws AzureBlobFileSystemException } public AbfsRestOperation createPath(final String path, final boolean isFile, final boolean overwrite, - final String permission, final String umask) throws AzureBlobFileSystemException { + final String permission, final String umask, + final boolean isAppendBlob) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); if (!overwrite) { requestHeaders.add(new AbfsHttpHeader(IF_NONE_MATCH, AbfsHttpConstants.STAR)); @@ -288,6 +289,9 @@ public AbfsRestOperation createPath(final String path, final boolean isFile, fin final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(QUERY_PARAM_RESOURCE, isFile ? 
FILE : DIRECTORY); + if (isAppendBlob) { + abfsUriQueryBuilder.addQuery(QUERY_PARAM_BLOBTYPE, APPEND_BLOB_TYPE); + } String operation = isFile ? SASTokenProvider.CREATE_FILE_OPERATION @@ -380,7 +384,7 @@ public AbfsRestOperation renameIdempotencyCheckOp( } public AbfsRestOperation append(final String path, final long position, final byte[] buffer, final int offset, - final int length, final String cachedSasToken) throws AzureBlobFileSystemException { + final int length, final String cachedSasToken, final boolean isAppendBlob) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); // JDK7 does not support PATCH, so to workaround the issue we will use // PUT and specify the real method in the X-Http-Method-Override header. @@ -401,10 +405,46 @@ public AbfsRestOperation append(final String path, final long position, final by HTTP_METHOD_PUT, url, requestHeaders, buffer, offset, length, sasTokenForReuse); - op.execute(); + try { + op.execute(); + } catch (AzureBlobFileSystemException e) { + if (isAppendBlob && appendSuccessCheckOp(op, path, (position + length))) { + final AbfsRestOperation successOp = new AbfsRestOperation( + AbfsRestOperationType.Append, + this, + HTTP_METHOD_PUT, + url, + requestHeaders, buffer, offset, length, sasTokenForReuse); + successOp.hardSetResult(HttpURLConnection.HTTP_OK); + return successOp; + } + throw e; + } + return op; } + // For AppendBlob its possible that the append succeeded in the backend but the request failed. + // However a retry would fail with an InvalidQueryParameterValue + // (as the current offset would be unacceptable). + // Hence, we pass/succeed the appendblob append call + // in case we are doing a retry after checking the length of the file + public boolean appendSuccessCheckOp(AbfsRestOperation op, final String path, + final long length) throws AzureBlobFileSystemException { + if ((op.isARetriedRequest()) + && (op.getResult().getStatusCode() == HttpURLConnection.HTTP_BAD_REQUEST)) { + final AbfsRestOperation destStatusOp = getPathStatus(path, false); + if (destStatusOp.getResult().getStatusCode() == HttpURLConnection.HTTP_OK) { + String fileLength = destStatusOp.getResult().getResponseHeader( + HttpHeaderConfigurations.CONTENT_LENGTH); + if (length <= Long.parseLong(fileLength)) { + return true; + } + } + } + return false; + } + public AbfsRestOperation flush(final String path, final long position, boolean retainUncommittedData, boolean isClose, final String cachedSasToken) throws AzureBlobFileSystemException { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java index 5dc4a89a53cbc..a63c98261f10d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java @@ -69,6 +69,7 @@ public class AbfsHttpOperation implements AbfsPerfLoggable { private String storageErrorMessage = ""; private String clientRequestId = ""; private String requestId = ""; + private String expectedAppendPos = ""; private ListResultSchema listResultSchema = null; // metrics @@ -126,6 +127,10 @@ public String getClientRequestId() { return clientRequestId; } + public String getExpectedAppendPos() { + return expectedAppendPos; + } + public String getRequestId() { return requestId; } @@ -154,6 +159,8 @@ public String 
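The append idempotency check above can be read as: only a retried append-blob append that the service rejected with HTTP 400 may in fact have been applied by the first attempt, and the remote file length decides it. A hedged sketch of just that decision follows; it is not the AbfsClient method, the parameter names are invented, and the real code obtains the length from a GetPathStatus response header.

// Returns true when a failed, retried append can be treated as successful because the
// data from the first attempt is already on the server.
static boolean appendAlreadyApplied(boolean isRetriedRequest, int statusCode,
    long positionPlusLength, long remoteContentLength) {
  if (!isRetriedRequest || statusCode != java.net.HttpURLConnection.HTTP_BAD_REQUEST) {
    return false;
  }
  // If the file already covers position + length, the original append landed.
  return positionPlusLength <= remoteContentLength;
}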
toString() { sb.append(statusCode); sb.append(","); sb.append(storageErrorCode); + sb.append(","); + sb.append(expectedAppendPos); sb.append(",cid="); sb.append(clientRequestId); sb.append(",rid="); @@ -449,6 +456,9 @@ private void processStorageErrorResponse() { case "message": storageErrorMessage = fieldValue; break; + case "ExpectedAppendPos": + expectedAppendPos = fieldValue; + break; default: break; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java index 89afca4220251..6c1e177da615d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java @@ -60,6 +60,7 @@ public class AbfsOutputStream extends OutputStream implements Syncable, StreamCa private boolean closed; private boolean supportFlush; private boolean disableOutputStreamFlush; + private boolean isAppendBlob; private volatile IOException lastError; private long lastFlushOffset; @@ -106,6 +107,7 @@ public AbfsOutputStream( this.supportFlush = abfsOutputStreamContext.isEnableFlush(); this.disableOutputStreamFlush = abfsOutputStreamContext .isDisableOutputStreamFlush(); + this.isAppendBlob = abfsOutputStreamContext.isAppendBlob(); this.lastError = null; this.lastFlushOffset = 0; this.bufferSize = abfsOutputStreamContext.getWriteBufferSize(); @@ -114,8 +116,11 @@ public AbfsOutputStream( this.writeOperations = new ConcurrentLinkedDeque<>(); this.outputStreamStatistics = abfsOutputStreamContext.getStreamStatistics(); - this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors(); - + if (this.isAppendBlob) { + this.maxConcurrentRequestCount = 1; + } else { + this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors(); + } this.threadExecutor = new ThreadPoolExecutor(maxConcurrentRequestCount, maxConcurrentRequestCount, @@ -309,7 +314,50 @@ private synchronized void flushInternalAsync() throws IOException { flushWrittenBytesToServiceAsync(); } + private void writeAppendBlobCurrentBufferToService() throws IOException { + if (bufferIndex == 0) { + return; + } + outputStreamStatistics.writeCurrentBuffer(); + + final byte[] bytes = buffer; + final int bytesLength = bufferIndex; + outputStreamStatistics.bytesToUpload(bytesLength); + buffer = byteBufferPool.getBuffer(false, bufferSize).array(); + bufferIndex = 0; + final long offset = position; + position += bytesLength; + AbfsPerfTracker tracker = client.getAbfsPerfTracker(); + try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, + "writeCurrentBufferToService", "append")) { + AbfsRestOperation op = client.append(path, offset, bytes, 0, + bytesLength, cachedSasToken.get(), this.isAppendBlob); + cachedSasToken.update(op.getSasToken()); + outputStreamStatistics.uploadSuccessful(bytesLength); + perfInfo.registerResult(op.getResult()); + byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); + perfInfo.registerSuccess(true); + return; + } catch (Exception ex) { + if (ex instanceof AbfsRestOperationException) { + if (((AbfsRestOperationException) ex).getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { + throw new FileNotFoundException(ex.getMessage()); + } + } + if (ex instanceof AzureBlobFileSystemException) { + ex = (AzureBlobFileSystemException) ex; + } + lastError = new IOException(ex); + throw lastError; + } + } + private 
synchronized void writeCurrentBufferToService() throws IOException { + if (this.isAppendBlob) { + writeAppendBlobCurrentBufferToService(); + return; + } + if (bufferIndex == 0) { return; } @@ -336,7 +384,7 @@ public Void call() throws Exception { try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "writeCurrentBufferToService", "append")) { AbfsRestOperation op = client.append(path, offset, bytes, 0, - bytesLength, cachedSasToken.get()); + bytesLength, cachedSasToken.get(), false); cachedSasToken.update(op.getSasToken()); perfInfo.registerResult(op.getResult()); byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); @@ -389,6 +437,11 @@ private synchronized void flushWrittenBytesToServiceAsync() throws IOException { private synchronized void flushWrittenBytesToServiceInternal(final long offset, final boolean retainUncommitedData, final boolean isClose) throws IOException { + // flush is called for appendblob only on close + if (this.isAppendBlob && !isClose) { + return; + } + AbfsPerfTracker tracker = client.getAbfsPerfTracker(); try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "flushWrittenBytesToServiceInternal", "flush")) { @@ -434,6 +487,10 @@ private void waitForTaskToComplete() throws IOException { for (completed = false; completionService.poll() != null; completed = true) { // keep polling until there is no data } + // for AppendBLob, jobs are not submitted to completion service + if (isAppendBlob) { + completed = true; + } if (!completed) { try { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamContext.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamContext.java index dcd6c45981734..03e4abaf4f625 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamContext.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamContext.java @@ -31,6 +31,8 @@ public class AbfsOutputStreamContext extends AbfsStreamContext { private AbfsOutputStreamStatistics streamStatistics; + private boolean isAppendBlob; + public AbfsOutputStreamContext(final long sasTokenRenewPeriodForStreamsInSeconds) { super(sasTokenRenewPeriodForStreamsInSeconds); } @@ -58,6 +60,12 @@ public AbfsOutputStreamContext withStreamStatistics( return this; } + public AbfsOutputStreamContext withAppendBlob( + final boolean isAppendBlob) { + this.isAppendBlob = isAppendBlob; + return this; + } + public AbfsOutputStreamContext build() { // Validation of parameters to be done here. 
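Two knobs change for append blob streams, as the diff above shows: uploads become single threaded so appends stay in order, and the write buffer is capped at APPENDBLOB_MAX_WRITE_BUFFER_SIZE (4 MB). The small sketch below illustrates those derivations; it is not the AbfsOutputStream code and the method names are invented.

// Append blob appends must be applied in order, so they are written one buffer at a
// time on the caller's thread; block blob uploads may run in parallel.
static int maxConcurrentRequestCount(boolean isAppendBlob, int availableProcessors) {
  return isAppendBlob ? 1 : 4 * availableProcessors;
}

// The configured write buffer is honoured for block blobs but capped for append blobs.
static int effectiveWriteBufferSize(boolean isAppendBlob, int configuredBufferSize) {
  final int appendBlobMaxWriteBufferSize = 4 * 1024 * 1024; // 4 MB, per the patch
  return isAppendBlob
      ? Math.min(configuredBufferSize, appendBlobMaxWriteBufferSize)
      : configuredBufferSize;
}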
return this; @@ -78,4 +86,8 @@ public boolean isDisableOutputStreamFlush() { public AbfsOutputStreamStatistics getStreamStatistics() { return streamStatistics; } + + public boolean isAppendBlob() { + return isAppendBlob; + } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java index a80bee65bf4f3..34b3615c1b526 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java @@ -122,6 +122,10 @@ protected AbstractAbfsIntegrationTest() throws Exception { this.testUrl = defaultUri.toString(); abfsConfig.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultUri.toString()); abfsConfig.setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, true); + if (abfsConfig.get(FS_AZURE_TEST_APPENDBLOB_ENABLED) == "true") { + String appendblobDirs = this.testUrl + "," + abfsConfig.get(FS_AZURE_CONTRACT_TEST_URI); + rawConfig.set(FS_AZURE_APPEND_BLOB_KEY, appendblobDirs); + } // For testing purposes, an IP address and port may be provided to override // the host specified in the FileSystem URI. Also note that the format of // the Azure Storage Service URI changes from diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java index 904fdf3f7c16e..b2e1301152111 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java @@ -81,10 +81,18 @@ public void testAbfsHttpSendStatistics() throws IOException { * * bytes_sent : bytes wrote in AbfsOutputStream. */ - connectionsMade = assertAbfsStatistics(AbfsStatistic.CONNECTIONS_MADE, - 6, metricMap); - requestsSent = assertAbfsStatistics(AbfsStatistic.SEND_REQUESTS, 4, - metricMap); + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(sendRequestPath).toString())) { + // no network calls are made for hflush in case of appendblob + connectionsMade = assertAbfsStatistics(AbfsStatistic.CONNECTIONS_MADE, + 5, metricMap); + requestsSent = assertAbfsStatistics(AbfsStatistic.SEND_REQUESTS, 3, + metricMap); + } else { + connectionsMade = assertAbfsStatistics(AbfsStatistic.CONNECTIONS_MADE, + 6, metricMap); + requestsSent = assertAbfsStatistics(AbfsStatistic.SEND_REQUESTS, 4, + metricMap); + } bytesSent = assertAbfsStatistics(AbfsStatistic.BYTES_SENT, testNetworkStatsString.getBytes().length, metricMap); @@ -125,10 +133,18 @@ public void testAbfsHttpSendStatistics() throws IOException { * wrote each time). 
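One note on the test toggle added to AbstractAbfsIntegrationTest above: it compares the configuration value with == "true", which checks reference identity rather than content. A hedged sketch of the value-based form is below; it reuses the configuration keys from the patch, while the surrounding fields are assumed rather than reproduced.

// Enable append blob directories for the test run only when the flag is set to "true".
if (Boolean.parseBoolean(abfsConfig.get(FS_AZURE_TEST_APPENDBLOB_ENABLED))) {
  String appendblobDirs = this.testUrl + "," + abfsConfig.get(FS_AZURE_CONTRACT_TEST_URI);
  rawConfig.set(FS_AZURE_APPEND_BLOB_KEY, appendblobDirs);
}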
* */ - assertAbfsStatistics(AbfsStatistic.CONNECTIONS_MADE, - connectionsMade + 1 + LARGE_OPERATIONS * 2, metricMap); - assertAbfsStatistics(AbfsStatistic.SEND_REQUESTS, - requestsSent + 1 + LARGE_OPERATIONS * 2, metricMap); + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(sendRequestPath).toString())) { + // no network calls are made for hflush in case of appendblob + assertAbfsStatistics(AbfsStatistic.CONNECTIONS_MADE, + connectionsMade + 1 + LARGE_OPERATIONS, metricMap); + assertAbfsStatistics(AbfsStatistic.SEND_REQUESTS, + requestsSent + 1 + LARGE_OPERATIONS, metricMap); + } else { + assertAbfsStatistics(AbfsStatistic.CONNECTIONS_MADE, + connectionsMade + 1 + LARGE_OPERATIONS * 2, metricMap); + assertAbfsStatistics(AbfsStatistic.SEND_REQUESTS, + requestsSent + 1 + LARGE_OPERATIONS * 2, metricMap); + } assertAbfsStatistics(AbfsStatistic.BYTES_SENT, bytesSent + LARGE_OPERATIONS * (testNetworkStatsString.getBytes().length), metricMap); @@ -183,8 +199,14 @@ public void testAbfsHttpResponseStatistics() throws IOException { * * bytes_received - This should be equal to bytes sent earlier. */ - getResponses = assertAbfsStatistics(AbfsStatistic.GET_RESPONSES, 8, - metricMap); + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(getResponsePath).toString())) { + //for appendBlob hflush is a no-op + getResponses = assertAbfsStatistics(AbfsStatistic.GET_RESPONSES, 7, + metricMap); + } else { + getResponses = assertAbfsStatistics(AbfsStatistic.GET_RESPONSES, 8, + metricMap); + } // Testing that bytes received is equal to bytes sent. long bytesSend = metricMap.get(AbfsStatistic.BYTES_SENT.getStatName()); bytesReceived = assertAbfsStatistics(AbfsStatistic.BYTES_RECEIVED, @@ -242,8 +264,14 @@ public void testAbfsHttpResponseStatistics() throws IOException { assertAbfsStatistics(AbfsStatistic.BYTES_RECEIVED, bytesReceived + LARGE_OPERATIONS * (testResponseString.getBytes().length), metricMap); - assertAbfsStatistics(AbfsStatistic.GET_RESPONSES, - getResponses + 3 + 2 * LARGE_OPERATIONS, metricMap); + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(getResponsePath).toString())) { + // no network calls are made for hflush in case of appendblob + assertAbfsStatistics(AbfsStatistic.GET_RESPONSES, + getResponses + 3 + LARGE_OPERATIONS, metricMap); + } else { + assertAbfsStatistics(AbfsStatistic.GET_RESPONSES, + getResponses + 3 + 2 * LARGE_OPERATIONS, metricMap); + } } finally { IOUtils.cleanupWithLogger(LOG, out, in); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java index 09cbfde1bebfb..c8640dded3d71 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java @@ -113,6 +113,10 @@ public void testAbfsOutputStreamQueueShrink() throws IOException { final AzureBlobFileSystem fs = getFileSystem(); Path queueShrinkFilePath = path(getMethodName()); String testQueueShrink = "testQueue"; + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(queueShrinkFilePath).toString())) { + // writeOperationsQueue is not used for appendBlob, hence queueShrink is 0 + return; + } try (AbfsOutputStream outForOneOp = createAbfsOutputStreamWithFlushEnabled( fs, queueShrinkFilePath)) { diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java index a270a00e9132e..52abb097ef311 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_READ_BUFFER_SIZE; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MAX_BUFFER_SIZE; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MIN_BUFFER_SIZE; @@ -46,6 +47,7 @@ public class ITestAbfsReadWriteAndSeek extends AbstractAbfsScaleTest { public static Iterable sizes() { return Arrays.asList(new Object[][]{{MIN_BUFFER_SIZE}, {DEFAULT_READ_BUFFER_SIZE}, + {APPENDBLOB_MAX_WRITE_BUFFER_SIZE}, {MAX_BUFFER_SIZE}}); } @@ -70,6 +72,7 @@ private void testReadWriteAndSeek(int bufferSize) throws Exception { final byte[] b = new byte[2 * bufferSize]; new Random().nextBytes(b); + try (FSDataOutputStream stream = fs.create(TEST_PATH)) { stream.write(b); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java index 51531f678f635..395a456124bdf 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java @@ -136,8 +136,15 @@ public void testAbfsStreamOps() throws Exception { testReadWriteOps.getBytes().length); } - //Test for 1000000 read operations - assertReadWriteOps("read", largeValue, statistics.getReadOps()); + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(largeOperationsFile).toString())) { + // for appendblob data is already flushed, so there is more data to read. 
+ assertTrue(String.format("The actual value of %d was not equal to the " + + "expected value", statistics.getReadOps()), + statistics.getReadOps() == (largeValue + 3) || statistics.getReadOps() == (largeValue + 4)); + } else { + //Test for 1000000 read operations + assertReadWriteOps("read", largeValue, statistics.getReadOps()); + } } finally { IOUtils.cleanupWithLogger(LOG, inForLargeOperations, diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java index 94368a4f36955..4b8f071b998de 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java @@ -145,15 +145,19 @@ public void testTryWithResources() throws Throwable { out.hsync(); fail("Expected a failure"); } catch (FileNotFoundException fnfe) { - // the exception raised in close() must be in the caught exception's - // suppressed list - Throwable[] suppressed = fnfe.getSuppressed(); - assertEquals("suppressed count", 1, suppressed.length); - Throwable inner = suppressed[0]; - if (!(inner instanceof IOException)) { - throw inner; + //appendblob outputStream does not generate suppressed exception on close as it is + //single threaded code + if (!fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testPath).toString())) { + // the exception raised in close() must be in the caught exception's + // suppressed list + Throwable[] suppressed = fnfe.getSuppressed(); + assertEquals("suppressed count", 1, suppressed.length); + Throwable inner = suppressed[0]; + if (!(inner instanceof IOException)) { + throw inner; + } + GenericTestUtils.assertExceptionContains(fnfe.getMessage(), inner); } - GenericTestUtils.assertExceptionContains(fnfe.getMessage(), inner); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java index ebc9c07e53e59..05c3855f5c89d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java @@ -203,6 +203,9 @@ public void testWriteWithFileNotFoundException() throws Exception { public void testFlushWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Path testFilePath = new Path(methodName.getMethodName()); + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) { + return; + } FSDataOutputStream stream = fs.create(testFilePath); assertTrue(fs.exists(testFilePath)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java index 60f7f7d23f02a..92aa5520ee4fd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java @@ -49,7 +49,8 @@ public class ITestAzureBlobFileSystemFlush extends AbstractAbfsScaleTest { private static final int BASE_SIZE = 1024; private static 
final int ONE_THOUSAND = 1000; - private static final int TEST_BUFFER_SIZE = 5 * ONE_THOUSAND * BASE_SIZE; + //3000 KB to support appenblob too + private static final int TEST_BUFFER_SIZE = 3 * ONE_THOUSAND * BASE_SIZE; private static final int ONE_MB = 1024 * 1024; private static final int FLUSH_TIMES = 200; private static final int THREAD_SLEEP_TIME = 1000; @@ -226,11 +227,15 @@ private void testFlush(boolean disableOutputStreamFlush) throws Exception { final Path testFilePath = path(methodName.getMethodName()); byte[] buffer = getRandomBytesArray(); - // The test case must write "fs.azure.write.request.size" bytes // to the stream in order for the data to be uploaded to storage. - assertEquals(fs.getAbfsStore().getAbfsConfiguration().getWriteBufferSize(), - buffer.length); + assertTrue(fs.getAbfsStore().getAbfsConfiguration().getWriteBufferSize() + <= buffer.length); + + boolean isAppendBlob = true; + if (!fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) { + isAppendBlob = false; + } try (FSDataOutputStream stream = fs.create(testFilePath)) { stream.write(buffer); @@ -245,7 +250,8 @@ private void testFlush(boolean disableOutputStreamFlush) throws Exception { // Verify that the data can be read if disableOutputStreamFlush is // false; and otherwise cannot be read. - validate(fs.open(testFilePath), buffer, !disableOutputStreamFlush); + /* For Appendlob flush is not needed to update data on server */ + validate(fs.open(testFilePath), buffer, !disableOutputStreamFlush || isAppendBlob); } } @@ -267,10 +273,15 @@ public void testHflushWithFlushDisabled() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); byte[] buffer = getRandomBytesArray(); final Path testFilePath = path(methodName.getMethodName()); + boolean isAppendBlob = false; + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) { + isAppendBlob = true; + } try (FSDataOutputStream stream = getStreamAfterWrite(fs, testFilePath, buffer, false)) { stream.hflush(); - validate(fs, testFilePath, buffer, false); + /* For Appendlob flush is not needed to update data on server */ + validate(fs, testFilePath, buffer, isAppendBlob); } } @@ -322,9 +333,14 @@ public void testHsyncWithFlushDisabled() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); byte[] buffer = getRandomBytesArray(); final Path testFilePath = path(methodName.getMethodName()); + boolean isAppendBlob = false; + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) { + isAppendBlob = true; + } try (FSDataOutputStream stream = getStreamAfterWrite(fs, testFilePath, buffer, false)) { stream.hsync(); - validate(fs, testFilePath, buffer, false); + /* For Appendlob flush is not needed to update data on server */ + validate(fs, testFilePath, buffer, isAppendBlob); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java index 45deb9ebeec4d..d8711876fefc5 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java @@ -189,4 +189,4 @@ public static AbfsConfiguration updateRetryConfigs(AbfsConfiguration abfsConfig, abfsConfig.setMaxBackoffIntervalMilliseconds(backoffTime); return abfsConfig; } -} \ 
No newline at end of file +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java index 16a3f5703bb3d..72ea7661b5a90 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java @@ -27,6 +27,7 @@ public final class TestConfigurationKeys { public static final String FS_AZURE_ACCOUNT_KEY = "fs.azure.account.key"; public static final String FS_AZURE_CONTRACT_TEST_URI = "fs.contract.test.fs.abfs"; public static final String FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT = "fs.azure.test.namespace.enabled"; + public static final String FS_AZURE_TEST_APPENDBLOB_ENABLED = "fs.azure.test.appendblob.enabled"; public static final String FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_ID = "fs.azure.account.oauth2.contributor.client.id"; public static final String FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_SECRET = "fs.azure.account.oauth2.contributor.client.secret"; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java new file mode 100644 index 0000000000000..4105aa18f210a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java @@ -0,0 +1,430 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Random; + +import org.junit.Test; + +import org.mockito.ArgumentCaptor; + +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.apache.hadoop.conf.Configuration; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.anyLong; + +import static org.assertj.core.api.Assertions.assertThat; + +public final class TestAbfsOutputStream { + + private static final int BUFFER_SIZE = 4096; + private static final int WRITE_SIZE = 1000; + private static final String PATH = "~/testpath"; + private final String globalKey = "fs.azure.configuration"; + private final String accountName1 = "account1"; + private final String accountKey1 = globalKey + "." 
+ accountName1; + private final String accountValue1 = "one"; + + private AbfsOutputStreamContext populateAbfsOutputStreamContext(int writeBufferSize, + boolean isFlushEnabled, + boolean disableOutputStreamFlush, + boolean isAppendBlob) { + return new AbfsOutputStreamContext(2) + .withWriteBufferSize(writeBufferSize) + .enableFlush(isFlushEnabled) + .disableOutputStreamFlush(disableOutputStreamFlush) + .withStreamStatistics(new AbfsOutputStreamStatisticsImpl()) + .withAppendBlob(isAppendBlob) + .build(); + } + + /** + * The test verifies OutputStream shortwrite case(2000bytes write followed by flush, hflush, hsync) is making correct HTTP calls to the server + */ + @Test + public void verifyShortWriteRequest() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), anyLong(), any(byte[].class), anyInt(), anyInt(), any(), anyBoolean())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[WRITE_SIZE]; + new Random().nextBytes(b); + out.write(b); + out.hsync(); + ArgumentCaptor acString = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acLong = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acBufferOffset = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acBufferLength = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acByteArray = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor acAppendBlobAppend = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acSASToken = ArgumentCaptor.forClass(String.class); + + + final byte[] b1 = new byte[2*WRITE_SIZE]; + new Random().nextBytes(b1); + out.write(b1); + out.flush(); + out.hflush(); + + out.hsync(); + + verify(client, times(2)).append(acString.capture(), acLong.capture(), acByteArray.capture(), acBufferOffset.capture(), acBufferLength.capture(), + acSASToken.capture(), acAppendBlobAppend.capture()); + assertThat(Arrays.asList(PATH, PATH)).describedAs("Path of the requests").isEqualTo(acString.getAllValues()); + assertThat(Arrays.asList(Long.valueOf(0), Long.valueOf(WRITE_SIZE))).describedAs("Write Position").isEqualTo(acLong.getAllValues()); + assertThat(Arrays.asList(0, 0)).describedAs("Buffer Offset").isEqualTo(acBufferOffset.getAllValues()); + assertThat(Arrays.asList(WRITE_SIZE, 2*WRITE_SIZE)).describedAs("Buffer length").isEqualTo(acBufferLength.getAllValues()); + + } + + /** + * The test verifies OutputStream Write of WRITE_SIZE(1000 bytes) followed by a close is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequest() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + + 
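// Illustrative arithmetic, not part of the patch: verifyWriteRequest pushes
// 5 * WRITE_SIZE = 5000 bytes through a 4096-byte buffer, so the stream issues two
// appends: 4096 bytes at position 0, then 5000 - 4096 = 904 bytes at position 4096.
// That is why the assertions below expect BUFFER_SIZE and 5*WRITE_SIZE - BUFFER_SIZE as
// the captured buffer lengths, and {0, BUFFER_SIZE} as the captured positions.
int expectedFirstAppendLength = 4096;             // BUFFER_SIZE
int expectedSecondAppendLength = 5 * 1000 - 4096; // 5*WRITE_SIZE - BUFFER_SIZE == 904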
when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), anyLong(), any(byte[].class), anyInt(), anyInt(), any(), anyBoolean())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[WRITE_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 5; i++) { + out.write(b); + } + out.close(); + + ArgumentCaptor acString = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acLong = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acBufferOffset = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acBufferLength = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acByteArray = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor acAppendBlobAppend = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(2)).append(acString.capture(), acLong.capture(), acByteArray.capture(), acBufferOffset.capture(), acBufferLength.capture(), + acSASToken.capture(), acAppendBlobAppend.capture()); + assertThat(Arrays.asList(PATH, PATH)).describedAs("Path").isEqualTo(acString.getAllValues()); + assertThat(new HashSet(Arrays.asList(Long.valueOf(0), Long.valueOf(BUFFER_SIZE)))).describedAs("Position").isEqualTo(new HashSet( + acLong.getAllValues())); + assertThat(Arrays.asList(0, 0)).describedAs("Buffer Offset").isEqualTo(acBufferOffset.getAllValues()); + assertThat(new HashSet(Arrays.asList(BUFFER_SIZE, 5*WRITE_SIZE-BUFFER_SIZE))).describedAs("Buffer Length").isEqualTo(new HashSet( + acBufferLength.getAllValues())); + + ArgumentCaptor acFlushString = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acFlushLong = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acFlushRetainUnCommittedData = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushClose = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(1)).flush(acFlushString.capture(), acFlushLong.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), + acFlushSASToken.capture()); + assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushString.getAllValues()); + assertThat(Arrays.asList(Long.valueOf(5*WRITE_SIZE))).describedAs("position").isEqualTo(acFlushLong.getAllValues()); + assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues()); + assertThat(Arrays.asList(true)).describedAs("Close flag").isEqualTo(acFlushClose.getAllValues()); + } + + /** + * The test verifies OutputStream Write of BUFFER_SIZE(4KB) followed by a close is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequestOfBufferSizeAndClose() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsHttpOperation httpOp = mock(AbfsHttpOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), anyLong(), any(byte[].class), anyInt(), 
anyInt(), any(), anyBoolean())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + when(op.getSasToken()).thenReturn("testToken"); + when(op.getResult()).thenReturn(httpOp); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[BUFFER_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 2; i++) { + out.write(b); + } + out.close(); + + ArgumentCaptor acString = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acLong = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acBufferOffset = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acBufferLength = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acByteArray = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor acAppendBlobAppend = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(2)).append(acString.capture(), acLong.capture(), acByteArray.capture(), acBufferOffset.capture(), acBufferLength.capture(), + acSASToken.capture(), acAppendBlobAppend.capture()); + assertThat(Arrays.asList(PATH, PATH)).describedAs("path").isEqualTo(acString.getAllValues()); + assertThat(new HashSet(Arrays.asList(Long.valueOf(0), Long.valueOf(BUFFER_SIZE)))).describedAs("Position").isEqualTo(new HashSet( + acLong.getAllValues())); + assertThat(Arrays.asList(0, 0)).describedAs("Buffer Offset").isEqualTo(acBufferOffset.getAllValues()); + assertThat(Arrays.asList(BUFFER_SIZE, BUFFER_SIZE)).describedAs("Buffer Length").isEqualTo(acBufferLength.getAllValues()); + + ArgumentCaptor acFlushString = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acFlushLong = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acFlushRetainUnCommittedData = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushClose = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(1)).flush(acFlushString.capture(), acFlushLong.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), + acFlushSASToken.capture()); + assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushString.getAllValues()); + assertThat(Arrays.asList(Long.valueOf(2*BUFFER_SIZE))).describedAs("position").isEqualTo(acFlushLong.getAllValues()); + assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues()); + assertThat(Arrays.asList(true)).describedAs("Close flag").isEqualTo(acFlushClose.getAllValues()); + + } + + /** + * The test verifies OutputStream Write of BUFFER_SIZE(4KB) is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequestOfBufferSize() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsHttpOperation httpOp = mock(AbfsHttpOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), anyLong(), any(byte[].class), anyInt(), anyInt(), any(), anyBoolean())).thenReturn(op); + when(client.flush(anyString(), anyLong(), 
anyBoolean(), anyBoolean(), any())).thenReturn(op); + when(op.getSasToken()).thenReturn("testToken"); + when(op.getResult()).thenReturn(httpOp); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[BUFFER_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 2; i++) { + out.write(b); + } + Thread.sleep(1000); + + ArgumentCaptor acString = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acLong = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acBufferOffset = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acBufferLength = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acByteArray = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor acAppendBlobAppend = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(2)).append(acString.capture(), acLong.capture(), acByteArray.capture(), acBufferOffset.capture(), acBufferLength.capture(), + acSASToken.capture(), acAppendBlobAppend.capture()); + assertThat(Arrays.asList(PATH, PATH)).describedAs("File Path").isEqualTo(acString.getAllValues()); + assertThat(new HashSet(Arrays.asList(Long.valueOf(0), Long.valueOf(BUFFER_SIZE)))).describedAs("Position in file").isEqualTo( + new HashSet(acLong.getAllValues())); + assertThat(Arrays.asList(0, 0)).describedAs("buffer offset").isEqualTo(acBufferOffset.getAllValues()); + assertThat(Arrays.asList(BUFFER_SIZE, BUFFER_SIZE)).describedAs("buffer length").isEqualTo(acBufferLength.getAllValues()); + + } + + /** + * The test verifies OutputStream Write of BUFFER_SIZE(4KB) on a AppendBlob based stream is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequestOfBufferSizeWithAppendBlob() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), anyLong(), any(byte[].class), anyInt(), anyInt(), any(), anyBoolean())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, true)); + final byte[] b = new byte[BUFFER_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 2; i++) { + out.write(b); + } + Thread.sleep(1000); + + ArgumentCaptor acString = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acLong = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acBufferOffset = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acBufferLength = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acByteArray = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor acAppendBlobAppend = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(2)).append(acString.capture(), acLong.capture(), acByteArray.capture(), acBufferOffset.capture(), acBufferLength.capture(), + acSASToken.capture(), acAppendBlobAppend.capture()); + assertThat(Arrays.asList(PATH, PATH)).describedAs("File 
Path").isEqualTo(acString.getAllValues()); + assertThat(Arrays.asList(Long.valueOf(0), Long.valueOf(BUFFER_SIZE))).describedAs("File Position").isEqualTo(acLong.getAllValues()); + assertThat(Arrays.asList(0, 0)).describedAs("Buffer Offset").isEqualTo(acBufferOffset.getAllValues()); + assertThat(Arrays.asList(BUFFER_SIZE, BUFFER_SIZE)).describedAs("Buffer Length").isEqualTo(acBufferLength.getAllValues()); + assertThat(Arrays.asList(true, true)).describedAs("is AppendBlob Append").isEqualTo(acAppendBlobAppend.getAllValues()); + + } + + /** + * The test verifies OutputStream Write of BUFFER_SIZE(4KB) followed by a hflush call is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequestOfBufferSizeAndHFlush() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), anyLong(), any(byte[].class), anyInt(), anyInt(), any(), anyBoolean())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[BUFFER_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 2; i++) { + out.write(b); + } + out.hflush(); + + ArgumentCaptor acString = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acLong = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acBufferOffset = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acBufferLength = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acByteArray = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor acAppendBlobAppend = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(2)).append(acString.capture(), acLong.capture(), acByteArray.capture(), acBufferOffset.capture(), acBufferLength.capture(), + acSASToken.capture(), acAppendBlobAppend.capture()); + assertThat(Arrays.asList(PATH, PATH)).describedAs("File Path").isEqualTo(acString.getAllValues()); + assertThat(new HashSet(Arrays.asList(Long.valueOf(0), Long.valueOf(BUFFER_SIZE)))).describedAs("File Position").isEqualTo( + new HashSet(acLong.getAllValues())); + assertThat(Arrays.asList(0, 0)).describedAs("Buffer Offset").isEqualTo(acBufferOffset.getAllValues()); + assertThat(Arrays.asList(BUFFER_SIZE, BUFFER_SIZE)).describedAs("Buffer Length").isEqualTo(acBufferLength.getAllValues()); + + ArgumentCaptor acFlushString = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acFlushLong = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acFlushRetainUnCommittedData = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushClose = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(1)).flush(acFlushString.capture(), acFlushLong.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), + acFlushSASToken.capture()); + assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushString.getAllValues()); + 
assertThat(Arrays.asList(Long.valueOf(2*BUFFER_SIZE))).describedAs("position").isEqualTo(acFlushLong.getAllValues()); + assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues()); + assertThat(Arrays.asList(false)).describedAs("Close flag").isEqualTo(acFlushClose.getAllValues()); + + } + + /** + * The test verifies OutputStream Write of BUFFER_SIZE(4KB) followed by a flush call is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequestOfBufferSizeAndFlush() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), anyLong(), any(byte[].class), anyInt(), anyInt(), any(), anyBoolean())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[BUFFER_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 2; i++) { + out.write(b); + } + Thread.sleep(1000); + out.flush(); + Thread.sleep(1000); + + ArgumentCaptor acString = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acLong = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acBufferOffset = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acBufferLength = ArgumentCaptor.forClass(Integer.class); + ArgumentCaptor acByteArray = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor acAppendBlobAppend = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(2)).append(acString.capture(), acLong.capture(), acByteArray.capture(), acBufferOffset.capture(), acBufferLength.capture(), + acSASToken.capture(), acAppendBlobAppend.capture()); + assertThat(Arrays.asList(PATH, PATH)).describedAs("path").isEqualTo(acString.getAllValues()); + assertThat(new HashSet(Arrays.asList(Long.valueOf(0), Long.valueOf(BUFFER_SIZE)))).describedAs("Position").isEqualTo( + new HashSet(acLong.getAllValues())); + assertThat(Arrays.asList(0, 0)).describedAs("Buffer Offset").isEqualTo(acBufferOffset.getAllValues()); + assertThat(Arrays.asList(BUFFER_SIZE, BUFFER_SIZE)).describedAs("Buffer Length").isEqualTo(acBufferLength.getAllValues()); + + } +} From 55a2ae80dc9b45413febd33840b8a653e3e29440 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Sat, 4 Jul 2020 13:45:49 -0700 Subject: [PATCH 082/131] HDFS-15450. Fix NN trash emptier to work if ViewFSOveroadScheme enabled. Contributed by Uma Maheswara Rao G. 
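A brief illustration of the idea behind this patch (a sketch only, not part of the patch itself): the trash emptier obtains its filesystem through FileSystem.get(conf), so an fs.hdfs.impl override in core-site.xml (for example ViewFileSystemOverloadScheme) would hand the NameNode something other than DFS; pinning the key back to DistributedFileSystem, as the diff below does, avoids that. The class name PinHdfsImplSketch and the standalone main() are assumptions for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class PinHdfsImplSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // stands in for the NameNode's own conf
    // Force hdfs:// to resolve to DistributedFileSystem, regardless of any
    // fs.hdfs.impl override picked up from core-site.xml.
    conf.set("fs.hdfs.impl", DistributedFileSystem.class.getName());
    try (FileSystem fs = FileSystem.get(conf)) {
      // When fs.defaultFS points at an hdfs:// URI, this is now guaranteed to be DFS.
      System.out.println(fs.getClass().getName());
    }
  }
}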
--- .../hadoop/hdfs/server/namenode/NameNode.java | 7 ++ ...tartupWhenViewFSOverloadSchemeEnabled.java | 88 +++++++++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 74757e563a64d..7c2026c1059b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtilClient; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; @@ -384,6 +385,7 @@ public long getProtocolVersion(String protocol, */ @Deprecated public static final int DEFAULT_PORT = DFS_NAMENODE_RPC_PORT_DEFAULT; + public static final String FS_HDFS_IMPL_KEY = "fs.hdfs.impl"; public static final Logger LOG = LoggerFactory.getLogger(NameNode.class.getName()); public static final Logger stateChangeLog = @@ -725,6 +727,11 @@ protected void initialize(Configuration conf) throws IOException { intervals); } } + // Currently NN uses FileSystem.get to initialize DFS in startTrashEmptier. + // If fs.hdfs.impl was overridden by core-site.xml, we may get other + // filesystem. To make sure we get DFS, we are setting fs.hdfs.impl to DFS. + // HDFS-15450 + conf.set(FS_HDFS_IMPL_KEY, DistributedFileSystem.class.getName()); UserGroupInformation.setConfiguration(conf); loginAsNameNodeUser(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java new file mode 100644 index 0000000000000..9d394c004924e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Tests that the NN startup is successful with ViewFSOverloadScheme. + */ +public class TestNNStartupWhenViewFSOverloadSchemeEnabled { + private MiniDFSCluster cluster; + private static final String FS_IMPL_PATTERN_KEY = "fs.%s.impl"; + private static final String HDFS_SCHEME = "hdfs"; + private static final Configuration CONF = new Configuration(); + + @BeforeClass + public static void setUp() { + CONF.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); + CONF.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); + CONF.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1); + CONF.set(String.format(FS_IMPL_PATTERN_KEY, HDFS_SCHEME), + ViewFileSystemOverloadScheme.class.getName()); + CONF.set(String + .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + HDFS_SCHEME), DistributedFileSystem.class.getName()); + // By default trash interval is 0. To trigger TrashEmptier, let's set it to + // >0 value. + CONF.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 100); + } + + /** + * Tests that the HA mode NameNode startup is successful when + * ViewFSOverloadScheme configured. + */ + @Test(timeout = 30000) + public void testHANameNodeAndDataNodeStartup() throws Exception { + cluster = new MiniDFSCluster.Builder(CONF) + .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1) + .waitSafeMode(false).build(); + cluster.waitActive(); + cluster.transitionToActive(0); + } + + /** + * Tests that the NameNode startup is successful when ViewFSOverloadScheme + * configured. + */ + @Test(timeout = 30000) + public void testNameNodeAndDataNodeStartup() throws Exception { + cluster = + new MiniDFSCluster.Builder(CONF).numDataNodes(1).waitSafeMode(false) + .build(); + cluster.waitActive(); + } + + @After + public void shutdownCluster() { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } +} From 639acb6d8921127cde3174a302f2e3d71b44f052 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 6 Jul 2020 16:08:36 +0900 Subject: [PATCH 083/131] HADOOP-17111. Replace Guava Optional with Java8+ Optional. Contributed by Ahmed Hussein. 
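Before the diff below, a minimal sketch of the substitution made throughout this patch: Guava's Optional.fromNullable(x).or(def) becomes java.util.Optional.ofNullable(x).orElse(def). The class and method names here (OptionalMigrationSketch, orEmpty) are placeholders for illustration, not code from the patch.

import java.util.Optional;

public class OptionalMigrationSketch {
  // Guava:  Optional.fromNullable(msg).or("")   -> "" when msg is null
  // JDK 8+: Optional.ofNullable(msg).orElse("") -> same result, no Guava dependency
  static String orEmpty(String msg) {
    return Optional.ofNullable(msg).orElse("");
  }

  public static void main(String[] args) {
    System.out.println(orEmpty(null).isEmpty());     // true
    System.out.println(orEmpty("failed").isEmpty()); // false
  }
}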
--- .../main/resources/checkstyle/checkstyle.xml | 7 ++- .../nodemanager/DefaultContainerExecutor.java | 19 +++--- .../nodemanager/LinuxContainerExecutor.java | 58 +++++++++++++------ .../recovery/TestZKRMStateStorePerf.java | 23 ++++---- 4 files changed, 66 insertions(+), 41 deletions(-) diff --git a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml index 8f3d3f13824ef..54a5943738094 100644 --- a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml +++ b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml @@ -119,7 +119,12 @@ - + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java index c5fc481661bd9..b8f94b8d96dee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java @@ -21,12 +21,7 @@ import static org.apache.hadoop.fs.CreateFlag.CREATE; import static org.apache.hadoop.fs.CreateFlag.OVERWRITE; -import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; -import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import com.google.common.annotations.VisibleForTesting; import java.io.DataOutputStream; import java.io.File; import java.io.FileNotFoundException; @@ -38,7 +33,7 @@ import java.util.EnumSet; import java.util.List; import java.util.Map; - +import java.util.Optional; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.fs.FileContext; @@ -46,6 +41,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.service.ServiceStateException; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.CommandExecutor; @@ -60,15 +56,16 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReapContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext; import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext; import 
org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Optional; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The {@code DefaultContainerExecuter} class offers generic container @@ -333,7 +330,7 @@ public int launchContainer(ContainerStartContext ctx) builder.append("Exception from container-launch.\n") .append("Container id: ").append(containerId).append("\n") .append("Exit code: ").append(exitCode).append("\n"); - if (!Optional.fromNullable(e.getMessage()).or("").isEmpty()) { + if (!Optional.ofNullable(e.getMessage()).orElse("").isEmpty()) { builder.append("Exception message: ") .append(e.getMessage()).append("\n"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java index 79fb817f97f33..bb190fd4073a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java @@ -18,14 +18,46 @@ package org.apache.hadoop.yarn.server.nodemanager; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPID; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPLICATION_LOCAL_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_ID_STR; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_LAUNCH_PREFIX_COMMANDS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_LOCAL_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_LOG_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_RUN_CMDS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_WORK_DIR; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.FILECACHE_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.LOCALIZED_RESOURCES; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.LOCAL_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.LOG_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.NM_PRIVATE_CONTAINER_SCRIPT_PATH; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.NM_PRIVATE_KEYSTORE_PATH; +import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.NM_PRIVATE_TOKENS_PATH; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.NM_PRIVATE_TRUSTSTORE_PATH; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.PID; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.PID_FILE_PATH; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.RESOURCES_OPTIONS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.RUN_AS_USER; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.SIGNAL; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.TC_COMMAND_FILE; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER_FILECACHE_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER_LOCAL_DIRS; + import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Optional; -import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; -import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; @@ -53,6 +85,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; +import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerPrepareContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext; @@ -64,17 +97,8 @@ import org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler; import org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler; import org.apache.hadoop.yarn.server.nodemanager.util.LCEResourcesHandler; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.regex.Pattern; - -import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** *
This class provides {@link Container} execution using a native @@ -614,7 +638,7 @@ private int handleExitCode(ContainerExecutionException e, Container container, .append("Container id: " + containerId + "\n") .append("Exit code: " + exitCode + "\n") .append("Exception message: " + e.getMessage() + "\n"); - if (!Optional.fromNullable(e.getErrorOutput()).or("").isEmpty()) { + if (!Optional.ofNullable(e.getErrorOutput()).orElse("").isEmpty()) { builder.append("Shell error output: " + e.getErrorOutput() + "\n"); } //Skip stack trace diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java index 893d1ca627a79..3cb428c5c5960 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java @@ -18,15 +18,18 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery; -import com.google.common.base.Optional; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashSet; import java.util.Map; +import java.util.Optional; import java.util.Set; import javax.crypto.SecretKey; - import org.apache.curator.test.TestingServer; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; @@ -34,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; @@ -41,16 +45,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.junit.Before; import org.junit.After; +import org.junit.Before; import org.junit.Test; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestZKRMStateStorePerf extends RMStateStoreTestBase implements Tool { @@ -98,12 +97,12 @@ public void tearDown() throws Exception { } private void initStore(String hostPort) { - Optional optHostPort = Optional.fromNullable(hostPort); + Optional optHostPort = Optional.ofNullable(hostPort); RMContext rmContext = mock(RMContext.class); conf = new YarnConfiguration(); conf.set(YarnConfiguration.RM_ZK_ADDRESS, optHostPort - .or((curatorTestingServer == null) ? 
"" : curatorTestingServer + .orElse((curatorTestingServer == null) ? "" : curatorTestingServer .getConnectString())); conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode); From 2f500e4635ea4347a55693b1a10a4a4465fe5fac Mon Sep 17 00:00:00 2001 From: Madhusoodan Pataki Date: Mon, 6 Jul 2020 20:55:42 +0530 Subject: [PATCH 084/131] HADOOP-17081. MetricsSystem doesn't start the sink adapters on restart (#2089) Contributed by Madhusoodan P --- .../metrics2/impl/MetricsSystemImpl.java | 6 +++++- .../metrics2/impl/TestMetricsSystemImpl.java | 21 +++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java index 624edc96b8ae7..cf4b4a9810c4f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java @@ -273,7 +273,11 @@ void registerSource(String name, String desc, MetricsSource source) { T register(final String name, final String description, final T sink) { LOG.debug(name +", "+ description); if (allSinks.containsKey(name)) { - LOG.warn("Sink "+ name +" already exists!"); + if(sinks.get(name) == null) { + registerSink(name, description, sink); + } else { + LOG.warn("Sink "+ name +" already exists!"); + } return sink; } allSinks.put(name, sink); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java index 47a3b4cdc092b..1b40a17bdd8a3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java @@ -639,4 +639,25 @@ private static class TestSource2 { private static String getPluginUrlsAsString() { return "file:metrics2-test-plugin.jar"; } + + @Test + public void testMetricSystemRestart() { + MetricsSystemImpl ms = new MetricsSystemImpl("msRestartTestSystem"); + TestSink ts = new TestSink(); + String sinkName = "restartTestSink"; + + try { + ms.start(); + ms.register(sinkName, "", ts); + assertNotNull("no adapter exists for " + sinkName, + ms.getSinkAdapter(sinkName)); + ms.stop(); + + ms.start(); + assertNotNull("no adapter exists for " + sinkName, + ms.getSinkAdapter(sinkName)); + } finally { + ms.stop(); + } + } } From 834372f4040f1e7a00720da5c40407f9b1423b6d Mon Sep 17 00:00:00 2001 From: Shanyu Zhao Date: Mon, 6 Jul 2020 08:43:34 -0700 Subject: [PATCH 085/131] HDFS-15451. Do not discard non-initial block report for provided storage. (#2119). Contributed by Shanyu Zhao. 
Signed-off-by: He Xiaoqiao --- .../server/blockmanagement/BlockManager.java | 1 + .../blockmanagement/TestBlockManager.java | 53 +++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 7f0f17e7b42fb..f2cd6b9819efb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -2759,6 +2759,7 @@ public boolean processReport(final DatanodeID nodeID, storageInfo = node.updateStorage(storage); } if (namesystem.isInStartupSafeMode() + && !StorageType.PROVIDED.equals(storageInfo.getStorageType()) && storageInfo.getBlockReportCount() > 0) { blockLog.info("BLOCK* processReport 0x{}: " + "discarded non-initial block report from {}" diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 11ed5ba9a33d8..695377aa5db6c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -49,9 +49,11 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; +import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestProvidedImpl; import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten; import org.apache.hadoop.hdfs.server.namenode.CacheManager; @@ -1051,6 +1053,57 @@ public void testSafeModeIBRBeforeFirstFullBR() throws Exception { (ds) >= 0); } + @Test + public void testSafeModeWithProvidedStorageBR() throws Exception { + DatanodeDescriptor node0 = spy(nodes.get(0)); + DatanodeStorageInfo ds0 = node0.getStorageInfos()[0]; + node0.setAlive(true); + DatanodeDescriptor node1 = spy(nodes.get(1)); + DatanodeStorageInfo ds1 = node1.getStorageInfos()[0]; + node1.setAlive(true); + + String providedStorageID = DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT; + DatanodeStorage providedStorage = new DatanodeStorage( + providedStorageID, DatanodeStorage.State.NORMAL, StorageType.PROVIDED); + + // create block manager with provided storage enabled + Configuration conf = new HdfsConfiguration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true); + conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS, + TestProvidedImpl.TestFileRegionBlockAliasMap.class, + BlockAliasMap.class); + BlockManager bmPs = new BlockManager(fsn, false, conf); + bmPs.setBlockPoolId("BP-12344-10.1.1.2-12344"); + + // pretend to be in safemode + doReturn(true).when(fsn).isInStartupSafeMode(); + + // register new node + DatanodeRegistration nodeReg0 = + new 
DatanodeRegistration(node0, null, null, ""); + bmPs.getDatanodeManager().registerDatanode(nodeReg0); + bmPs.getDatanodeManager().addDatanode(node0); + DatanodeRegistration nodeReg1 = + new DatanodeRegistration(node1, null, null, ""); + bmPs.getDatanodeManager().registerDatanode(nodeReg1); + bmPs.getDatanodeManager().addDatanode(node1); + + // process reports of provided storage and disk storage + bmPs.processReport(node0, providedStorage, BlockListAsLongs.EMPTY, null); + bmPs.processReport(node0, new DatanodeStorage(ds0.getStorageID()), + BlockListAsLongs.EMPTY, null); + bmPs.processReport(node1, providedStorage, BlockListAsLongs.EMPTY, null); + bmPs.processReport(node1, new DatanodeStorage(ds1.getStorageID()), + BlockListAsLongs.EMPTY, null); + + // The provided stoage report should not affect disk storage report + DatanodeStorageInfo dsPs = + bmPs.getProvidedStorageMap().getProvidedStorageInfo(); + assertEquals(2, dsPs.getBlockReportCount()); + assertEquals(1, ds0.getBlockReportCount()); + assertEquals(1, ds1.getBlockReportCount()); + } + @Test public void testFullBR() throws Exception { doReturn(true).when(fsn).isRunning(); From e820baa6e6f7e850ba62cbf150d760bd0ea6d0e0 Mon Sep 17 00:00:00 2001 From: Ye Ni <141253+NickyYe@users.noreply.github.com> Date: Mon, 6 Jul 2020 16:17:09 -0700 Subject: [PATCH 086/131] HDFS-15417. RBF: Get the datanode report from cache for federation WebHDFS operations (#2080) --- .../federation/router/RouterRpcServer.java | 123 +++++++++++++++++- .../router/RouterWebHdfsMethods.java | 9 +- .../federation/router/TestRouterRpc.java | 70 +++++++++- 3 files changed, 191 insertions(+), 11 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 4f1310bb25911..5905a1dbbd370 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -26,6 +26,8 @@ import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_COUNT_KEY; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_KEY; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DN_REPORT_CACHE_EXPIRE; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DN_REPORT_CACHE_EXPIRE_MS_DEFAULT; import java.io.FileNotFoundException; import java.io.IOException; @@ -41,7 +43,19 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; - +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; @@ -219,6 +233,9 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol, private static final ThreadLocal CUR_USER = new ThreadLocal<>(); + /** DN type -> full DN report. */ + private final LoadingCache dnCache; + /** * Construct a router RPC server. * @@ -361,6 +378,23 @@ public RouterRpcServer(Configuration configuration, Router router, this.nnProto = new RouterNamenodeProtocol(this); this.clientProto = new RouterClientProtocol(conf, this); this.routerProto = new RouterUserProtocol(this); + + long dnCacheExpire = conf.getTimeDuration( + DN_REPORT_CACHE_EXPIRE, + DN_REPORT_CACHE_EXPIRE_MS_DEFAULT, TimeUnit.MILLISECONDS); + this.dnCache = CacheBuilder.newBuilder() + .build(new DatanodeReportCacheLoader()); + + // Actively refresh the dn cache in a configured interval + Executors + .newSingleThreadScheduledExecutor() + .scheduleWithFixedDelay(() -> this.dnCache + .asMap() + .keySet() + .parallelStream() + .forEach((key) -> this.dnCache.refresh(key)), + 0, + dnCacheExpire, TimeUnit.MILLISECONDS); } @Override @@ -868,6 +902,50 @@ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type) return clientProto.getDatanodeReport(type); } + /** + * Get the datanode report from cache. + * + * @param type Type of the datanode. + * @return List of datanodes. + * @throws IOException If it cannot get the report. + */ + DatanodeInfo[] getCachedDatanodeReport(DatanodeReportType type) + throws IOException { + try { + DatanodeInfo[] dns = this.dnCache.get(type); + if (dns == null) { + LOG.debug("Get null DN report from cache"); + dns = getCachedDatanodeReportImpl(type); + this.dnCache.put(type, dns); + } + return dns; + } catch (ExecutionException e) { + LOG.error("Cannot get the DN report for {}", type, e); + Throwable cause = e.getCause(); + if (cause instanceof IOException) { + throw (IOException) cause; + } else { + throw new IOException(cause); + } + } + } + + private DatanodeInfo[] getCachedDatanodeReportImpl( + final DatanodeReportType type) throws IOException { + // We need to get the DNs as a privileged user + UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); + RouterRpcServer.setCurrentUser(loginUser); + + try { + DatanodeInfo[] dns = clientProto.getDatanodeReport(type); + LOG.debug("Refresh cached DN report with {} datanodes", dns.length); + return dns; + } finally { + // Reset ugi to remote user for remaining operations. + RouterRpcServer.resetCurrentUser(); + } + } + /** * Get the datanode report with a timeout. * @param type Type of the datanode. @@ -1748,4 +1826,45 @@ public void refreshSuperUserGroupsConfiguration() throws IOException { public String[] getGroupsForUser(String user) throws IOException { return routerProto.getGroupsForUser(user); } -} \ No newline at end of file + + /** + * Deals with loading datanode report into the cache and refresh. 
+ */ + private class DatanodeReportCacheLoader + extends CacheLoader { + + private ListeningExecutorService executorService; + + DatanodeReportCacheLoader() { + ThreadFactory threadFactory = new ThreadFactoryBuilder() + .setNameFormat("DatanodeReport-Cache-Reload") + .setDaemon(true) + .build(); + + executorService = MoreExecutors.listeningDecorator( + Executors.newSingleThreadExecutor(threadFactory)); + } + + @Override + public DatanodeInfo[] load(DatanodeReportType type) throws Exception { + return getCachedDatanodeReportImpl(type); + } + + /** + * Override the reload method to provide an asynchronous implementation, + * so that the query will not be slowed down by the cache refresh. It + * will return the old cache value and schedule a background refresh. + */ + @Override + public ListenableFuture reload( + final DatanodeReportType type, DatanodeInfo[] oldValue) + throws Exception { + return executorService.submit(new Callable() { + @Override + public DatanodeInfo[] call() throws Exception { + return load(type); + } + }); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java index 9f0d06d7695cd..39f06a3b66f4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java @@ -454,19 +454,12 @@ private URI redirectURI(final Router router, final UserGroupInformation ugi, private DatanodeInfo chooseDatanode(final Router router, final String path, final HttpOpParam.Op op, final long openOffset, final String excludeDatanodes) throws IOException { - // We need to get the DNs as a privileged user final RouterRpcServer rpcServer = getRPCServer(router); - UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); - RouterRpcServer.setCurrentUser(loginUser); - DatanodeInfo[] dns = null; try { - dns = rpcServer.getDatanodeReport(DatanodeReportType.LIVE); + dns = rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE); } catch (IOException e) { LOG.error("Cannot get the datanodes from the RPC server", e); - } finally { - // Reset ugi to remote user for remaining operations. 
- RouterRpcServer.resetCurrentUser(); } HashSet excludes = new HashSet(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index 2b7669d26af42..b9a17ac9bdd5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -67,6 +67,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; @@ -216,6 +218,12 @@ public static void globalSetUp() throws Exception { // Register and verify all NNs with all routers cluster.registerNamenodes(); cluster.waitNamenodeRegistration(); + + // We decrease the DN heartbeat expire interval to make them dead faster + cluster.getCluster().getNamesystem(0).getBlockManager() + .getDatanodeManager().setHeartbeatExpireInterval(5000); + cluster.getCluster().getNamesystem(1).getBlockManager() + .getDatanodeManager().setHeartbeatExpireInterval(5000); } @AfterClass @@ -1777,6 +1785,66 @@ public void testgetGroupsForUser() throws IOException { assertArrayEquals(group, result); } + @Test + public void testGetCachedDatanodeReport() throws Exception { + RouterRpcServer rpcServer = router.getRouter().getRpcServer(); + final DatanodeInfo[] datanodeReport = + rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE); + + // We should have 12 nodes in total + assertEquals(12, datanodeReport.length); + + // We should be caching this information + DatanodeInfo[] datanodeReport1 = + rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE); + assertArrayEquals(datanodeReport1, datanodeReport); + + // Stop one datanode + MiniDFSCluster miniDFSCluster = getCluster().getCluster(); + DataNodeProperties dnprop = miniDFSCluster.stopDataNode(0); + + // We wait until the cached value is updated + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + DatanodeInfo[] dn = null; + try { + dn = rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE); + } catch (IOException ex) { + LOG.error("Error on getCachedDatanodeReport"); + } + return !Arrays.equals(datanodeReport, dn); + } + }, 500, 5 * 1000); + + // The cache should be updated now + final DatanodeInfo[] datanodeReport2 = + rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE); + assertEquals(datanodeReport.length - 1, datanodeReport2.length); + + // Restart the DN we just stopped + miniDFSCluster.restartDataNode(dnprop); + miniDFSCluster.waitActive(); + + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + DatanodeInfo[] dn = null; + try { + dn = rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE); + } catch (IOException ex) { + LOG.error("Error on getCachedDatanodeReport"); + } + return datanodeReport.length == dn.length; + } + }, 500, 5 * 1000); + + // The cache should be updated now + final DatanodeInfo[] datanodeReport3 = + rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE); + 
assertEquals(datanodeReport.length, datanodeReport3.length); + } + /** * Check the erasure coding policies in the Router and the Namenode. * @return The erasure coding policies. @@ -1814,4 +1882,4 @@ private DFSClient getFileDFSClient(final String path) { } return null; } -} \ No newline at end of file +} From dc0626b5f2f2ba0bd3919650ea231cedd424f77a Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Mon, 6 Jul 2020 18:50:03 -0700 Subject: [PATCH 087/131] HDFS-15449. Optionally ignore port number in mount-table name when picking from initialized uri. Contributed by Uma Maheswara Rao G. --- .../apache/hadoop/fs/viewfs/Constants.java | 13 ++++++ .../hadoop/fs/viewfs/ViewFileSystem.java | 10 ++++- .../viewfs/ViewFileSystemOverloadScheme.java | 13 +++++- .../src/site/markdown/ViewFsOverloadScheme.md | 8 +++- ...mOverloadSchemeHdfsFileSystemContract.java | 4 ++ ...ileSystemOverloadSchemeWithHdfsScheme.java | 45 ++++++++++++++++++- ...wFileSystemOverloadSchemeWithDFSAdmin.java | 17 ++++--- ...ileSystemOverloadSchemeWithFSCommands.java | 2 +- 8 files changed, 97 insertions(+), 15 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java index 28ebf73cf5534..492cb87ee024e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java @@ -104,4 +104,17 @@ public interface Constants { "fs.viewfs.mount.links.as.symlinks"; boolean CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT = true; + + /** + * When initializing the viewfs, authority will be used as the mount table + * name to find the mount link configurations. To make the mount table name + * unique, we may want to ignore port if initialized uri authority contains + * port number. By default, we will consider port number also in + * ViewFileSystem(This default value false, because to support existing + * deployments continue with the current behavior). 
+ */ + String CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME = + "fs.viewfs.ignore.port.in.mount.table.name"; + + boolean CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT = false; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index cb3696507afd9..0beeda253a1ae 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -20,6 +20,8 @@ import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT; import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; @@ -274,9 +276,15 @@ public void initialize(final URI theUri, final Configuration conf) final InnerCache innerCache = new InnerCache(fsGetter); // Now build client side view (i.e. client side mount table) from config. final String authority = theUri.getAuthority(); + String tableName = authority; + if (theUri.getPort() != -1 && config + .getBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT)) { + tableName = theUri.getHost(); + } try { myUri = new URI(getScheme(), authority, "/", null, null); - fsState = new InodeTree(conf, authority) { + fsState = new InodeTree(conf, tableName) { @Override protected FileSystem getTargetFileSystem(final URI uri) throws URISyntaxException, IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java index 672022be82409..2f3359d32e98c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java @@ -31,6 +31,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME; + /****************************************************************************** * This class is extended from the ViewFileSystem for the overloaded scheme * file system. Mount link configurations and in-memory mount table @@ -85,9 +87,14 @@ * Op3: Create file s3a://bucketA/salesDB/dbfile will go to * s3a://bucketA/salesDB/dbfile * - * Note: In ViewFileSystemOverloadScheme, by default the mount links will be + * Note: + * (1) In ViewFileSystemOverloadScheme, by default the mount links will be * represented as non-symlinks. 
If you want to change this behavior, please see * {@link ViewFileSystem#listStatus(Path)} + * (2) In ViewFileSystemOverloadScheme, only the initialized uri's hostname will + * be considered as the mount table name. When the passed uri has hostname:port, + * it will simply ignore the port number and only hostname will be considered as + * the mount table name. *****************************************************************************/ @InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" }) @InterfaceStability.Evolving @@ -115,6 +122,10 @@ public void initialize(URI theUri, Configuration conf) throws IOException { conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, conf.getBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false)); + /* the default value to true in ViewFSOverloadScheme */ + conf.setBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + conf.getBoolean(Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + true)); if (null != mountTableConfigPath) { MountTableConfigLoader loader = new HCFSMountTableConfigLoader(); loader.load(mountTableConfigPath, conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md index feb0ba2718385..38113cbbb0f06 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md @@ -28,7 +28,11 @@ View File System Overload Scheme ### Details -The View File System Overload Scheme is an extension to the View File System. This will allow users to continue to use their existing fs.defaultFS configured scheme or any new scheme name instead of using scheme `viewfs`. Mount link configurations key, value formats are same as in [ViewFS Guide](./ViewFs.html). If a user wants to continue use the same fs.defaultFS and wants to have more mount points, then mount link configurations should have the current fs.defaultFS authority name as mount table name. Example if fs.defaultFS is `hdfs://mycluster`, then the mount link configuration key name should be like in the following format `fs.viewfs.mounttable.*mycluster*.link.`. We will discuss more example configurations in following sections. +The View File System Overload Scheme is an extension to the View File System. This will allow users to continue to use their existing fs.defaultFS configured scheme or any new scheme name instead of using scheme `viewfs`. +Mount link configurations key, value formats are same as in [ViewFS Guide](./ViewFs.html). +If a user wants to continue use the same fs.defaultFS and wants to have more mount points, then mount link configurations should have the ViewFileSystemOverloadScheme initialized uri's hostname as the mount table name. +Example if fs.defaultFS is `hdfs://mycluster`, then the mount link configuration key name should be like in the following format `fs.viewfs.mounttable.*mycluster*.link.`. +Even if the initialized fs uri has hostname:port, it will simply ignore the port number and only consider the hostname as the mount table name. We will discuss more example configurations in following sections. Another important improvement with the ViewFileSystemOverloadScheme is, administrators need not copy the `mount-table.xml` configuration file to 1000s of client nodes. Instead they can keep the mount-table configuration file in a Hadoop compatible file system. 
So, keeping the configuration file in a central place makes administrators life easier as they can update mount-table in single place. @@ -171,7 +175,7 @@ For example, when the following configuration is used but a path like `viewfs:/f ``` -#### Solution +### Solution To avoid the above problem, the configuration `fs.viewfs.mounttable.default.name.key` has to be set to the name of the cluster, i.e, the following should be added to `core-site.xml` ```xml diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java index e7e74d13763c8..dcfa051c3902d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.fs.viewfs; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT; import static org.junit.Assume.assumeTrue; import java.io.File; @@ -73,6 +75,8 @@ public void setUp() throws Exception { FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, "hdfs"), DistributedFileSystem.class.getName()); + conf.setBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT); URI defaultFSURI = URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY)); ConfigUtil.addLink(conf, defaultFSURI.getAuthority(), "/user", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java index a44af768bdcd5..8b7eb88404a94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java @@ -45,6 +45,8 @@ import org.junit.Before; import org.junit.Test; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT; import static org.junit.Assert.*; @@ -79,6 +81,8 @@ public void startCluster() throws IOException { conf.set(String.format( FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, HDFS_SCHEME), DistributedFileSystem.class.getName()); + conf.setBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); cluster.waitClusterUp(); defaultFSURI = @@ -365,7 +369,7 @@ public void testInvalidOverloadSchemeTargetFS() throws Exception { if (mountTableIfSet != null) { conf.set(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH, mountTableIfSet); } - addMountLinks(defaultFSURI.getAuthority(), + addMountLinks(defaultFSURI.getHost(), new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER, Constants.CONFIG_VIEWFS_LINK_FALLBACK }, new String[] 
{hdfsTargetPath.toUri().toString(), @@ -593,6 +597,45 @@ public void testNflyRepair() throws Exception { } } + /** + * Tests that the fs initialization should ignore the port number when it's + * extracting the mount table name from uri. + */ + @Test(timeout = 30000) + public void testMountTableNameShouldIgnorePortFromURI() throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + conf = new Configuration(getConf()); + addMountLinks(defaultFSURI.getHost(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER, + Constants.CONFIG_VIEWFS_LINK_FALLBACK}, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString(), + hdfsTargetPath.toUri().toString()}, conf); + conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, + defaultFSURI.toString()); + conf.set(String.format(FS_IMPL_PATTERN_KEY, HDFS_SCHEME), + ViewFileSystemOverloadScheme.class.getName()); + conf.set(String + .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + HDFS_SCHEME), DistributedFileSystem.class.getName()); + conf.setBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, true); + + Path testDirOnRoot = new Path("/test"); + URI uriWithoutPort = new URI("hdfs://" + defaultFSURI.getHost()); + //Initialize with out port + try (FileSystem fs = FileSystem + .get(uriWithoutPort, conf)) { + fs.mkdirs(testDirOnRoot); + fs.delete(testDirOnRoot, true); + } + + //Initialize with port + try (FileSystem fs = FileSystem.get(defaultFSURI, conf)) { + fs.mkdirs(testDirOnRoot); + fs.delete(testDirOnRoot, true); + } + } + private void writeString(final FileSystem nfly, final String testString, final Path testFile) throws IOException { try (FSDataOutputStream fsDos = nfly.create(testFile)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java index a9475ddc8d0a3..aea4704711cb4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java @@ -151,7 +151,7 @@ void addMountLinks(String mountTable, String[] sources, String[] targets, @Test public void testSaveNameSpace() throws Exception { final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); - addMountLinks(defaultFSURI.getAuthority(), + addMountLinks(defaultFSURI.getHost(), new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER }, new String[] {hdfsTargetPath.toUri().toString(), localTargetDir.toURI().toString() }, @@ -177,7 +177,7 @@ public void testSaveNameSpace() throws Exception { @Test public void testSaveNamespaceWithoutSpecifyingFS() throws Exception { final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); - addMountLinks(defaultFSURI.getAuthority(), + addMountLinks(defaultFSURI.getHost(), new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER }, new String[] {hdfsTargetPath.toUri().toString(), localTargetDir.toURI().toString() }, @@ -200,9 +200,8 @@ public void testSaveNamespaceWithoutSpecifyingFS() throws Exception { public void testSafeModeWithWrongFS() throws Exception { final Path hdfsTargetPath = new Path("hdfs://nonExistent" + HDFS_USER_FOLDER); - addMountLinks(defaultFSURI.getAuthority(), - new String[] {HDFS_USER_FOLDER }, - new String[] {hdfsTargetPath.toUri().toString(), }, conf); + 
addMountLinks(defaultFSURI.getHost(), new String[] {HDFS_USER_FOLDER}, + new String[] {hdfsTargetPath.toUri().toString()}, conf); final DFSAdmin dfsAdmin = new DFSAdmin(conf); redirectStream(); int ret = ToolRunner.run(dfsAdmin, new String[] {"-safemode", "enter" }); @@ -215,7 +214,7 @@ public void testSafeModeWithWrongFS() throws Exception { */ @Test public void testSafeModeShouldFailOnLocalTargetFS() throws Exception { - addMountLinks(defaultFSURI.getAuthority(), new String[] {LOCAL_FOLDER }, + addMountLinks(defaultFSURI.getHost(), new String[] {LOCAL_FOLDER }, new String[] {localTargetDir.toURI().toString() }, conf); final DFSAdmin dfsAdmin = new DFSAdmin(conf); // ViewFSOveloadScheme uri with localfs mount point @@ -247,8 +246,8 @@ public void testSafeModeShouldFailWithoutMountTables() throws Exception { @Test public void testAllowAndDisalllowSnapShot() throws Exception { final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); - addMountLinks(defaultFSURI.getAuthority(), - new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER }, + addMountLinks(defaultFSURI.getHost(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER}, new String[] {hdfsTargetPath.toUri().toString(), localTargetDir.toURI().toString() }, conf); @@ -270,7 +269,7 @@ public void testAllowAndDisalllowSnapShot() throws Exception { @Test public void testSetBalancerBandwidth() throws Exception { final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); - addMountLinks(defaultFSURI.getAuthority(), + addMountLinks(defaultFSURI.getHost(), new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER }, new String[] {hdfsTargetPath.toUri().toString(), localTargetDir.toURI().toString() }, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java index a974377fac01c..099c967e26e71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java @@ -142,7 +142,7 @@ public void testDFWithViewFsOverloadScheme() throws Exception { List mounts = Lists.newArrayList(); mounts.add(HDFS_USER_FOLDER); mounts.add(LOCAL_FOLDER); - addMountLinks(defaultFSURI.getAuthority(), + addMountLinks(defaultFSURI.getHost(), mounts.toArray(new String[mounts.size()]), new String[] {hdfsTargetPath.toUri().toString(), localTargetDir.toURI().toString() }, From f77bbc2123e3b39117f42e2c9471eb83da98380e Mon Sep 17 00:00:00 2001 From: Ye Ni <141253+NickyYe@users.noreply.github.com> Date: Mon, 6 Jul 2020 19:01:46 -0700 Subject: [PATCH 088/131] HDFS-15312. 
Apply umask when creating directory by WebHDFS (#2096) --- .../src/main/webapps/router/explorer.js | 29 +++++++++++++++++++ .../src/main/webapps/hdfs/explorer.js | 29 +++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js index 6917b70d545ed..490c3934adb6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js @@ -416,9 +416,38 @@ $(this).prop('disabled', true); $(this).button('complete'); + // Get umask from the configuration + var umask, oldUmask, actualUmask; + + $.ajax({'url': '/conf', 'dataType': 'xml', 'async': false}).done( + function(d) { + var $xml = $(d); + $xml.find('property').each(function(idx,v) { + // Current umask config + if ($(v).find('name').text() === 'fs.permissions.umask-mode') { + umask = $(v).find('value').text(); + } + + // Deprecated umask config + if ($(v).find('name').text() === 'dfs.umask') { + oldUmask = $(v).find('value').text(); + } + }); + }); + var url = '/webhdfs/v1' + encode_path(append_path(current_directory, $('#new_directory').val())) + '?op=MKDIRS'; + if (oldUmask) { + actualUmask = 777 - oldUmask; + } else if (umask) { + actualUmask = 777 - umask; + } + + if (actualUmask) { + url = url + '&permission=' + actualUmask; + } + $.ajax(url, { type: 'PUT' } ).done(function(data) { browse_directory(current_directory); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js index cc555670b2214..cbf9df99567c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js @@ -416,9 +416,38 @@ $(this).prop('disabled', true); $(this).button('complete'); + // Get umask from the configuration + var umask, oldUmask, actualUmask; + + $.ajax({'url': '/conf', 'dataType': 'xml', 'async': false}).done( + function(d) { + var $xml = $(d); + $xml.find('property').each(function(idx,v) { + // Current umask config + if ($(v).find('name').text() === 'fs.permissions.umask-mode') { + umask = $(v).find('value').text(); + } + + // Deprecated umask config + if ($(v).find('name').text() === 'dfs.umask') { + oldUmask = $(v).find('value').text(); + } + }); + }); + var url = '/webhdfs/v1' + encode_path(append_path(current_directory, $('#new_directory').val())) + '?op=MKDIRS'; + if (oldUmask) { + actualUmask = 777 - oldUmask; + } else if (umask) { + actualUmask = 777 - umask; + } + + if (actualUmask) { + url = url + '&permission=' + actualUmask; + } + $.ajax(url, { type: 'PUT' } ).done(function(data) { browse_directory(current_directory); From 2bbd00dff498027241a5d84713f4e3f13ac45e65 Mon Sep 17 00:00:00 2001 From: Prabhu Joseph Date: Sun, 5 Jul 2020 14:22:47 +0530 Subject: [PATCH 089/131] YARN-10337. Fix failing testcase TestRMHATimelineCollectors. Contributed by Bilwa S T. 
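The failure is a race: after failover, nm2's collector data is rebuilt asynchronously, so asserting on the very next heartbeat response is flaky. The fix polls the heartbeat until the expected collector addresses show up. A minimal sketch of that polling idiom, with names taken from the test below but otherwise illustrative (the sketch compares the addresses with equals(), the safe form for strings):

```java
// Poll nm2's heartbeat until both apps report the expected collector
// addresses, checking every 300 ms and giving up after 10 s.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    try {
      Map<ApplicationId, AppCollectorData> collectors =
          nm2.nodeHeartbeat(true).getAppCollectors();
      return collectors != null
          && collectorAddr1.equals(
              collectors.get(app1.getApplicationId()).getCollectorAddr())
          && collectorAddr22.equals(
              collectors.get(app2.getApplicationId()).getCollectorAddr());
    } catch (Exception e) {
      return false; // treat transient heartbeat errors as "not yet"
    }
  }
}, 300, 10000);
```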
--- .../TestRMHATimelineCollectors.java | 31 ++++++++++++++----- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHATimelineCollectors.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHATimelineCollectors.java index d4f156bd0b148..f086d080e3f0b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHATimelineCollectors.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHATimelineCollectors.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.api.records.AppCollectorData; @@ -27,6 +28,8 @@ import org.junit.Before; import org.junit.Test; +import com.google.common.base.Supplier; + import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -97,16 +100,28 @@ public void testRebuildCollectorDataOnFailover() throws Exception { assertEquals(collectorAddr2, results1.get(app2.getApplicationId()).getCollectorAddr()); - Map results2 - = nm2.nodeHeartbeat(true).getAppCollectors(); // addr of app1 should be collectorAddr1 since it's registering (no time - // stamp). - assertEquals(collectorAddr1, - results2.get(app1.getApplicationId()).getCollectorAddr()); - // addr of app2 should be collectorAddr22 since its version number is + // stamp). and addr of app2 should be collectorAddr22 since its version + // number is // greater. - assertEquals(collectorAddr22, - results2.get(app2.getApplicationId()).getCollectorAddr()); + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + try { + Map results2 = nm2 + .nodeHeartbeat(true).getAppCollectors(); + if (null != results2) { + return collectorAddr1 == results2.get(app1.getApplicationId()) + .getCollectorAddr() + && collectorAddr22 == results2.get(app2.getApplicationId()) + .getCollectorAddr(); + } + return false; + } catch (Exception e) { + return false; + } + } + }, 300, 10000); // Now nm1 should get updated collector list nm1.getRegisteringCollectors().clear(); From 4f26454a7d1b560f959cdb2fb0641147a85642da Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Tue, 7 Jul 2020 13:01:46 +0800 Subject: [PATCH 090/131] HDFS-15425. Review Logging of DFSClient. Contributed by Hongbing Wang. 
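The cleanup replaces explicit isDebugEnabled() guards plus string concatenation with SLF4J parameterized messages, which are only formatted when DEBUG is actually enabled. A representative before/after, paraphrased from the StripeReader hunk below:

```java
// Before: guard plus concatenation; the guard is boilerplate and easy to
// forget, at which point the concatenation runs even with DEBUG off.
if (DFSClient.LOG.isDebugEnabled()) {
  DFSClient.LOG.debug("Read task returned: " + r + ", for stripe "
      + alignedStripe);
}

// After: SLF4J formats the message lazily, so no guard is needed.
DFSClient.LOG.debug("Read task returned: {}, for stripe {}", r, alignedStripe);
```

An explicit guard is still worth keeping when computing an argument is itself expensive; the arguments touched here are plain locals, so nothing costly is evaluated eagerly.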
--- .../main/java/org/apache/hadoop/hdfs/DFSClient.java | 9 +++------ .../apache/hadoop/hdfs/DFSStripedInputStream.java | 12 ++++-------- .../java/org/apache/hadoop/hdfs/StripeReader.java | 6 ++---- 3 files changed, 9 insertions(+), 18 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 72b2113943756..5a6a0f65f12f6 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -332,12 +332,9 @@ public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, MIN_REPLICATION, HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure. MIN_REPLICATION_DEFAULT); - if (LOG.isDebugEnabled()) { - LOG.debug( - "Sets " + HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure. - MIN_REPLICATION + " to " - + dtpReplaceDatanodeOnFailureReplication); - } + LOG.debug("Sets {} to {}", + HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure. + MIN_REPLICATION, dtpReplaceDatanodeOnFailureReplication); this.ugi = UserGroupInformation.getCurrentUser(); this.namenodeUri = nameNodeUri; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java index ba35d51561162..fa1cf34008ffb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java @@ -110,9 +110,7 @@ public class DFSStripedInputStream extends DFSInputStream { dataBlkNum, parityBlkNum); decoder = CodecUtil.createRawDecoder(dfsClient.getConfiguration(), ecPolicy.getCodecName(), coderOptions); - if (DFSClient.LOG.isDebugEnabled()) { - DFSClient.LOG.debug("Creating an striped input stream for file " + src); - } + DFSClient.LOG.debug("Creating an striped input stream for file {}", src); } private boolean useDirectBuffer() { @@ -465,10 +463,8 @@ protected LocatedBlock refreshLocatedBlock(LocatedBlock block) break; } } - if (DFSClient.LOG.isDebugEnabled()) { - DFSClient.LOG.debug("refreshLocatedBlock for striped blocks, offset=" - + block.getStartOffset() + ". Obtained block " + lb + ", idx=" + idx); - } + DFSClient.LOG.debug("refreshLocatedBlock for striped blocks, offset={}." + + " Obtained block {}, idx={}", block.getStartOffset(), lb, idx); return StripedBlockUtil.constructInternalBlock( lsb, i, cellSize, dataBlkNum, idx); } @@ -526,7 +522,7 @@ protected void reportLostBlock(LocatedBlock lostBlock, if (!warnedNodes.containsAll(dnUUIDs)) { DFSClient.LOG.warn(Arrays.toString(nodes) + " are unavailable and " + "all striping blocks on them are lost. 
" + - "IgnoredNodes = " + ignoredNodes); + "IgnoredNodes = {}", ignoredNodes); warnedNodes.addAll(dnUUIDs); } } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java index 8fd38bdb3b795..96cbb73a3896d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java @@ -353,10 +353,8 @@ void readStripe() throws IOException { StripingChunkReadResult r = StripedBlockUtil .getNextCompletedStripedRead(service, futures, 0); dfsStripedInputStream.updateReadStats(r.getReadStats()); - if (DFSClient.LOG.isDebugEnabled()) { - DFSClient.LOG.debug("Read task returned: " + r + ", for stripe " - + alignedStripe); - } + DFSClient.LOG.debug("Read task returned: {}, for stripe {}", + r, alignedStripe); StripingChunk returnedChunk = alignedStripe.chunks[r.index]; Preconditions.checkNotNull(returnedChunk); Preconditions.checkState(returnedChunk.state == StripingChunk.PENDING); From 3a4d05b850449c51a13f3a15fe0d756fdf50b4b2 Mon Sep 17 00:00:00 2001 From: Prabhu Joseph Date: Tue, 7 Jul 2020 18:02:29 +0530 Subject: [PATCH 091/131] YARN-8047. RMWebApp make external class pluggable. Contributed by Bilwa S T. --- .../hadoop/yarn/conf/YarnConfiguration.java | 6 +++ .../src/main/resources/yarn-default.xml | 20 +++++++ .../resourcemanager/webapp/RMWebApp.java | 11 ++++ .../resourcemanager/webapp/RmController.java | 53 +++++++++++++++++-- 4 files changed, 87 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 54e8888f0d2aa..156943c7f6b41 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2390,6 +2390,12 @@ public static boolean isAclEnabled(Configuration conf) { public static final boolean DEFAULT_NM_DOCKER_ALLOW_HOST_PID_NAMESPACE = false; + public static final String YARN_HTTP_WEBAPP_EXTERNAL_CLASSES = + "yarn.http.rmwebapp.external.classes"; + + public static final String YARN_HTTP_WEBAPP_SCHEDULER_PAGE = + "hadoop.http.rmwebapp.scheduler.page.class"; + /** * Whether or not users are allowed to request that Docker containers honor * the debug deletion delay. This is useful for troubleshooting Docker diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 2f97a7cce7acd..1507296e14662 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -3330,6 +3330,26 @@ 20 + + + Used to specify custom web services for Resourcemanager. Value can be + classnames separated by comma. 
+ Ex: org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices, + org.apache.hadoop.yarn.server.resourcemanager.webapp.DummyClass + + yarn.http.rmwebapp.external.classes + + + + + + Used to specify custom scheduler page + + hadoop.http.rmwebapp.scheduler.page.class + + + + The Node Label script to run. Script output Line starting with "NODE_PARTITION:" will be considered as Node Label Partition. In case of diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java index 316e7ed51959d..5075d2505635c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java @@ -55,6 +55,7 @@ public void setup() { bind(RMWebServices.class); bind(GenericExceptionHandler.class); bind(RMWebApp.class).toInstance(this); + bindExternalClasses(); if (rm != null) { bind(ResourceManager.class).toInstance(rm); @@ -97,6 +98,16 @@ public String getRedirectPath() { return super.getRedirectPath(); } + private void bindExternalClasses() { + YarnConfiguration yarnConf = new YarnConfiguration(rm.getConfig()); + Class[] externalClasses = yarnConf + .getClasses(YarnConfiguration.YARN_HTTP_WEBAPP_EXTERNAL_CLASSES); + for (Class c : externalClasses) { + bind(c); + } + } + + private String buildRedirectPath() { // make a copy of the original configuration so not to mutate it. Also use // an YarnConfiguration to force loading of yarn-site.xml. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java index a291e0548dbc6..e511d1122e021 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java @@ -21,13 +21,17 @@ import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.webapp.YarnWebParams.QUEUE_NAME; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; import org.apache.hadoop.yarn.util.StringHelper; import org.apache.hadoop.yarn.webapp.Controller; +import org.apache.hadoop.yarn.webapp.View; import org.apache.hadoop.yarn.webapp.YarnWebParams; import com.google.inject.Inject; @@ -92,9 +96,52 @@ public void scheduler() { render(FairSchedulerPage.class); return; } - - setTitle("Default Scheduler"); - render(DefaultSchedulerPage.class); + + if (rs instanceof FifoScheduler) { + setTitle("FIFO Scheduler"); + render(DefaultSchedulerPage.class); + return; + } + + renderOtherPluginScheduler(rm); + } + + private void renderOtherPluginScheduler(ResourceManager rm) { + ResourceScheduler rs = rm.getResourceScheduler(); + String schedulerName = rs.getClass().getSimpleName(); + + Class cls = PluginSchedulerPageHelper + .getPageClass(rm.getConfig()); + if (cls != null) { + setTitle(schedulerName); + render(cls); + } else { + LOG.warn( + "Render default scheduler page as scheduler page configured doesn't exist"); + setTitle("Default Scheduler"); + render(DefaultSchedulerPage.class); + } + } + + static class PluginSchedulerPageHelper { + private static boolean hasLoaded = false; + private static Class pageClass = null; + public static Class getPageClass(Configuration conf) { + if (!hasLoaded) { + loadPluginSchedulerPageClass(conf); + hasLoaded = true; + } + return pageClass; + } + + private static void loadPluginSchedulerPageClass(Configuration conf) { + Class configuredClass = conf + .getClass(YarnConfiguration.YARN_HTTP_WEBAPP_SCHEDULER_PAGE, null); + if (!View.class.isAssignableFrom(configuredClass)) { + return; + } + pageClass = (Class) configuredClass; + } } public void queue() { From 5b1ed2113b8e938ab2ff0fef7948148cb07e0457 Mon Sep 17 00:00:00 2001 From: Sebastian Nagel Date: Wed, 8 Jul 2020 17:03:15 +0200 Subject: [PATCH 092/131] HADOOP-17117 Fix typos in hadoop-aws documentation (#2127) --- .../tools/hadoop-aws/committer_architecture.md | 10 +++++----- .../src/site/markdown/tools/hadoop-aws/index.md | 16 ++++++++-------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md index 3071754836c53..30ee7b4e7a327 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md @@ -242,7 +242,7 @@ def commitTask(fs, jobAttemptPath, taskAttemptPath, dest): On a genuine filesystem this is an `O(1)` directory rename. -On an object store with a mimiced rename, it is `O(data)` for the copy, +On an object store with a mimicked rename, it is `O(data)` for the copy, along with overhead for listing and deleting all files (For S3, that's `(1 + files/500)` lists, and the same number of delete calls. @@ -476,7 +476,7 @@ def needsTaskCommit(fs, jobAttemptPath, taskAttemptPath, dest): def commitTask(fs, jobAttemptPath, taskAttemptPath, dest): if fs.exists(taskAttemptPath) : - mergePathsV2(fs. taskAttemptPath, dest) + mergePathsV2(fs, taskAttemptPath, dest) ``` ### v2 Task Abort @@ -903,7 +903,7 @@ not be a problem. IBM's [Stocator](https://github.com/SparkTC/stocator) can transform indirect writes of V1/V2 committers into direct writes to the destination directory. -Hpw does it do this? It's a special Hadoop `FileSystem` implementation which +How does it do this? It's a special Hadoop `FileSystem` implementation which recognizes writes to `_temporary` paths and translate them to writes to the base directory. As well as translating the write operation, it also supports a `getFileStatus()` call on the original path, returning details on the file @@ -969,7 +969,7 @@ It is that fact, that a different process may perform different parts of the upload, which make this algorithm viable. -## The Netfix "Staging" committer +## The Netflix "Staging" committer Ryan Blue, of Netflix, has submitted an alternate committer, one which has a number of appealing features @@ -1081,7 +1081,7 @@ output reaches the job commit. Similarly, if a task is aborted, temporary output on the local FS is removed. If a task dies while the committer is running, it is possible for data to be -eft on the local FS or as unfinished parts in S3. +left on the local FS or as unfinished parts in S3. Unfinished upload parts in S3 are not visible to table readers and are cleaned up following the rules in the target bucket's life-cycle policy. diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md index 22b98ed599c81..964bda49dd069 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md @@ -159,7 +159,7 @@ the number of files, during which time partial updates may be visible. If the operations are interrupted, the filesystem is left in an intermediate state. -### Warning #2: Directories are mimiced +### Warning #2: Directories are mimicked The S3A clients mimics directories by: @@ -184,7 +184,7 @@ Parts of Hadoop relying on this can have unexpected behaviour. E.g. the performance recursive listings whenever possible. * It is possible to create files under files if the caller tries hard. * The time to rename a directory is proportional to the number of files -underneath it (directory or indirectly) and the size of the files. (The copyis +underneath it (directory or indirectly) and the size of the files. 
(The copy is executed inside the S3 storage, so the time is independent of the bandwidth from client to S3). * Directory renames are not atomic: they can fail partway through, and callers @@ -320,7 +320,7 @@ export AWS_SECRET_ACCESS_KEY=my.secret.key If the environment variable `AWS_SESSION_TOKEN` is set, session authentication using "Temporary Security Credentials" is enabled; the Key ID and secret key -must be set to the credentials for that specific sesssion. +must be set to the credentials for that specific session. ```bash export AWS_SESSION_TOKEN=SECRET-SESSION-TOKEN @@ -534,7 +534,7 @@ This means that the default S3A authentication chain can be defined as to directly authenticate with S3 and DynamoDB services. When S3A Delegation tokens are enabled, depending upon the delegation token binding it may be used - to communicate wih the STS endpoint to request session/role + to communicate with the STS endpoint to request session/role credentials. These are loaded and queried in sequence for a valid set of credentials. @@ -630,13 +630,13 @@ The S3A configuration options with sensitive data and `fs.s3a.server-side-encryption.key`) can have their data saved to a binary file stored, with the values being read in when the S3A filesystem URL is used for data access. The reference to this -credential provider then declareed in the hadoop configuration. +credential provider then declared in the Hadoop configuration. For additional reading on the Hadoop Credential Provider API see: [Credential Provider API](../../../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html). -The following configuration options can be storeed in Hadoop Credential Provider +The following configuration options can be stored in Hadoop Credential Provider stores. ``` @@ -725,7 +725,7 @@ of credentials. ### Using secrets from credential providers -Once the provider is set in the Hadoop configuration, hadoop commands +Once the provider is set in the Hadoop configuration, Hadoop commands work exactly as if the secrets were in an XML file. ```bash @@ -761,7 +761,7 @@ used to change the endpoint, encryption and authentication mechanisms of buckets S3Guard options, various minor options. Here are the S3A properties for use in production. The S3Guard options are -documented in the [S3Guard documenents](./s3guard.html); some testing-related +documented in the [S3Guard documents](./s3guard.html); some testing-related options are covered in [Testing](./testing.md). ```xml From 10d218934c9bc143bf8578c92cdbd6df6a4d3b98 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 9 Jul 2020 13:59:47 +0900 Subject: [PATCH 093/131] YARN-10344. Sync netty versions in hadoop-yarn-csi. (#2126) --- .../hadoop-yarn/hadoop-yarn-csi/pom.xml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml index 3d86b6ba20088..ac6ef0b92e24b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml @@ -66,6 +66,18 @@ io.grpc grpc-netty ${grpc.version} + + + + io.netty + netty-codec-http2 + + + + io.netty + netty-handler-proxy + + junit From dfe60392c91be21f574c1659af22f5c381b2675a Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Thu, 9 Jul 2020 12:34:52 +0530 Subject: [PATCH 094/131] YARN-10341. Yarn Service Container Completed event doesn't get processed. Contributed by Bilwa S T. 
--- .../hadoop/yarn/service/ServiceScheduler.java | 2 +- .../hadoop/yarn/service/TestServiceAM.java | 88 +++++++++++++++++++ 2 files changed, 89 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java index 458a7a1c5c1e7..0d77479b95917 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java @@ -737,7 +737,7 @@ public void onContainersCompleted(List statuses) { LOG.warn( "Container {} Completed. No component instance exists. exitStatus={}. diagnostics={} ", containerId, status.getExitStatus(), status.getDiagnostics()); - return; + continue; } ComponentEvent event = new ComponentEvent(instance.getCompName(), CONTAINER_COMPLETED) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java index bbcbee246802c..5b961a838175b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java @@ -22,22 +22,29 @@ import org.apache.commons.io.FileUtils; import org.apache.curator.test.TestingCluster; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; import org.apache.hadoop.yarn.client.api.AMRMClient; import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.Event; +import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.DockerCredentialTokenIdentifier; import org.apache.hadoop.yarn.service.api.records.Artifact; import org.apache.hadoop.yarn.service.api.records.Component; import org.apache.hadoop.yarn.service.api.records.ResourceInformation; import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ServiceState; import org.apache.hadoop.yarn.service.component.ComponentState; import 
org.apache.hadoop.yarn.service.component.instance.ComponentInstance; import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState; @@ -47,7 +54,9 @@ import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,6 +72,8 @@ import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM; import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; public class TestServiceAM extends ServiceTestUtils{ @@ -72,6 +83,9 @@ public class TestServiceAM extends ServiceTestUtils{ private File basedir; YarnConfiguration conf = new YarnConfiguration(); TestingCluster zkCluster; + @Rule + public ServiceTestUtils.ServiceFSWatcher rule = + new ServiceTestUtils.ServiceFSWatcher(); @Before public void setup() throws Exception { @@ -311,6 +325,80 @@ public void testScheduleWithMultipleResourceTypes() am.stop(); } + @Test + public void testContainerCompletedEventProcessed() throws Exception { + ServiceContext context = createServiceContext("abc"); + MockServiceScheduler scheduler = new MockServiceScheduler(context); + scheduler.init(conf); + ApplicationId appId = ApplicationId.newInstance(0, 0); + ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, + 1); + ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 0); + ContainerStatus containerStatus1 = ContainerStatus.newInstance(containerId1, + org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE, + "successful", 0); + ContainerId containerId2 = ContainerId.newContainerId(appAttemptId, 1); + ContainerStatus containerStatus2 = ContainerStatus.newInstance(containerId2, + org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE, + "successful", 0); + ComponentInstance instance = Mockito.mock(ComponentInstance.class); + Mockito.doReturn("componentInstance").when(instance).getCompName(); + scheduler.addLiveCompInstance(containerId2, instance); + List statuses = new ArrayList<>(); + // First container instance will be null + statuses.add(containerStatus1); + // Second container instance is added + scheduler.addLiveCompInstance(containerId2, instance); + statuses.add(containerStatus2); + scheduler.callbackHandler.onContainersCompleted(statuses); + // For second container event should be dispatched. 
+ verify(scheduler.dispatcher, times(1)).getEventHandler(); + DefaultMetricsSystem.shutdown(); + } + + private ServiceContext createServiceContext(String name) + throws Exception { + Artifact artifact = new Artifact(); + artifact.setId("1"); + artifact.setType(Artifact.TypeEnum.TARBALL); + Service serviceDef = ServiceTestUtils.createExampleApplication(); + ApplicationId applicationId = ApplicationId.newInstance( + System.currentTimeMillis(), 1); + serviceDef.setId(applicationId.toString()); + serviceDef.setName(name); + serviceDef.setState(ServiceState.STARTED); + serviceDef.getComponents().forEach(component -> + component.setArtifact(artifact)); + ServiceContext context = new MockRunningServiceContext(rule, + serviceDef); + context.scheduler.getDispatcher().setDrainEventsOnStop(); + context.scheduler.getDispatcher().start(); + return context; + } + + class MockServiceScheduler extends ServiceScheduler { + private AsyncDispatcher dispatcher; + private AMRMClientCallback callbackHandler = new AMRMClientCallback(); + + MockServiceScheduler(ServiceContext context) { + super(context); + } + + @Override + protected AsyncDispatcher createAsyncDispatcher() { + dispatcher = Mockito.mock(AsyncDispatcher.class); + EventHandler handler = Mockito.mock(EventHandler.class); + Mockito.doReturn(handler).when(dispatcher).getEventHandler(); + return dispatcher; + } + + @Override + protected AMRMClientAsync createAMRMClient() { + return AMRMClientAsync.createAMRMClientAsync(1000, callbackHandler); + } + + } + @Test public void testRecordTokensForContainers() throws Exception { ApplicationId applicationId = ApplicationId.newInstance(123456, 1); From 5dd270e2085c8e8c3428287ed6f0c541a5548a31 Mon Sep 17 00:00:00 2001 From: Sunil G Date: Thu, 9 Jul 2020 12:50:25 +0530 Subject: [PATCH 095/131] YARN-10333. YarnClient obtain Delegation Token for Log Aggregation Path. Contributed by Prabhu Joseph. 
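When yarn.nodemanager.remote-app-log-dir points at a different (secure) filesystem than fs.defaultFS, for example HDFS for aggregated logs while the default FS is something else, the application also needs a delegation token for that filesystem, otherwise log aggregation fails once the containers try to write there. The patch fetches that token automatically at submission time. Stripped of the token-buffer (de)serialization and error handling, the essence is:

```java
// Resolve the configured log-aggregation root, take its FileSystem, and ask
// it for delegation tokens renewable by the RM principal. Locals are named
// for readability only.
Credentials credentials = new Credentials();
String renewer = YarnClientUtils.getRmPrincipal(conf); // RM renews the token

LogAggregationFileControllerFactory factory =
    new LogAggregationFileControllerFactory(conf);
Path remoteRootLogDir =
    factory.getFileControllerForWrite().getRemoteRootLogDir();
FileSystem logFs = remoteRootLogDir.getFileSystem(conf); // may differ from fs.defaultFS

logFs.addDelegationTokens(renewer, credentials); // no-op if a token is already present

// The credentials are then written back into the AM ContainerLaunchContext,
// mirroring the existing timeline-service token handling.
```

A failure to obtain the token is only logged as a warning, so submission still succeeds on clusters where the log-aggregation filesystem does not issue delegation tokens.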
--- .../hadoop-yarn/hadoop-yarn-client/pom.xml | 13 ++ .../yarn/client/api/impl/YarnClientImpl.java | 56 ++++++++ .../client/api/impl/TestYarnClientImpl.java | 127 ++++++++++++++++++ 3 files changed, 196 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml index 15e0eaf8581ad..e5631189f791f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml @@ -38,6 +38,19 @@ provided + + org.apache.hadoop + hadoop-hdfs + test + + + + org.apache.hadoop + hadoop-hdfs + test-jar + test + + com.google.guava guava diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java index 14133ba4ecd1e..ebc4e761a5425 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java @@ -30,9 +30,12 @@ import java.util.Set; import java.util.concurrent.Future; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; @@ -131,6 +134,8 @@ import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController; +import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; import org.apache.hadoop.yarn.util.ConverterUtils; @@ -314,6 +319,16 @@ public YarnClientApplication createApplication() addTimelineDelegationToken(appContext.getAMContainerSpec()); } + // Automatically add the DT for Log Aggregation path + // This is useful when a separate storage is used for log aggregation + try { + if (isSecurityEnabled()) { + addLogAggregationDelegationToken(appContext.getAMContainerSpec()); + } + } catch (Exception e) { + LOG.warn("Failed to obtain delegation token for Log Aggregation Path", e); + } + //TODO: YARN-1763:Handle RM failovers during the submitApplication call. 
rmClient.submitApplication(request); @@ -373,6 +388,47 @@ public YarnClientApplication createApplication() return applicationId; } + private void addLogAggregationDelegationToken( + ContainerLaunchContext clc) throws YarnException, IOException { + Credentials credentials = new Credentials(); + DataInputByteBuffer dibb = new DataInputByteBuffer(); + ByteBuffer tokens = clc.getTokens(); + if (tokens != null) { + dibb.reset(tokens); + credentials.readTokenStorageStream(dibb); + tokens.rewind(); + } + + Configuration conf = getConfig(); + String masterPrincipal = YarnClientUtils.getRmPrincipal(conf); + if (StringUtils.isEmpty(masterPrincipal)) { + throw new IOException( + "Can't get Master Kerberos principal for use as renewer"); + } + LOG.debug("Delegation Token Renewer: " + masterPrincipal); + + LogAggregationFileControllerFactory factory = + new LogAggregationFileControllerFactory(conf); + LogAggregationFileController fileController = + factory.getFileControllerForWrite(); + Path remoteRootLogDir = fileController.getRemoteRootLogDir(); + FileSystem fs = remoteRootLogDir.getFileSystem(conf); + + final org.apache.hadoop.security.token.Token[] finalTokens = + fs.addDelegationTokens(masterPrincipal, credentials); + if (finalTokens != null) { + for (org.apache.hadoop.security.token.Token token : finalTokens) { + LOG.info("Added delegation token for log aggregation path " + + remoteRootLogDir + "; "+token); + } + } + + DataOutputBuffer dob = new DataOutputBuffer(); + credentials.writeTokenStorageToStream(dob); + tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); + clc.setTokens(tokens); + } + private void addTimelineDelegationToken( ContainerLaunchContext clc) throws YarnException, IOException { Credentials credentials = new Credentials(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java index a6259a7be05cb..8446f9fbda377 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java @@ -20,6 +20,12 @@ import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; @@ -46,12 +52,15 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Collection; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -63,6 +72,8 @@ */ public class TestYarnClientImpl extends ParameterizedSchedulerTestBase { + protected static final String YARN_RM = "yarn-rm@EXAMPLE.COM"; + public 
TestYarnClientImpl(SchedulerType type) throws IOException { super(type); } @@ -145,6 +156,122 @@ TimelineClient createTimelineClient() throws IOException, YarnException { } } + // Validates if YarnClientImpl automatically adds HDFS Delegation + // token for Log Aggregation Path in a cluster setup with fs.DefaultFS + // set to LocalFileSystem and Log Aggregation Path set to HDFS. + @Test + public void testAutomaitcLogAggregationDelegationToken() + throws Exception { + Configuration conf = getConf(); + SecurityUtil.setAuthenticationMethod( + UserGroupInformation.AuthenticationMethod.KERBEROS, conf); + conf.set(YarnConfiguration.RM_PRINCIPAL, YARN_RM); + String remoteRootLogPath = "/tmp/app-logs"; + + MiniDFSCluster hdfsCluster = null; + try { + // Step 1: Start a MiniDFSCluster for Log Aggregation Path + HdfsConfiguration hdfsConfig = new HdfsConfiguration(); + hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig) + .numDataNodes(1).build(); + + Path remoteRootLogDir = new Path(remoteRootLogPath); + + FileSystem fs = hdfsCluster.getFileSystem(); + fs.mkdirs(remoteRootLogDir); + conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, + fs.getFileStatus(remoteRootLogDir).getPath().toString()); + + // Step 2: Prepare a Mock FileSystem which returns Delegation Token + // when YarnClientImpl invokes + DelegationTokenIdentifier hdfsDT = new DelegationTokenIdentifier(new Text( + "test"), new Text(YARN_RM), null); + final Token dToken = + new Token<>(hdfsDT.getBytes(), new byte[0], hdfsDT.getKind(), + new Text()); + + FileSystem mockFs = mock(FileSystem.class); + doAnswer(new Answer[]>() { + @Override + public Token[] answer(InvocationOnMock invocation) { + Object[] args = invocation.getArguments(); + ((Credentials) args[1]).addToken(hdfsDT.getKind(), dToken); + return new Token[]{dToken}; + } + }).when(mockFs).addDelegationTokens(any(), any()); + + FileSystemTestHelper.addFileSystemForTesting(fs.getUri(), + hdfsConfig, mockFs); + + // Step 3: Prepare a Mock YarnClientImpl + YarnClientImpl client = spy(new YarnClientImpl() { + + @Override + protected void serviceStart() { + rmClient = mock(ApplicationClientProtocol.class); + } + + @Override + protected void serviceStop() { + } + + @Override + public ApplicationReport getApplicationReport(ApplicationId appId) { + ApplicationReport report = mock(ApplicationReport.class); + when(report.getYarnApplicationState()) + .thenReturn(YarnApplicationState.RUNNING); + return report; + } + + @Override + public boolean isSecurityEnabled() { + return true; + } + }); + + client.init(conf); + client.start(); + + // Step 4: Prepare a ApplicationSubmissionContext and submit the app + ApplicationSubmissionContext context = + mock(ApplicationSubmissionContext.class); + ApplicationId applicationId = ApplicationId.newInstance(0, 1); + when(context.getApplicationId()).thenReturn(applicationId); + + DataOutputBuffer dob = new DataOutputBuffer(); + Credentials credentials = new Credentials(); + credentials.writeTokenStorageToStream(dob); + ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); + + ContainerLaunchContext clc = ContainerLaunchContext.newInstance( + null, null, null, null, tokens, null); + when(context.getAMContainerSpec()).thenReturn(clc); + + client.submitApplication(context); + + // Step 5: Verify automatic addition of HDFS DT for log aggregation path + credentials = new Credentials(); + DataInputByteBuffer dibb = new DataInputByteBuffer(); + tokens = clc.getTokens(); + if (tokens != null) { + dibb.reset(tokens); + 
credentials.readTokenStorageStream(dibb); + tokens.rewind(); + } + Collection> dTokens = + credentials.getAllTokens(); + Assert.assertEquals("Failed to place token for Log Aggregation Path", + 1, dTokens.size()); + Assert.assertEquals("Wrong Token for Log Aggregation", + hdfsDT.getKind(), dTokens.iterator().next().getKind()); + + } finally { + if (hdfsCluster != null) { + hdfsCluster.shutdown(); + } + } + } + @Test public void testAutomaticTimelineDelegationTokenLoading() throws Exception { From f91a8ad88b00b50231f1ae3f8820a25c963bb561 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Thu, 9 Jul 2020 11:33:37 -0700 Subject: [PATCH 096/131] HADOOP-17079. Optimize UGI#getGroups by adding UGI#getGroupsSet. (#2085) --- .../java/org/apache/hadoop/fs/FileSystem.java | 2 +- .../org/apache/hadoop/io/SecureIOUtils.java | 2 +- .../security/CompositeGroupsMapping.java | 24 ++++ .../security/GroupMappingServiceProvider.java | 10 ++ .../org/apache/hadoop/security/Groups.java | 97 +++++++++------ .../security/JniBasedUnixGroupsMapping.java | 17 ++- ...JniBasedUnixGroupsMappingWithFallback.java | 6 + ...UnixGroupsNetgroupMappingWithFallback.java | 6 + .../hadoop/security/LdapGroupsMapping.java | 114 +++++++++--------- .../hadoop/security/NullGroupsMapping.java | 15 +++ .../security/RuleBasedLdapGroupsMapping.java | 17 ++- .../security/ShellBasedUnixGroupsMapping.java | 51 ++++---- .../hadoop/security/UserGroupInformation.java | 52 +++++--- .../security/authorize/AccessControlList.java | 6 +- .../apache/hadoop/http/TestHttpServer.java | 9 ++ .../security/TestCompositeGroupMapping.java | 42 +++++-- .../hadoop/security/TestGroupsCaching.java | 23 +++- .../TestRuleBasedLdapGroupsMapping.java | 10 +- .../hadoop/fs/http/server/HttpFSServer.java | 3 +- .../org/apache/hadoop/lib/service/Groups.java | 3 + .../lib/service/security/GroupsService.java | 10 ++ .../fs/http/server/TestHttpFSServer.java | 7 ++ .../service/security/DummyGroupMapping.java | 15 +++ .../router/RouterPermissionChecker.java | 5 +- .../federation/store/records/MountTable.java | 2 +- ...erRefreshSuperUserGroupsConfiguration.java | 3 + .../router/TestRouterUserMappings.java | 19 +++ .../hadoop/hdfs/server/datanode/DataNode.java | 3 +- .../server/namenode/FSPermissionChecker.java | 2 +- .../security/TestRefreshUserMappings.java | 15 +++ .../v2/hs/server/TestHSAdminServer.java | 15 +++ .../v2/hs/webapp/TestHsWebServicesAcls.java | 6 + .../NetworkTagMappingJsonManager.java | 2 +- .../JavaSandboxLinuxContainerRuntime.java | 8 +- .../placement/PrimaryGroupPlacementRule.java | 10 +- .../SecondaryGroupExistingPlacementRule.java | 12 +- .../UserGroupMappingPlacementRule.java | 17 ++- .../resourcemanager/TestRMAdminService.java | 5 + .../scheduler/fair/PeriodGroupsMapping.java | 10 +- .../scheduler/fair/PrimaryGroupMapping.java | 7 ++ .../scheduler/fair/SimpleGroupsMapping.java | 8 ++ 41 files changed, 502 insertions(+), 188 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index abb31ed869591..8136993b6c78a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -2695,7 +2695,7 @@ static void checkAccessPermissions(FileStatus stat, FsAction mode) if (perm.getUserAction().implies(mode)) { return; } - } else if (ugi.getGroups().contains(stat.getGroup())) { + } else if 
(ugi.getGroupsSet().contains(stat.getGroup())) { if (perm.getGroupAction().implies(mode)) { return; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java index 9d3c3c1ceeaa7..f14d99227c7cc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java @@ -272,7 +272,7 @@ private static void checkStat(File f, String owner, String group, UserGroupInformation.createRemoteUser(expectedOwner); final String adminsGroupString = "Administrators"; success = owner.equals(adminsGroupString) - && ugi.getGroups().contains(adminsGroupString); + && ugi.getGroupsSet().contains(adminsGroupString); } else { success = false; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java index 5040de1e65056..6f799c1542095 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashSet; import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -106,6 +107,29 @@ public void cacheGroupsAdd(List groups) throws IOException { // does nothing in this provider of user to groups mapping } + @Override + public synchronized Set getGroupsSet(String user) throws IOException { + Set groupSet = new HashSet(); + + Set groups = null; + for (GroupMappingServiceProvider provider : providersList) { + try { + groups = provider.getGroupsSet(user); + } catch (Exception e) { + LOG.warn("Unable to get groups for user {} via {} because: {}", + user, provider.getClass().getSimpleName(), e.toString()); + LOG.debug("Stacktrace: ", e); + } + if (groups != null && !groups.isEmpty()) { + groupSet.addAll(groups); + if (!combined) { + break; + } + } + } + return groupSet; + } + @Override public synchronized Configuration getConf() { return conf; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java index 8b90f5bc7af9e..ff6c86d5febf3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -52,4 +53,13 @@ public interface GroupMappingServiceProvider { * @throws IOException */ public void cacheGroupsAdd(List groups) throws IOException; + + /** + * Get all various group memberships of a given user. 
+ * Returns EMPTY set in case of non-existing user + * @param user User's name + * @return set of group memberships of user + * @throws IOException + */ + Set getGroupsSet(String user) throws IOException; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java index b29278bd20751..961ec7d591924 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadFactory; @@ -78,8 +77,8 @@ public class Groups { private final GroupMappingServiceProvider impl; - private final LoadingCache> cache; - private final AtomicReference>> staticMapRef = + private final LoadingCache> cache; + private final AtomicReference>> staticMapRef = new AtomicReference<>(); private final long cacheTimeout; private final long negativeCacheTimeout; @@ -168,8 +167,7 @@ private void parseStaticMapping(Configuration conf) { CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT); Collection mappings = StringUtils.getStringCollection( staticMapping, ";"); - Map> staticUserToGroupsMap = - new HashMap>(); + Map> staticUserToGroupsMap = new HashMap<>(); for (String users : mappings) { Collection userToGroups = StringUtils.getStringCollection(users, "="); @@ -181,10 +179,10 @@ private void parseStaticMapping(Configuration conf) { String[] userToGroupsArray = userToGroups.toArray(new String[userToGroups .size()]); String user = userToGroupsArray[0]; - List groups = Collections.emptyList(); + Set groups = Collections.emptySet(); if (userToGroupsArray.length == 2) { - groups = (List) StringUtils - .getStringCollection(userToGroupsArray[1]); + groups = new LinkedHashSet(StringUtils + .getStringCollection(userToGroupsArray[1])); } staticUserToGroupsMap.put(user, groups); } @@ -203,15 +201,47 @@ private IOException noGroupsForUser(String user) { /** * Get the group memberships of a given user. * If the user's group is not cached, this method may block. + * Note this method can be expensive as it involves Set->List conversion. + * For user with large group membership (i.e., > 1000 groups), we recommend + * using getGroupSet to avoid the conversion and fast membership look up via + * contains(). * @param user User's name - * @return the group memberships of the user + * @return the group memberships of the user as list * @throws IOException if user does not exist + * @deprecated Use {@link #getGroupsSet(String user)} instead. */ + @Deprecated public List getGroups(final String user) throws IOException { + return Collections.unmodifiableList(new ArrayList<>( + getGroupInternal(user))); + } + + /** + * Get the group memberships of a given user. + * If the user's group is not cached, this method may block. 
+ * This provide better performance when user has large group membership via + * 1) avoid set->list->set conversion for the caller UGI/PermissionCheck + * 2) fast lookup using contains() via Set instead of List + * @param user User's name + * @return the group memberships of the user as set + * @throws IOException if user does not exist + */ + public Set getGroupsSet(final String user) throws IOException { + return Collections.unmodifiableSet(getGroupInternal(user)); + } + + /** + * Get the group memberships of a given user. + * If the user's group is not cached, this method may block. + * @param user User's name + * @return the group memberships of the user as Set + * @throws IOException if user does not exist + */ + private Set getGroupInternal(final String user) throws IOException { // No need to lookup for groups of static users - Map> staticUserToGroupsMap = staticMapRef.get(); + Map> staticUserToGroupsMap = staticMapRef.get(); if (staticUserToGroupsMap != null) { - List staticMapping = staticUserToGroupsMap.get(user); + Set staticMapping = staticUserToGroupsMap.get(user); if (staticMapping != null) { return staticMapping; } @@ -267,7 +297,7 @@ public long read() { /** * Deals with loading data into the cache. */ - private class GroupCacheLoader extends CacheLoader> { + private class GroupCacheLoader extends CacheLoader> { private ListeningExecutorService executorService; @@ -308,7 +338,7 @@ private class GroupCacheLoader extends CacheLoader> { * @throws IOException to prevent caching negative entries */ @Override - public List load(String user) throws Exception { + public Set load(String user) throws Exception { LOG.debug("GroupCacheLoader - load."); TraceScope scope = null; Tracer tracer = Tracer.curThreadTracer(); @@ -316,9 +346,9 @@ public List load(String user) throws Exception { scope = tracer.newScope("Groups#fetchGroupList"); scope.addKVAnnotation("user", user); } - List groups = null; + Set groups = null; try { - groups = fetchGroupList(user); + groups = fetchGroupSet(user); } finally { if (scope != null) { scope.close(); @@ -334,9 +364,7 @@ public List load(String user) throws Exception { throw noGroupsForUser(user); } - // return immutable de-duped list - return Collections.unmodifiableList( - new ArrayList<>(new LinkedHashSet<>(groups))); + return groups; } /** @@ -345,8 +373,8 @@ public List load(String user) throws Exception { * implementation, otherwise is arranges for the cache to be updated later */ @Override - public ListenableFuture> reload(final String key, - List oldValue) + public ListenableFuture> reload(final String key, + Set oldValue) throws Exception { LOG.debug("GroupCacheLoader - reload (async)."); if (!reloadGroupsInBackground) { @@ -354,19 +382,16 @@ public ListenableFuture> reload(final String key, } backgroundRefreshQueued.incrementAndGet(); - ListenableFuture> listenableFuture = - executorService.submit(new Callable>() { - @Override - public List call() throws Exception { - backgroundRefreshQueued.decrementAndGet(); - backgroundRefreshRunning.incrementAndGet(); - List results = load(key); - return results; - } + ListenableFuture> listenableFuture = + executorService.submit(() -> { + backgroundRefreshQueued.decrementAndGet(); + backgroundRefreshRunning.incrementAndGet(); + Set results = load(key); + return results; }); - Futures.addCallback(listenableFuture, new FutureCallback>() { + Futures.addCallback(listenableFuture, new FutureCallback>() { @Override - public void onSuccess(List result) { + public void onSuccess(Set result) { 
backgroundRefreshSuccess.incrementAndGet(); backgroundRefreshRunning.decrementAndGet(); } @@ -380,11 +405,12 @@ public void onFailure(Throwable t) { } /** - * Queries impl for groups belonging to the user. This could involve I/O and take awhile. + * Queries impl for groups belonging to the user. + * This could involve I/O and take awhile. */ - private List fetchGroupList(String user) throws IOException { + private Set fetchGroupSet(String user) throws IOException { long startMs = timer.monotonicNow(); - List groupList = impl.getGroups(user); + Set groups = impl.getGroupsSet(user); long endMs = timer.monotonicNow(); long deltaMs = endMs - startMs ; UserGroupInformation.metrics.addGetGroups(deltaMs); @@ -392,8 +418,7 @@ private List fetchGroupList(String user) throws IOException { LOG.warn("Potential performance problem: getGroups(user=" + user +") " + "took " + deltaMs + " milliseconds."); } - - return groupList; + return groups; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java index a0f6142a3c5c7..6c24427f3e50e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java @@ -20,8 +20,11 @@ import java.io.IOException; import java.util.Arrays; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; +import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -75,6 +78,18 @@ static private void logError(int groupId, String error) { @Override public List getGroups(String user) throws IOException { + return Arrays.asList(getGroupsInternal(user)); + } + + @Override + public Set getGroupsSet(String user) throws IOException { + String[] groups = getGroupsInternal(user); + Set result = new LinkedHashSet(groups.length); + CollectionUtils.addAll(result, groups); + return result; + } + + private String[] getGroupsInternal(String user) throws IOException { String[] groups = new String[0]; try { groups = getGroupsForUser(user); @@ -85,7 +100,7 @@ public List getGroups(String user) throws IOException { LOG.info("Error getting groups for " + user + ": " + e.getMessage()); } } - return Arrays.asList(groups); + return groups; } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java index f1644305d917e..cc47df1462678 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.PerformanceAdvisory; @@ -61,4 +62,9 @@ public void cacheGroupsAdd(List groups) throws IOException { impl.cacheGroupsAdd(groups); } + @Override + public Set getGroupsSet(String user) throws IOException { + return impl.getGroupsSet(user); + } + } diff --git 
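For readers following the provider-side changes, the sketch below shows one way a custom GroupMappingServiceProvider could satisfy the new getGroupsSet contract alongside the legacy list method. The class name SampleGroupsMapping and the group values are illustrative only and are not part of the patch; the LinkedHashSet keeps insertion order so the primary group stays first, matching the convention of the built-in mappings above.

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.security.GroupMappingServiceProvider;

/** Illustrative provider; shows the getGroupsSet contract only. */
public class SampleGroupsMapping implements GroupMappingServiceProvider {

  @Override
  public Set<String> getGroupsSet(String user) throws IOException {
    // LinkedHashSet de-duplicates while keeping the primary group first.
    Set<String> groups = new LinkedHashSet<>();
    groups.add(user + "-primary");   // hypothetical group names
    groups.add(user + "-secondary");
    return groups;
  }

  @Override
  public List<String> getGroups(String user) throws IOException {
    // The list view is derived from the set, as the built-in mappings now do.
    return new ArrayList<>(getGroupsSet(user));
  }

  @Override
  public void cacheGroupsRefresh() throws IOException {
    // nothing cached in this sketch
  }

  @Override
  public void cacheGroupsAdd(List<String> groups) throws IOException {
    // nothing cached in this sketch
  }
}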
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java index fcc47cb796f33..3d4bd588a5344 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; import org.apache.hadoop.util.NativeCodeLoader; import org.slf4j.Logger; @@ -60,4 +61,9 @@ public void cacheGroupsAdd(List groups) throws IOException { impl.cacheGroupsAdd(groups); } + @Override + public Set getGroupsSet(String user) throws IOException { + return impl.getGroupsSet(user); + } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java index 7c53948cc1f98..3f656990517af 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java @@ -33,6 +33,7 @@ import java.util.Collections; import java.util.Hashtable; import java.util.Iterator; +import java.util.LinkedHashSet; import java.util.List; import java.util.HashSet; import java.util.Collection; @@ -302,12 +303,12 @@ public class LdapGroupsMapping } private DirContext ctx; - private Configuration conf; + private volatile Configuration conf; - private Iterator ldapUrls; + private volatile Iterator ldapUrls; private String currentLdapUrl; - private boolean useSsl; + private volatile boolean useSsl; private String keystore; private String keystorePass; private String truststore; @@ -320,21 +321,21 @@ public class LdapGroupsMapping private Iterator bindUsers; private BindUserInfo currentBindUser; - private String userbaseDN; + private volatile String userbaseDN; private String groupbaseDN; private String groupSearchFilter; - private String userSearchFilter; - private String memberOfAttr; + private volatile String userSearchFilter; + private volatile String memberOfAttr; private String groupMemberAttr; - private String groupNameAttr; - private int groupHierarchyLevels; - private String posixUidAttr; - private String posixGidAttr; + private volatile String groupNameAttr; + private volatile int groupHierarchyLevels; + private volatile String posixUidAttr; + private volatile String posixGidAttr; private boolean isPosix; - private boolean useOneQuery; + private volatile boolean useOneQuery; private int numAttempts; - private int numAttemptsBeforeFailover; - private String ldapCtxFactoryClassName; + private volatile int numAttemptsBeforeFailover; + private volatile String ldapCtxFactoryClassName; /** * Returns list of groups for a user. @@ -348,38 +349,7 @@ public class LdapGroupsMapping */ @Override public synchronized List getGroups(String user) { - /* - * Normal garbage collection takes care of removing Context instances when - * they are no longer in use. Connections used by Context instances being - * garbage collected will be closed automatically. So in case connection is - * closed and gets CommunicationException, retry some times with new new - * DirContext/connection. 
- */ - - // Tracks the number of attempts made using the same LDAP server - int atemptsBeforeFailover = 1; - - for (int attempt = 1; attempt <= numAttempts; attempt++, - atemptsBeforeFailover++) { - try { - return doGetGroups(user, groupHierarchyLevels); - } catch (AuthenticationException e) { - switchBindUser(e); - } catch (NamingException e) { - LOG.warn("Failed to get groups for user {} (attempt={}/{}) using {}. " + - "Exception: ", user, attempt, numAttempts, currentLdapUrl, e); - LOG.trace("TRACE", e); - - if (failover(atemptsBeforeFailover, numAttemptsBeforeFailover)) { - atemptsBeforeFailover = 0; - } - } - - // Reset ctx so that new DirContext can be created with new connection - this.ctx = null; - } - - return Collections.emptyList(); + return new ArrayList<>(getGroupsSet(user)); } /** @@ -458,10 +428,10 @@ private NamingEnumeration lookupPosixGroup(SearchResult result, * @return a list of strings representing group names of the user. * @throws NamingException if unable to find group names */ - private List lookupGroup(SearchResult result, DirContext c, + private Set lookupGroup(SearchResult result, DirContext c, int goUpHierarchy) throws NamingException { - List groups = new ArrayList<>(); + Set groups = new LinkedHashSet<>(); Set groupDNs = new HashSet<>(); NamingEnumeration groupResults; @@ -484,11 +454,7 @@ private List lookupGroup(SearchResult result, DirContext c, getGroupNames(groupResult, groups, groupDNs, goUpHierarchy > 0); } if (goUpHierarchy > 0 && !isPosix) { - // convert groups to a set to ensure uniqueness - Set groupset = new HashSet<>(groups); - goUpGroupHierarchy(groupDNs, goUpHierarchy, groupset); - // convert set back to list for compatibility - groups = new ArrayList<>(groupset); + goUpGroupHierarchy(groupDNs, goUpHierarchy, groups); } } return groups; @@ -507,7 +473,7 @@ private List lookupGroup(SearchResult result, DirContext c, * return an empty string array. * @throws NamingException if unable to get group names */ - List doGetGroups(String user, int goUpHierarchy) + Set doGetGroups(String user, int goUpHierarchy) throws NamingException { DirContext c = getDirContext(); @@ -518,11 +484,11 @@ List doGetGroups(String user, int goUpHierarchy) if (!results.hasMoreElements()) { LOG.debug("doGetGroups({}) returned no groups because the " + "user is not found.", user); - return Collections.emptyList(); + return Collections.emptySet(); } SearchResult result = results.nextElement(); - List groups = Collections.emptyList(); + Set groups = Collections.emptySet(); if (useOneQuery) { try { /** @@ -536,7 +502,7 @@ List doGetGroups(String user, int goUpHierarchy) memberOfAttr + "' attribute." + "Returned user object: " + result.toString()); } - groups = new ArrayList<>(); + groups = new LinkedHashSet<>(); NamingEnumeration groupEnumeration = groupDNAttr.getAll(); while (groupEnumeration.hasMore()) { String groupDN = groupEnumeration.next().toString(); @@ -700,6 +666,42 @@ public void cacheGroupsAdd(List groups) { // does nothing in this provider of user to groups mapping } + @Override + public Set getGroupsSet(String user) { + /* + * Normal garbage collection takes care of removing Context instances when + * they are no longer in use. Connections used by Context instances being + * garbage collected will be closed automatically. So in case connection is + * closed and gets CommunicationException, retry some times with new new + * DirContext/connection. 
+ */ + + // Tracks the number of attempts made using the same LDAP server + int atemptsBeforeFailover = 1; + + for (int attempt = 1; attempt <= numAttempts; attempt++, + atemptsBeforeFailover++) { + try { + return doGetGroups(user, groupHierarchyLevels); + } catch (AuthenticationException e) { + switchBindUser(e); + } catch (NamingException e) { + LOG.warn("Failed to get groups for user {} (attempt={}/{}) using {}. " + + "Exception: ", user, attempt, numAttempts, currentLdapUrl, e); + LOG.trace("TRACE", e); + + if (failover(atemptsBeforeFailover, numAttemptsBeforeFailover)) { + atemptsBeforeFailover = 0; + } + } + + // Reset ctx so that new DirContext can be created with new connection + this.ctx = null; + } + + return Collections.emptySet(); + } + @Override public synchronized Configuration getConf() { return conf; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NullGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NullGroupsMapping.java index f3d048daf990a..9592ecc32c012 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NullGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NullGroupsMapping.java @@ -15,8 +15,10 @@ */ package org.apache.hadoop.security; +import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Set; /** * This class provides groups mapping for {@link UserGroupInformation} when the @@ -31,6 +33,19 @@ public class NullGroupsMapping implements GroupMappingServiceProvider { public void cacheGroupsAdd(List groups) { } + /** + * Get all various group memberships of a given user. + * Returns EMPTY set in case of non-existing user + * + * @param user User's name + * @return set of group memberships of user + * @throws IOException + */ + @Override + public Set getGroupsSet(String user) throws IOException { + return Collections.emptySet(); + } + /** * Returns an empty list. 
* @param user ignored diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java index 6accf2fdced02..5fadcc3ced58b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.security; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -25,7 +24,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; import java.util.stream.Collectors; /** @@ -88,4 +89,18 @@ public synchronized List getGroups(String user) { } } + public synchronized Set getGroupsSet(String user) { + Set groups = super.getGroupsSet(user); + switch (rule) { + case TO_UPPER: + return groups.stream().map(StringUtils::toUpperCase).collect( + Collectors.toCollection(LinkedHashSet::new)); + case TO_LOWER: + return groups.stream().map(StringUtils::toLowerCase).collect( + Collectors.toCollection(LinkedHashSet::new)); + case NONE: + default: + return groups; + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java index 31f43980552f2..f3432a6f91762 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java @@ -18,8 +18,11 @@ package org.apache.hadoop.security; import java.io.IOException; -import java.util.LinkedList; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; import java.util.StringTokenizer; import java.util.concurrent.TimeUnit; @@ -53,7 +56,7 @@ public class ShellBasedUnixGroupsMapping extends Configured private long timeout = CommonConfigurationKeys. HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT; - private static final List EMPTY_GROUPS = new LinkedList<>(); + private static final Set EMPTY_GROUPS_SET = Collections.emptySet(); @Override public void setConf(Configuration conf) { @@ -94,7 +97,7 @@ public String toString() { */ @Override public List getGroups(String userName) throws IOException { - return getUnixGroups(userName); + return new ArrayList(getUnixGroups(userName)); } /** @@ -115,6 +118,11 @@ public void cacheGroupsAdd(List groups) throws IOException { // does nothing in this provider of user to groups mapping } + @Override + public Set getGroupsSet(String userName) throws IOException { + return getUnixGroups(userName); + } + /** * Create a ShellCommandExecutor object using the user's name. * @@ -192,44 +200,33 @@ private boolean handleExecutorTimeout( * group is returned first. 
* @throws IOException if encounter any error when running the command */ - private List getUnixGroups(String user) throws IOException { + private Set getUnixGroups(String user) throws IOException { ShellCommandExecutor executor = createGroupExecutor(user); - List groups; + Set groups; try { executor.execute(); groups = resolveFullGroupNames(executor.getOutput()); } catch (ExitCodeException e) { if (handleExecutorTimeout(executor, user)) { - return EMPTY_GROUPS; + return EMPTY_GROUPS_SET; } else { try { groups = resolvePartialGroupNames(user, e.getMessage(), executor.getOutput()); } catch (PartialGroupNameException pge) { LOG.warn("unable to return groups for user {}", user, pge); - return EMPTY_GROUPS; + return EMPTY_GROUPS_SET; } } } catch (IOException ioe) { if (handleExecutorTimeout(executor, user)) { - return EMPTY_GROUPS; + return EMPTY_GROUPS_SET; } else { // If its not an executor timeout, we should let the caller handle it throw ioe; } } - - // remove duplicated primary group - if (!Shell.WINDOWS) { - for (int i = 1; i < groups.size(); i++) { - if (groups.get(i).equals(groups.get(0))) { - groups.remove(i); - break; - } - } - } - return groups; } @@ -242,13 +239,13 @@ private List getUnixGroups(String user) throws IOException { * @return a linked list of group names * @throws PartialGroupNameException */ - private List parsePartialGroupNames(String groupNames, + private Set parsePartialGroupNames(String groupNames, String groupIDs) throws PartialGroupNameException { StringTokenizer nameTokenizer = new StringTokenizer(groupNames, Shell.TOKEN_SEPARATOR_REGEX); StringTokenizer idTokenizer = new StringTokenizer(groupIDs, Shell.TOKEN_SEPARATOR_REGEX); - List groups = new LinkedList(); + Set groups = new LinkedHashSet<>(); while (nameTokenizer.hasMoreTokens()) { // check for unresolvable group names. if (!idTokenizer.hasMoreTokens()) { @@ -277,10 +274,10 @@ private List parsePartialGroupNames(String groupNames, * @param userName the user's name * @param errMessage error message from the shell command * @param groupNames the incomplete list of group names - * @return a list of resolved group names + * @return a set of resolved group names * @throws PartialGroupNameException if the resolution fails or times out */ - private List resolvePartialGroupNames(String userName, + private Set resolvePartialGroupNames(String userName, String errMessage, String groupNames) throws PartialGroupNameException { // Exception may indicate that some group names are not resolvable. // Shell-based implementation should tolerate unresolvable groups names, @@ -322,16 +319,16 @@ private List resolvePartialGroupNames(String userName, } /** - * Split group names into a linked list. + * Split group names into a set. 
* * @param groupNames a string representing the user's group names - * @return a linked list of group names + * @return a set of group names */ @VisibleForTesting - protected List resolveFullGroupNames(String groupNames) { + protected Set resolveFullGroupNames(String groupNames) { StringTokenizer tokenizer = new StringTokenizer(groupNames, Shell.TOKEN_SEPARATOR_REGEX); - List groups = new LinkedList(); + Set groups = new LinkedHashSet<>(); while (tokenizer.hasMoreTokens()) { groups.add(tokenizer.nextToken()); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 8c84a8d31a063..5269e5a33061a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -40,7 +40,6 @@ import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumMap; @@ -1483,8 +1482,8 @@ public UserGroupInformation getRealUser() { * map that has the translation of usernames to groups. */ private static class TestingGroups extends Groups { - private final Map> userToGroupsMapping = - new HashMap>(); + private final Map> userToGroupsMapping = + new HashMap<>(); private Groups underlyingImplementation; private TestingGroups(Groups underlyingImplementation) { @@ -1494,17 +1493,22 @@ private TestingGroups(Groups underlyingImplementation) { @Override public List getGroups(String user) throws IOException { - List result = userToGroupsMapping.get(user); - + return new ArrayList<>(getGroupsSet(user)); + } + + @Override + public Set getGroupsSet(String user) throws IOException { + Set result = userToGroupsMapping.get(user); if (result == null) { - result = underlyingImplementation.getGroups(user); + result = underlyingImplementation.getGroupsSet(user); } - return result; } private void setUserGroups(String user, String[] groups) { - userToGroupsMapping.put(user, Arrays.asList(groups)); + Set groupsSet = new LinkedHashSet<>(); + Collections.addAll(groupsSet, groups); + userToGroupsMapping.put(user, groupsSet); } } @@ -1563,11 +1567,11 @@ public String getShortUserName() { } public String getPrimaryGroupName() throws IOException { - List groups = getGroups(); - if (groups.isEmpty()) { + Set groupsSet = getGroupsSet(); + if (groupsSet.isEmpty()) { throw new IOException("There is no primary group for UGI " + this); } - return groups.get(0); + return groupsSet.iterator().next(); } /** @@ -1680,21 +1684,24 @@ private synchronized Credentials getCredentialsInternal() { } /** - * Get the group names for this user. {@link #getGroups()} is less + * Get the group names for this user. {@link #getGroupsSet()} is less * expensive alternative when checking for a contained element. * @return the list of users with the primary group first. If the command * fails, it returns an empty list. */ public String[] getGroupNames() { - List groups = getGroups(); - return groups.toArray(new String[groups.size()]); + Collection groupsSet = getGroupsSet(); + return groupsSet.toArray(new String[groupsSet.size()]); } /** - * Get the group names for this user. + * Get the group names for this user. 
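As a caller-side sketch (illustrative, not part of the patch), the pattern applied in FileSystem, SecureIOUtils and the permission checkers replaces a linear scan of the group list with a hash-based lookup on the returned set. The helper class and method names below are hypothetical.

import java.util.Set;

import org.apache.hadoop.security.UserGroupInformation;

final class GroupCheckSketch {
  /**
   * Membership test via the Set-returning API instead of the deprecated
   * List-returning getGroups(); contains() is a hash lookup here.
   */
  static boolean isMemberOf(UserGroupInformation ugi, String group) {
    Set<String> groups = ugi.getGroupsSet();
    return groups.contains(group);
  }
}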
{@link #getGroupsSet()} is less + * expensive alternative when checking for a contained element. * @return the list of users with the primary group first. If the command * fails, it returns an empty list. + * @deprecated Use {@link #getGroupsSet()} instead. */ + @Deprecated public List getGroups() { ensureInitialized(); try { @@ -1705,6 +1712,21 @@ public List getGroups() { } } + /** + * Get the groups names for the user as a Set. + * @return the set of users with the primary group first. If the command + * fails, it returns an empty set. + */ + public Set getGroupsSet() { + ensureInitialized(); + try { + return groups.getGroupsSet(getShortUserName()); + } catch (IOException ie) { + LOG.debug("Failed to get groups for user {}", getShortUserName(), ie); + return Collections.emptySet(); + } + } + /** * Return the username. */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java index 8af47d6e9d5e9..e86d918b05504 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java @@ -24,6 +24,7 @@ import java.util.HashSet; import java.util.LinkedList; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -231,8 +232,9 @@ public final boolean isUserInList(UserGroupInformation ugi) { if (allAllowed || users.contains(ugi.getShortUserName())) { return true; } else if (!groups.isEmpty()) { - for (String group : ugi.getGroups()) { - if (groups.contains(group)) { + Set ugiGroups = ugi.getGroupsSet(); + for (String group : groups) { + if (ugiGroups.contains(group)) { return true; } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java index e0c87e93a9ac0..ad9617dca79de 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -62,8 +62,10 @@ import java.util.Arrays; import java.util.Enumeration; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.concurrent.CountDownLatch; @@ -410,6 +412,13 @@ static void clearMapping() { public List getGroups(String user) throws IOException { return mapping.get(user); } + + @Override + public Set getGroupsSet(String user) throws IOException { + Set result = new HashSet(); + result.addAll(mapping.get(user)); + return result; + } } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java index 0a2d42c27329a..1803fb1a05806 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java @@ -22,7 +22,9 @@ import java.io.IOException; import 
java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.List; +import java.util.Set; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; @@ -87,13 +89,22 @@ public void cacheGroupsRefresh() throws IOException { public void cacheGroupsAdd(List groups) throws IOException { } - + protected List toList(String group) { if (group != null) { return Arrays.asList(new String[] {group}); } return new ArrayList(); } + + protected Set toSet(String group) { + if (group != null) { + Set result = new HashSet<>(); + result.add(group); + return result; + } + return new HashSet(); + } protected void checkTestConf(String expectedValue) { String configValue = getConf().get(PROVIDER_SPECIFIC_CONF_KEY); @@ -106,32 +117,49 @@ protected void checkTestConf(String expectedValue) { private static class UserProvider extends GroupMappingProviderBase { @Override public List getGroups(String user) throws IOException { + return toList(getGroupInternal(user)); + } + + @Override + public Set getGroupsSet(String user) throws IOException { + return toSet(getGroupInternal(user)); + } + + private String getGroupInternal(String user) throws IOException { checkTestConf(PROVIDER_SPECIFIC_CONF_VALUE_FOR_USER); - + String group = null; if (user.equals(john.name)) { group = john.group; } else if (user.equals(jack.name)) { group = jack.group; } - - return toList(group); + return group; } } private static class ClusterProvider extends GroupMappingProviderBase { @Override public List getGroups(String user) throws IOException { + return toList(getGroupsInternal(user)); + } + + @Override + public Set getGroupsSet(String user) throws IOException { + return toSet(getGroupsInternal(user)); + } + + private String getGroupsInternal(String user) throws IOException { checkTestConf(PROVIDER_SPECIFIC_CONF_VALUE_FOR_CLUSTER); - + String group = null; if (user.equals(hdfs.name)) { group = hdfs.group; } else if (user.equals(jack.name)) { // jack has another group from clusterProvider group = jack.group2; } - - return toList(group); + return group; + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java index 46e9f92258502..7620068cf67e4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java @@ -21,9 +21,9 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; -import java.util.LinkedList; import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -75,7 +75,7 @@ public static class FakeGroupMapping extends ShellBasedUnixGroupsMapping { private static volatile CountDownLatch latch = null; @Override - public List getGroups(String user) throws IOException { + public Set getGroupsSet(String user) throws IOException { TESTLOG.info("Getting groups for " + user); delayIfNecessary(); @@ -86,9 +86,14 @@ public List getGroups(String user) throws IOException { } if (blackList.contains(user)) { - return new LinkedList(); + return Collections.emptySet(); } - return new LinkedList(allGroups); + return new LinkedHashSet<>(allGroups); + } + + @Override + public List getGroups(String user) throws IOException { + return new 
ArrayList<>(getGroupsSet(user)); } /** @@ -129,7 +134,7 @@ public static void clearAll() throws IOException { TESTLOG.info("Resetting FakeGroupMapping"); blackList.clear(); allGroups.clear(); - requestCount = 0; + resetRequestCount(); getGroupsDelayMs = 0; throwException = false; latch = null; @@ -197,6 +202,12 @@ public List getGroups(String user) throws IOException { throw new IOException("For test"); } + @Override + public Set getGroupsSet(String user) throws IOException { + requestCount++; + throw new IOException("For test"); + } + public static int getRequestCount() { return requestCount; } @@ -550,7 +561,7 @@ public void testExceptionOnBackgroundRefreshHandled() throws Exception { FakeGroupMapping.clearBlackList(); // We make an initial request to populate the cache - groups.getGroups("me"); + List g1 = groups.getGroups("me"); // add another group groups.cacheGroupsAdd(Arrays.asList("grp3")); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRuleBasedLdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRuleBasedLdapGroupsMapping.java index cd04ae09e3148..8862fd7b60984 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRuleBasedLdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRuleBasedLdapGroupsMapping.java @@ -24,7 +24,9 @@ import javax.naming.NamingException; import java.util.ArrayList; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; import static org.apache.hadoop.security.RuleBasedLdapGroupsMapping .CONVERSION_RULE_KEY; @@ -40,7 +42,7 @@ public class TestRuleBasedLdapGroupsMapping { public void testGetGroupsToUpper() throws NamingException { RuleBasedLdapGroupsMapping groupsMapping = Mockito.spy( new RuleBasedLdapGroupsMapping()); - List groups = new ArrayList<>(); + Set groups = new LinkedHashSet<>(); groups.add("group1"); groups.add("group2"); Mockito.doReturn(groups).when((LdapGroupsMapping) groupsMapping) @@ -61,7 +63,7 @@ public void testGetGroupsToUpper() throws NamingException { public void testGetGroupsToLower() throws NamingException { RuleBasedLdapGroupsMapping groupsMapping = Mockito.spy( new RuleBasedLdapGroupsMapping()); - List groups = new ArrayList<>(); + Set groups = new LinkedHashSet<>(); groups.add("GROUP1"); groups.add("GROUP2"); Mockito.doReturn(groups).when((LdapGroupsMapping) groupsMapping) @@ -82,7 +84,7 @@ public void testGetGroupsToLower() throws NamingException { public void testGetGroupsInvalidRule() throws NamingException { RuleBasedLdapGroupsMapping groupsMapping = Mockito.spy( new RuleBasedLdapGroupsMapping()); - List groups = new ArrayList<>(); + Set groups = new LinkedHashSet<>(); groups.add("group1"); groups.add("GROUP2"); Mockito.doReturn(groups).when((LdapGroupsMapping) groupsMapping) @@ -93,7 +95,7 @@ public void testGetGroupsInvalidRule() throws NamingException { conf.set(CONVERSION_RULE_KEY, "none"); groupsMapping.setConf(conf); - Assert.assertEquals(groups, groupsMapping.getGroups("admin")); + Assert.assertEquals(groups, groupsMapping.getGroupsSet("admin")); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index 17be09ea1f331..bae9dd19b4053 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -96,6 +96,7 @@ import java.util.EnumSet; import java.util.List; import java.util.Map; +import java.util.Set; /** * Main class of HttpFSServer server. @@ -288,7 +289,7 @@ public InputStream run() throws Exception { case INSTRUMENTATION: { enforceRootPath(op.value(), path); Groups groups = HttpFSServerWebApp.get().get(Groups.class); - List userGroups = groups.getGroups(user.getShortUserName()); + Set userGroups = groups.getGroupsSet(user.getShortUserName()); if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) { throw new AccessControlException( "User not in HttpFSServer admin group"); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java index 90733f9cdc7e4..2cc942f8e03e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java @@ -22,10 +22,13 @@ import java.io.IOException; import java.util.List; +import java.util.Set; @InterfaceAudience.Private public interface Groups { public List getGroups(String user) throws IOException; + Set getGroupsSet(String user) throws IOException; + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java index 560a3ccf6ebe4..8de0630c9b11b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; @InterfaceAudience.Private public class GroupsService extends BaseService implements Groups { @@ -50,9 +51,18 @@ public Class getInterface() { return Groups.class; } + /** + * @deprecated use {@link #getGroupsSet(String user)} + */ + @Deprecated @Override public List getGroups(String user) throws IOException { return hGroups.getGroups(user); } + @Override + public Set getGroupsSet(String user) throws IOException { + return hGroups.getGroupsSet(user); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java index a5bbb92f2153b..6739393924e5e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java @@ -60,9 +60,11 @@ import java.text.MessageFormat; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; @@ -170,6 +172,11 @@ public List getGroups(String user) throws IOException { return 
Arrays.asList(HadoopUsersConfTestHelper.getHadoopUserGroups(user)); } + @Override + public Set getGroupsSet(String user) throws IOException { + return new HashSet<>(getGroups(user)); + } + } private Configuration createHttpFSConf(boolean addDelegationTokenAuthHandler, diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java index 9ef786db2d3c0..2693deff7d93a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java @@ -21,7 +21,9 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Set; +import com.google.common.collect.Sets; import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.test.HadoopUsersConfTestHelper; @@ -47,4 +49,17 @@ public void cacheGroupsRefresh() throws IOException { @Override public void cacheGroupsAdd(List groups) throws IOException { } + + @Override + public Set getGroupsSet(String user) throws IOException { + if (user.equals("root")) { + return Sets.newHashSet("admin"); + } else if (user.equals("nobody")) { + return Sets.newHashSet("nobody"); + } else { + String[] groups = HadoopUsersConfTestHelper.getHadoopUserGroups(user); + return (groups != null) ? Sets.newHashSet(groups) : + Collections.emptySet(); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java index b1e68b6de8324..23a3c6e759e7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdfs.server.federation.router; import java.io.IOException; -import java.util.Arrays; -import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -126,8 +124,7 @@ public void checkSuperuserPrivilege() throws AccessControlException { } // Is the user a member of the super group? - List groups = ugi.getGroups(); - if (groups.contains(superGroup)) { + if (ugi.getGroupsSet().contains(superGroup)) { return; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java index d1351a340c3cf..282fe6cbb53e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java @@ -149,7 +149,7 @@ public static MountTable newInstance(final String src, // Set permission fields UserGroupInformation ugi = NameNode.getRemoteUser(); record.setOwnerName(ugi.getShortUserName()); - String group = ugi.getGroups().isEmpty() ? ugi.getShortUserName() + String group = ugi.getGroupsSet().isEmpty() ? 
ugi.getShortUserName() : ugi.getPrimaryGroupName(); record.setGroupName(group); record.setMode(new FsPermission( diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRefreshSuperUserGroupsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRefreshSuperUserGroupsConfiguration.java index fb88882243fed..62fcf31cee60d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRefreshSuperUserGroupsConfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRefreshSuperUserGroupsConfiguration.java @@ -45,6 +45,7 @@ import java.net.URLDecoder; import java.util.ArrayList; import java.util.Arrays; +import java.util.LinkedHashSet; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; @@ -135,6 +136,8 @@ private void testRefreshSuperUserGroupsConfigurationInternal( when(ugi.getRealUser()).thenReturn(impersonator); when(ugi.getUserName()).thenReturn("victim"); when(ugi.getGroups()).thenReturn(Arrays.asList("groupVictim")); + when(ugi.getGroupsSet()).thenReturn(new LinkedHashSet<>(Arrays.asList( + "groupVictim"))); // Exception should be thrown before applying config LambdaTestUtils.intercept( diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java index dc7ebbf0d3475..19d1c436bc827 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.federation.router; +import com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; @@ -56,7 +57,9 @@ import java.net.URL; import java.net.URLDecoder; import java.util.ArrayList; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; import java.util.concurrent.TimeUnit; import static org.junit.Assert.assertArrayEquals; @@ -111,6 +114,16 @@ public void cacheGroupsRefresh() throws IOException { @Override public void cacheGroupsAdd(List groups) throws IOException { } + + @Override + public Set getGroupsSet(String user) throws IOException { + LOG.info("Getting groups in MockUnixGroupsMapping"); + String g1 = user + (10 * i + 1); + String g2 = user + (10 * i + 2); + Set s = Sets.newHashSet(g1, g2); + i++; + return s; + } } @Before @@ -191,6 +204,10 @@ private void testRefreshSuperUserGroupsConfigurationInternal( final List groupNames2 = new ArrayList<>(); groupNames2.add("gr3"); groupNames2.add("gr4"); + final Set groupNamesSet1 = new LinkedHashSet<>(); + groupNamesSet1.addAll(groupNames1); + final Set groupNamesSet2 = new LinkedHashSet<>(); + groupNamesSet2.addAll(groupNames2); //keys in conf String userKeyGroups = DefaultImpersonationProvider.getTestProvider(). 
@@ -222,6 +239,8 @@ private void testRefreshSuperUserGroupsConfigurationInternal( // set groups for users when(ugi1.getGroups()).thenReturn(groupNames1); when(ugi2.getGroups()).thenReturn(groupNames2); + when(ugi1.getGroupsSet()).thenReturn(groupNamesSet1); + when(ugi2.getGroupsSet()).thenReturn(groupNamesSet2); // check before refresh LambdaTestUtils.intercept(AuthorizationException.class, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index e242cc826dbe3..7f740c39d8cf2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -1082,8 +1082,7 @@ private void checkSuperuserPrivilege() throws IOException, AccessControlExceptio } // Is the user a member of the super group? - List groups = callerUgi.getGroups(); - if (groups.contains(supergroup)) { + if (callerUgi.getGroupsSet().contains(supergroup)) { return; } // Not a superuser. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index d60098273d738..93fefeea317d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -103,7 +103,7 @@ protected FSPermissionChecker(String fsOwner, String supergroup, this.fsOwner = fsOwner; this.supergroup = supergroup; this.callerUgi = callerUgi; - this.groups = callerUgi.getGroups(); + this.groups = callerUgi.getGroupsSet(); user = callerUgi.getShortUserName(); isSuper = user.equals(fsOwner) || groups.contains(supergroup); this.attributeProvider = attributeProvider; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java index 2d7410a405cc9..5c026d7d77bee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java @@ -34,8 +34,11 @@ import java.net.URL; import java.net.URLDecoder; import java.util.ArrayList; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; +import com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -84,6 +87,16 @@ public void cacheGroupsRefresh() throws IOException { @Override public void cacheGroupsAdd(List groups) throws IOException { } + + @Override + public Set getGroupsSet(String user) { + LOG.info("Getting groups in MockUnixGroupsMapping"); + String g1 = user + (10 * i + 1); + String g2 = user + (10 * i + 2); + Set s = Sets.newHashSet(g1, g2); + i++; + return s; + } } @Before @@ -196,6 +209,8 @@ public void testRefreshSuperUserGroupsConfiguration() throws Exception { // set groups for users when(ugi1.getGroups()).thenReturn(groupNames1); when(ugi2.getGroups()).thenReturn(groupNames2); + when(ugi1.getGroupsSet()).thenReturn(new 
LinkedHashSet<>(groupNames1)); + when(ugi2.getGroupsSet()).thenReturn(new LinkedHashSet<>(groupNames2)); // check before diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/server/TestHSAdminServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/server/TestHSAdminServer.java index 1eb1d1c58d369..b961a23c723d0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/server/TestHSAdminServer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/server/TestHSAdminServer.java @@ -26,7 +26,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; @@ -56,6 +58,7 @@ import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.yarn.logaggregation.AggregatedLogDeletionService; +import org.mockito.internal.util.collections.Sets; @RunWith(Parameterized.class) public class TestHSAdminServer { @@ -91,6 +94,15 @@ public void cacheGroupsRefresh() throws IOException { @Override public void cacheGroupsAdd(List groups) throws IOException { } + + @Override + public Set getGroupsSet(String user) throws IOException { + Set result = new LinkedHashSet<>(); + result.add(user + (10 * i + 1)); + result.add(user + (10 * i +2)); + i++; + return result; + } } @Parameters @@ -189,6 +201,9 @@ public void testRefreshSuperUserGroups() throws Exception { when(superUser.getUserName()).thenReturn("superuser"); when(ugi.getGroups()) .thenReturn(Arrays.asList(new String[] { "group3" })); + when(ugi.getGroupsSet()) + .thenReturn(Sets.newSet("group3")); + when(ugi.getUserName()).thenReturn("regularUser"); // Set super user groups not to include groups of regularUser diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java index 960993ed7f706..8d4f635e11d68 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java @@ -28,6 +28,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; @@ -276,6 +277,11 @@ public void cacheGroupsRefresh() throws IOException { @Override public void cacheGroupsAdd(List groups) throws IOException { } + + @Override + public Set getGroupsSet(String user) throws IOException { + return Collections.emptySet(); + } } private static class MockJobForAcls implements Job { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingJsonManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingJsonManager.java index eba0ce1deeb17..242300b9cbf6b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingJsonManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingJsonManager.java @@ -86,7 +86,7 @@ public String getNetworkTagHexID(Container container) { container.getUser()); List groups = this.networkTagMapping.getGroups(); for(Group group : groups) { - if (userUGI.getGroups().contains(group.getGroupName())) { + if (userUGI.getGroupsSet().contains(group.getGroupName())) { return group.getNetworkTagID(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java index b4ea66dde2c09..0a25d105b10cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java @@ -303,9 +303,9 @@ public boolean isRuntimeRequested(Map env) { private static List getGroupPolicyFiles(Configuration conf, String user) throws ContainerExecutionException { Groups groups = Groups.getUserToGroupsMappingService(conf); - List userGroups; + Set userGroups; try { - userGroups = groups.getGroups(user); + userGroups = groups.getGroupsSet(user); } catch (IOException e) { throw new ContainerExecutionException("Container user does not exist"); } @@ -330,11 +330,11 @@ private boolean isSandboxContainerWhitelisted(String username, String whitelistGroup = configuration.get( YarnConfiguration.YARN_CONTAINER_SANDBOX_WHITELIST_GROUP); Groups groups = Groups.getUserToGroupsMappingService(configuration); - List userGroups; + Set userGroups; boolean isWhitelisted = false; try { - userGroups = groups.getGroups(username); + userGroups = groups.getGroupsSet(username); } catch (IOException e) { throw new ContainerExecutionException("Container user does not exist"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PrimaryGroupPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PrimaryGroupPlacementRule.java index 73e5cd0148473..948194f4dbb08 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PrimaryGroupPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PrimaryGroupPlacementRule.java @@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.List; +import java.util.Set; import static org.apache.hadoop.yarn.server.resourcemanager.placement.FairQueuePlacementUtils.DOT; import static org.apache.hadoop.yarn.server.resourcemanager.placement.FairQueuePlacementUtils.assureRoot; @@ -62,19 +62,19 @@ public ApplicationPlacementContext getPlacementForApp( // All users should have at least one group the primary group. If no groups // are returned then there is a real issue. - final List groupList; + final Set groupSet; try { - groupList = groupProvider.getGroups(user); + groupSet = groupProvider.getGroupsSet(user); } catch (IOException ioe) { throw new YarnException("Group resolution failed", ioe); } - if (groupList.isEmpty()) { + if (groupSet.isEmpty()) { LOG.error("Group placement rule failed: No groups returned for user {}", user); throw new YarnException("No groups returned for user " + user); } - String cleanGroup = cleanName(groupList.get(0)); + String cleanGroup = cleanName(groupSet.iterator().next()); String queueName; PlacementRule parentRule = getParentRule(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/SecondaryGroupExistingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/SecondaryGroupExistingPlacementRule.java index 9acdbccc32ef9..8e6ccb3413e78 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/SecondaryGroupExistingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/SecondaryGroupExistingPlacementRule.java @@ -30,7 +30,8 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.List; +import java.util.Iterator; +import java.util.Set; import static org.apache.hadoop.yarn.server.resourcemanager.placement.FairQueuePlacementUtils.DOT; import static org.apache.hadoop.yarn.server.resourcemanager.placement.FairQueuePlacementUtils.assureRoot; @@ -65,9 +66,9 @@ public ApplicationPlacementContext getPlacementForApp( // All users should have at least one group the primary group. If no groups // are returned then there is a real issue. - final List groupList; + final Set groupSet; try { - groupList = groupProvider.getGroups(user); + groupSet = groupProvider.getGroupsSet(user); } catch (IOException ioe) { throw new YarnException("Group resolution failed", ioe); } @@ -90,8 +91,9 @@ public ApplicationPlacementContext getPlacementForApp( parentQueue); } // now check the groups inside the parent - for (int i = 1; i < groupList.size(); i++) { - String group = cleanName(groupList.get(i)); + Iterator it = groupSet.iterator(); + while (it.hasNext()) { + String group = cleanName(it.next()); String queueName = parentQueue == null ? 
assureRoot(group) : parentQueue + DOT + group; if (configuredQueue(queueName)) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java index 71d9bb78805d9..a5faefe796cc2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java @@ -20,7 +20,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; +import java.util.Set; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -74,18 +76,21 @@ public UserGroupMappingPlacementRule(){ } private String getPrimaryGroup(String user) throws IOException { - return groups.getGroups(user).get(0); + return groups.getGroupsSet(user).iterator().next(); } private String getSecondaryGroup(String user) throws IOException { - List groupsList = groups.getGroups(user); + Set groupsSet = groups.getGroupsSet(user); String secondaryGroup = null; // Traverse all secondary groups (as there could be more than one // and position is not guaranteed) and ensure there is queue with // the same name - for (int i = 1; i < groupsList.size(); i++) { - if (this.queueManager.getQueue(groupsList.get(i)) != null) { - secondaryGroup = groupsList.get(i); + Iterator it = groupsSet.iterator(); + it.next(); + while (it.hasNext()) { + String group = it.next(); + if (this.queueManager.getQueue(group) != null) { + secondaryGroup = group; break; } } @@ -180,7 +185,7 @@ private ApplicationPlacementContext getPlacementForUser(String user) } } if (mapping.getType().equals(MappingType.GROUP)) { - for (String userGroups : groups.getGroups(user)) { + for (String userGroups : groups.getGroupsSet(user)) { if (userGroups.equals(mapping.getSource())) { if (mapping.getQueue().equals(CURRENT_USER_MAPPING)) { if (LOG.isDebugEnabled()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java index e235632f75cf9..32f1ab1d9bb1c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java @@ -1459,6 +1459,11 @@ public void cacheGroupsAdd(List groups) throws IOException { // Do nothing } + @Override + public Set getGroupsSet(String user) throws IOException { + return ImmutableSet.copyOf(group); + } + public static void updateGroups() { group.clear(); group.add("test_group_D"); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/PeriodGroupsMapping.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/PeriodGroupsMapping.java index 9586381d97b5a..b6c50c0b56ecb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/PeriodGroupsMapping.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/PeriodGroupsMapping.java @@ -18,17 +18,20 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; +import com.google.common.collect.ImmutableSet; import org.apache.hadoop.security.GroupMappingServiceProvider; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Set; public class PeriodGroupsMapping implements GroupMappingServiceProvider { @Override public List getGroups(String user) { - return Arrays.asList(user + ".group", user + "subgroup1", user + "subgroup2"); + return Arrays.asList(user + ".group", user + "subgroup1", + user + "subgroup2"); } @Override @@ -41,4 +44,9 @@ public void cacheGroupsAdd(List groups) throws IOException { throw new UnsupportedOperationException(); } + @Override + public Set getGroupsSet(String user) throws IOException { + return ImmutableSet.of(user + ".group", user + "subgroup1", + user + "subgroup2"); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/PrimaryGroupMapping.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/PrimaryGroupMapping.java index 11415b0f7571f..a34ca8bb24ab8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/PrimaryGroupMapping.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/PrimaryGroupMapping.java @@ -22,7 +22,9 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.List; +import java.util.Set; /** * Group Mapping class used for test cases. 
Returns only primary group of the @@ -44,4 +46,9 @@ public void cacheGroupsRefresh() throws IOException { public void cacheGroupsAdd(List groups) throws IOException { throw new UnsupportedOperationException(); } + + @Override + public Set getGroupsSet(String user) throws IOException { + return Collections.singleton(user + "group"); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java index 9c916e36418bd..1fbab2dee72e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java @@ -21,7 +21,9 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Set; +import com.google.common.collect.ImmutableSet; import org.apache.hadoop.security.GroupMappingServiceProvider; public class SimpleGroupsMapping implements GroupMappingServiceProvider { @@ -45,4 +47,10 @@ public void cacheGroupsRefresh() throws IOException { @Override public void cacheGroupsAdd(List groups) throws IOException { } + + @Override + public Set getGroupsSet(String user) throws IOException { + return ImmutableSet.of(user + "group", user + "subgroup1", + user + "subgroup2"); + } } From 0e694b20b9d59cc46882df506dcea386020b1e4d Mon Sep 17 00:00:00 2001 From: Siyao Meng <50227127+smengcl@users.noreply.github.com> Date: Thu, 9 Jul 2020 12:38:52 -0700 Subject: [PATCH 097/131] HDFS-15462. Add fs.viewfs.overload.scheme.target.ofs.impl to core-default.xml (#2131) --- .../hadoop-common/src/main/resources/core-default.xml | 8 ++++++++ .../apache/hadoop/conf/TestCommonConfigurationFields.java | 1 + 2 files changed, 9 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index accb1b91a937a..cf156af196461 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -967,6 +967,14 @@ child file system and ViewFSOverloadScheme's schemes are s3a. + + fs.viewfs.overload.scheme.target.ofs.impl + org.apache.hadoop.fs.ozone.RootedOzoneFileSystem + The RootedOzoneFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are ofs. 
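The group-mapping changes in the patches above move callers from `getGroups(user)` (an ordered `List`) to `getGroupsSet(user)` (a `Set`), and the placement rules take the first element in the set's iteration order as the primary group. Below is a minimal sketch of a custom `GroupMappingServiceProvider` that honours that contract by backing the set with an insertion-ordered `LinkedHashSet`; the class name and group names are illustrative only, not part of the patch.

```java
import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.security.GroupMappingServiceProvider;

/** Illustrative mapping: primary group first, then secondary groups. */
public class OrderedGroupsMapping implements GroupMappingServiceProvider {

  @Override
  public List<String> getGroups(String user) throws IOException {
    // Primary group must come first; the set view below mirrors this order.
    return Arrays.asList(user + "-primary", user + "-secondary");
  }

  @Override
  public Set<String> getGroupsSet(String user) throws IOException {
    // LinkedHashSet keeps insertion order, so iterator().next() yields the
    // primary group, which is what the placement rules above rely on.
    return new LinkedHashSet<>(getGroups(user));
  }

  @Override
  public void cacheGroupsRefresh() throws IOException {
    // No cache in this sketch.
  }

  @Override
  public void cacheGroupsAdd(List<String> groups) throws IOException {
    // No cache in this sketch.
  }
}
```

A plain `HashSet` would also compile, but it would lose the primary-group-first ordering that `PrimaryGroupPlacementRule` and `UserGroupMappingPlacementRule` depend on.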
+ + + fs.viewfs.overload.scheme.target.o3fs.impl org.apache.hadoop.fs.ozone.OzoneFileSystem diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java index 3b9947e213512..dd9f41a7a3527 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java @@ -133,6 +133,7 @@ public void initializeMemberVariables() { xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.hdfs.impl"); xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.http.impl"); xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.https.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.ofs.impl"); xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.o3fs.impl"); xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.oss.impl"); xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.s3a.impl"); From 3e700066394fb9f516e23537d8abb4661409cae1 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Sat, 11 Jul 2020 23:50:04 -0700 Subject: [PATCH 098/131] HDFS-15464: ViewFsOverloadScheme should work when -fs option pointing to remote cluster without mount links (#2132). Contributed by Uma Maheswara Rao G. --- .../org/apache/hadoop/fs/FsConstants.java | 2 + .../apache/hadoop/fs/viewfs/InodeTree.java | 22 ++++++++--- .../hadoop/fs/viewfs/ViewFileSystem.java | 13 ++++++- .../viewfs/ViewFileSystemOverloadScheme.java | 12 ++++++ .../org/apache/hadoop/fs/viewfs/ViewFs.java | 16 +++++++- .../hadoop/fs/viewfs/TestViewFsConfig.java | 2 +- .../TestViewFsOverloadSchemeListStatus.java | 39 ++++++++++++++----- .../src/site/markdown/ViewFsOverloadScheme.md | 3 +- ...wFileSystemOverloadSchemeWithDFSAdmin.java | 20 ++++++---- 9 files changed, 102 insertions(+), 27 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java index 07c16b22358c1..344048f0ceeb1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java @@ -44,4 +44,6 @@ public interface FsConstants { public static final String VIEWFS_SCHEME = "viewfs"; String FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN = "fs.viewfs.overload.scheme.target.%s.impl"; + String VIEWFS_TYPE = "viewfs"; + String VIEWFSOS_TYPE = "viewfsOverloadScheme"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java index 3d709b13bfc09..422e7337b57fb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java @@ -34,6 +34,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import 
org.apache.hadoop.security.UserGroupInformation; @@ -67,7 +68,7 @@ enum ResultKind { // the root of the mount table private final INode root; // the fallback filesystem - private final INodeLink rootFallbackLink; + private INodeLink rootFallbackLink; // the homedir for this mount table private final String homedirPrefix; private List> mountPoints = new ArrayList>(); @@ -460,7 +461,8 @@ Configuration getConfig() { * @throws FileAlreadyExistsException * @throws IOException */ - protected InodeTree(final Configuration config, final String viewName) + protected InodeTree(final Configuration config, final String viewName, + final URI theUri, boolean initingUriAsFallbackOnNoMounts) throws UnsupportedFileSystemException, URISyntaxException, FileAlreadyExistsException, IOException { String mountTableName = viewName; @@ -596,9 +598,19 @@ protected InodeTree(final Configuration config, final String viewName) } if (!gotMountTableEntry) { - throw new IOException( - "ViewFs: Cannot initialize: Empty Mount table in config for " + - "viewfs://" + mountTableName + "/"); + if (!initingUriAsFallbackOnNoMounts) { + throw new IOException( + "ViewFs: Cannot initialize: Empty Mount table in config for " + + "viewfs://" + mountTableName + "/"); + } + StringBuilder msg = + new StringBuilder("Empty mount table detected for ").append(theUri) + .append(" and considering itself as a linkFallback."); + FileSystem.LOG.info(msg.toString()); + rootFallbackLink = + new INodeLink(mountTableName, ugi, getTargetFileSystem(theUri), + theUri); + getRootDir().addFallbackLink(rootFallbackLink); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 0beeda253a1ae..1fc531e05635d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -258,6 +258,14 @@ public String getScheme() { return FsConstants.VIEWFS_SCHEME; } + /** + * Returns the ViewFileSystem type. + * @return viewfs + */ + String getType() { + return FsConstants.VIEWFS_TYPE; + } + /** * Called after a new FileSystem instance is constructed. * @param theUri a uri whose authority section names the host, port, etc. for @@ -284,7 +292,10 @@ public void initialize(final URI theUri, final Configuration conf) } try { myUri = new URI(getScheme(), authority, "/", null, null); - fsState = new InodeTree(conf, tableName) { + boolean initingUriAsFallbackOnNoMounts = + !FsConstants.VIEWFS_TYPE.equals(getType()); + fsState = new InodeTree(conf, tableName, theUri, + initingUriAsFallbackOnNoMounts) { @Override protected FileSystem getTargetFileSystem(final URI uri) throws URISyntaxException, IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java index 2f3359d32e98c..2165a3f9ee688 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java @@ -95,6 +95,10 @@ * be considered as the mount table name. 
When the passed uri has hostname:port, * it will simply ignore the port number and only hostname will be considered as * the mount table name. + * (3) If there are no mount links configured with the initializing uri's + * hostname as the mount table name, then it will automatically consider the + * current uri as fallback( ex: fs.viewfs.mounttable..linkFallBack) + * target fs uri. *****************************************************************************/ @InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" }) @InterfaceStability.Evolving @@ -109,6 +113,14 @@ public String getScheme() { return myUri.getScheme(); } + /** + * Returns the ViewFileSystem type. + * @return viewfs + */ + String getType() { + return FsConstants.VIEWFSOS_TYPE; + } + @Override public void initialize(URI theUri, Configuration conf) throws IOException { this.myUri = theUri; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index a63960c55de0c..95b596bde367d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -196,7 +196,16 @@ URI[] getTargets() { return targets; } } - + + /** + * Returns the ViewFileSystem type. + * + * @return viewfs + */ + String getType() { + return FsConstants.VIEWFS_TYPE; + } + public ViewFs(final Configuration conf) throws IOException, URISyntaxException { this(FsConstants.VIEWFS_URI, conf); @@ -222,7 +231,10 @@ public ViewFs(final Configuration conf) throws IOException, CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT); // Now build client side view (i.e. client side mount table) from config. 
String authority = theUri.getAuthority(); - fsState = new InodeTree(conf, authority) { + boolean initingUriAsFallbackOnNoMounts = + !FsConstants.VIEWFS_TYPE.equals(getType()); + fsState = new InodeTree(conf, authority, theUri, + initingUriAsFallbackOnNoMounts) { @Override protected AbstractFileSystem getTargetFileSystem(final URI uri) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java index 136837fc801c4..56f5b2d997dc2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java @@ -39,7 +39,7 @@ public void testInvalidConfig() throws IOException, URISyntaxException { class Foo { } - new InodeTree(conf, null) { + new InodeTree(conf, null, null, false) { @Override protected Foo getTargetFileSystem(final URI uri) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java index 0cf691481f720..300fdd8b333f1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java @@ -46,9 +46,17 @@ public class TestViewFsOverloadSchemeListStatus { private static final File TEST_DIR = GenericTestUtils.getTestDir(TestViewfsFileStatus.class.getSimpleName()); + private Configuration conf; + private static final String FILE_NAME = "file"; @Before public void setUp() { + conf = new Configuration(); + conf.set(String.format("fs.%s.impl", FILE_NAME), + ViewFileSystemOverloadScheme.class.getName()); + conf.set(String + .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + FILE_NAME), LocalFileSystem.class.getName()); FileUtil.fullyDelete(TEST_DIR); assertTrue(TEST_DIR.mkdirs()); } @@ -77,15 +85,9 @@ public void testListStatusACL() throws IOException, URISyntaxException { File childDir = new File(TEST_DIR, childDirectoryName); childDir.mkdirs(); - Configuration conf = new Configuration(); ConfigUtil.addLink(conf, "/file", infile.toURI()); ConfigUtil.addLink(conf, "/dir", childDir.toURI()); - String fileScheme = "file"; - conf.set(String.format("fs.%s.impl", fileScheme), - ViewFileSystemOverloadScheme.class.getName()); - conf.set(String - .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, - fileScheme), LocalFileSystem.class.getName()); + String fileUriStr = "file:///"; try (FileSystem vfs = FileSystem.get(new URI(fileUriStr), conf)) { assertEquals(ViewFileSystemOverloadScheme.class, vfs.getClass()); @@ -95,9 +97,8 @@ public void testListStatusACL() throws IOException, URISyntaxException { .getRawFileSystem(new Path(fileUriStr), conf); FileStatus fileStat = localFs.getFileStatus(new Path(infile.getPath())); FileStatus dirStat = localFs.getFileStatus(new Path(childDir.getPath())); - for (FileStatus status : statuses) { - if (status.getPath().getName().equals(fileScheme)) { + if (status.getPath().getName().equals(FILE_NAME)) { assertEquals(fileStat.getPermission(), status.getPermission()); } else { assertEquals(dirStat.getPermission(), status.getPermission()); @@ -111,7 +112,7 @@ public 
void testListStatusACL() throws IOException, URISyntaxException { statuses = vfs.listStatus(new Path("/")); for (FileStatus status : statuses) { - if (status.getPath().getName().equals(fileScheme)) { + if (status.getPath().getName().equals(FILE_NAME)) { assertEquals(FsPermission.valueOf("-rwxr--r--"), status.getPermission()); assertFalse(status.isDirectory()); @@ -124,6 +125,24 @@ public void testListStatusACL() throws IOException, URISyntaxException { } } + /** + * Tests that ViewFSOverloadScheme should consider initialized fs as fallback + * if there are no mount links configured. + */ + @Test(timeout = 30000) + public void testViewFSOverloadSchemeWithoutAnyMountLinks() throws Exception { + try (FileSystem fs = FileSystem.get(TEST_DIR.toPath().toUri(), conf)) { + ViewFileSystemOverloadScheme vfs = (ViewFileSystemOverloadScheme) fs; + assertEquals(0, vfs.getMountPoints().length); + Path testFallBack = new Path("test", FILE_NAME); + assertTrue(vfs.mkdirs(testFallBack)); + FileStatus[] status = vfs.listStatus(testFallBack.getParent()); + assertEquals(FILE_NAME, status[0].getPath().getName()); + assertEquals(testFallBack.getName(), + vfs.getFileLinkStatus(testFallBack).getPath().getName()); + } + } + @AfterClass public static void cleanup() throws IOException { FileUtil.fullyDelete(TEST_DIR); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md index 38113cbbb0f06..564bc034e7597 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md @@ -33,8 +33,9 @@ Mount link configurations key, value formats are same as in [ViewFS Guide](./Vie If a user wants to continue use the same fs.defaultFS and wants to have more mount points, then mount link configurations should have the ViewFileSystemOverloadScheme initialized uri's hostname as the mount table name. Example if fs.defaultFS is `hdfs://mycluster`, then the mount link configuration key name should be like in the following format `fs.viewfs.mounttable.*mycluster*.link.`. Even if the initialized fs uri has hostname:port, it will simply ignore the port number and only consider the hostname as the mount table name. We will discuss more example configurations in following sections. +If there are no mount links configured with the initializing uri's hostname as the mount table name, then it will automatically consider the current uri as fallback(`fs.viewfs.mounttable.*mycluster*.linkFallback`) target fs uri. -Another important improvement with the ViewFileSystemOverloadScheme is, administrators need not copy the `mount-table.xml` configuration file to 1000s of client nodes. Instead they can keep the mount-table configuration file in a Hadoop compatible file system. So, keeping the configuration file in a central place makes administrators life easier as they can update mount-table in single place. +Another important improvement with the ViewFileSystemOverloadScheme is, administrators need not copy the `mount-table.xml` configuration file to 1000s of client nodes. Instead, they can keep the mount-table configuration file in a Hadoop compatible file system. So, keeping the configuration file in a central place makes administrators life easier as they can update mount-table in single place. 
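To make the fallback behaviour described above concrete, here is a minimal client-side sketch that routes `hdfs://` URIs through `ViewFileSystemOverloadScheme`. The `mycluster` authority, the `/data` mount point and the target URIs are placeholders; only property names that appear in this patch and in the ViewFS guide are used.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ViewFsOverloadSchemeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Serve hdfs:// paths through the overload scheme implementation.
    conf.set("fs.hdfs.impl",
        "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
    // Implementation used for the hdfs targets behind the mount table.
    conf.set("fs.viewfs.overload.scheme.target.hdfs.impl",
        "org.apache.hadoop.hdfs.DistributedFileSystem");

    // Optional mount link for the "mycluster" mount table.
    conf.set("fs.viewfs.mounttable.mycluster.link./data",
        "hdfs://mycluster/data");

    try (FileSystem fs = FileSystem.get(new URI("hdfs://mycluster/"), conf)) {
      fs.listStatus(new Path("/"));  // resolved through the mount table
    }
  }
}
```

Removing the `link./data` entry exercises the new behaviour: with no mount links defined for `mycluster`, the initializing URI itself is taken as the `linkFallback` target, which is what the new `testViewFSOverloadSchemeWithoutAnyMountLinks` and `testGetSafemodeWithoutMountLinksConfigured` tests verify.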
### Enabling View File System Overload Scheme diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java index aea4704711cb4..39df141a37be6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java @@ -228,16 +228,22 @@ public void testSafeModeShouldFailOnLocalTargetFS() throws Exception { } /** - * Tests safemode with ViewFSOverloadScheme, but without mounttables. + * Tests safemode get with ViewFSOverloadScheme, but without any mount links + * configured. The ViewFSOverloadScheme should consider initialized fs as + * fallback fs automatically. */ @Test - public void testSafeModeShouldFailWithoutMountTables() throws Exception { + public void testGetSafemodeWithoutMountLinksConfigured() throws Exception { final DFSAdmin dfsAdmin = new DFSAdmin(conf); - String uri = defaultFSURI.toString(); - redirectStream(); - int ret = ToolRunner.run(dfsAdmin, - new String[] {"-fs", uri, "-safemode", "enter" }); - assertEquals(-1, ret); + try { + redirectStream(); + int ret = ToolRunner.run(dfsAdmin, + new String[] {"-fs", defaultFSURI.toString(), "-safemode", "get"}); + assertOutMsg("Safe mode is OFF", 0); + assertEquals(0, ret); + } finally { + dfsAdmin.close(); + } } /** From 84b74b335c0251afa672643352c6b7ecf003e0fb Mon Sep 17 00:00:00 2001 From: lfengnan Date: Sun, 12 Jul 2020 00:10:12 -0700 Subject: [PATCH 099/131] HDFS-15447 RBF: Add top real owners metrics for delegation tokens (#2110) --- .../AbstractDelegationTokenSecretManager.java | 101 +++++++++++++++++- .../ZKDelegationTokenSecretManager.java | 3 + .../server/federation/metrics/RBFMetrics.java | 18 +++- .../federation/metrics/RouterMBean.java | 7 ++ .../federation/router/RBFConfigKeys.java | 4 + .../ZKDelegationTokenSecretManagerImpl.java | 3 +- .../src/main/resources/hdfs-rbf-default.xml | 10 ++ .../security/TestRouterSecurityManager.java | 68 ++++++++++++ 8 files changed, 209 insertions(+), 5 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index 3a22cee881070..eb65799757f66 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -22,9 +22,12 @@ import java.io.DataInputStream; import java.io.IOException; import java.security.MessageDigest; +import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -34,6 +37,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.Text; +import org.apache.hadoop.metrics2.util.Metrics2Util.NameValuePair; +import 
org.apache.hadoop.metrics2.util.Metrics2Util.TopN; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.HadoopKerberosName; import org.apache.hadoop.security.token.SecretManager; @@ -64,7 +69,13 @@ private String formatTokenId(TokenIdent id) { */ protected final Map currentTokens = new ConcurrentHashMap<>(); - + + /** + * Map of token real owners to its token count. This is used to generate + * metrics of top users by owned tokens. + */ + protected final Map tokenOwnerStats = new ConcurrentHashMap<>(); + /** * Sequence number to create DelegationTokenIdentifier. * Protected by this object lock. @@ -292,6 +303,7 @@ protected DelegationTokenInformation getTokenInfo(TokenIdent ident) { protected void storeToken(TokenIdent ident, DelegationTokenInformation tokenInfo) throws IOException { currentTokens.put(ident, tokenInfo); + addTokenForOwnerStats(ident); storeNewToken(ident, tokenInfo.getRenewDate()); } @@ -339,6 +351,7 @@ public synchronized void addPersistedDelegationToken( if (getTokenInfo(identifier) == null) { currentTokens.put(identifier, new DelegationTokenInformation(renewDate, password, getTrackingIdIfEnabled(identifier))); + addTokenForOwnerStats(identifier); } else { throw new IOException("Same delegation token being added twice: " + formatTokenId(identifier)); @@ -578,6 +591,7 @@ public synchronized TokenIdent cancelToken(Token token, if (info == null) { throw new InvalidToken("Token not found " + formatTokenId(id)); } + removeTokenForOwnerStats(id); removeStoredToken(id); return id; } @@ -634,6 +648,7 @@ private void removeExpiredToken() throws IOException { long renewDate = entry.getValue().getRenewDate(); if (renewDate < now) { expiredTokens.add(entry.getKey()); + removeTokenForOwnerStats(entry.getKey()); i.remove(); } } @@ -726,4 +741,88 @@ public TokenIdent decodeTokenIdentifier(Token token) throws IOExcept return token.decodeIdentifier(); } + /** + * Return top token real owners list as well as the tokens count. + * + * @param n top number of users + * @return map of owners to counts + */ + public List getTopTokenRealOwners(int n) { + n = Math.min(n, tokenOwnerStats.size()); + if (n == 0) { + return new ArrayList<>(); + } + + TopN topN = new TopN(n); + for (Map.Entry entry : tokenOwnerStats.entrySet()) { + topN.offer(new NameValuePair( + entry.getKey(), entry.getValue())); + } + + List list = new ArrayList<>(); + while (!topN.isEmpty()) { + list.add(topN.poll()); + } + Collections.reverse(list); + return list; + } + + /** + * Return the real owner for a token. If this is a token from a proxy user, + * the real/effective user will be returned. + * + * @param id + * @return real owner + */ + private String getTokenRealOwner(TokenIdent id) { + String realUser; + if (id.getRealUser() != null && !id.getRealUser().toString().isEmpty()) { + realUser = id.getRealUser().toString(); + } else { + // if there is no real user -> this is a non proxy user + // the user itself is the real owner + realUser = id.getUser().getUserName(); + } + return realUser; + } + + /** + * Add token stats to the owner to token count mapping. + * + * @param id + */ + private void addTokenForOwnerStats(TokenIdent id) { + String realOwner = getTokenRealOwner(id); + tokenOwnerStats.put(realOwner, + tokenOwnerStats.getOrDefault(realOwner, 0L)+1); + } + + /** + * Remove token stats to the owner to token count mapping. 
+ * + * @param id + */ + private void removeTokenForOwnerStats(TokenIdent id) { + String realOwner = getTokenRealOwner(id); + if (tokenOwnerStats.containsKey(realOwner)) { + // unlikely to be less than 1 but in case + if (tokenOwnerStats.get(realOwner) <= 1) { + tokenOwnerStats.remove(realOwner); + } else { + tokenOwnerStats.put(realOwner, tokenOwnerStats.get(realOwner)-1); + } + } + } + + /** + * This method syncs token information from currentTokens to tokenOwnerStats. + * It is used when the currentTokens is initialized or refreshed. This is + * called from a single thread thus no synchronization is needed. + */ + protected void syncTokenOwnerStats() { + tokenOwnerStats.clear(); + for (TokenIdent id : currentTokens.keySet()) { + addTokenForOwnerStats(id); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java index f50035d03773e..276573ba00c9b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java @@ -457,6 +457,9 @@ private void loadFromZKCache(final boolean isTokenCache) { ++count; } } + if (isTokenCache) { + syncTokenOwnerStats(); + } if (count > 0) { LOG.warn("Ignored {} nodes while loading {} cache.", count, cacheName); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java index 1ff3d7b246f05..e13815bb4ac3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java @@ -124,7 +124,8 @@ public class RBFMetrics implements RouterMBean, FederationMBean { private MountTableStore mountTableStore; /** Router state store. */ private RouterStore routerStore; - + /** The number of top token owners reported in metrics. 
*/ + private int topTokenRealOwners; public RBFMetrics(Router router) throws IOException { this.router = router; @@ -166,7 +167,9 @@ public RBFMetrics(Router router) throws IOException { Configuration conf = router.getConfig(); this.timeOut = conf.getTimeDuration(RBFConfigKeys.DN_REPORT_TIME_OUT, RBFConfigKeys.DN_REPORT_TIME_OUT_MS_DEFAULT, TimeUnit.MILLISECONDS); - + this.topTokenRealOwners = conf.getInt( + RBFConfigKeys.DFS_ROUTER_METRICS_TOP_NUM_TOKEN_OWNERS_KEY, + RBFConfigKeys.DFS_ROUTER_METRICS_TOP_NUM_TOKEN_OWNERS_KEY_DEFAULT); } /** @@ -649,6 +652,17 @@ public long getCurrentTokensCount() { return -1; } + @Override + public String getTopTokenRealOwners() { + RouterSecurityManager mgr = + this.router.getRpcServer().getRouterSecurityManager(); + if (mgr != null && mgr.getSecretManager() != null) { + return JSON.toString(mgr.getSecretManager() + .getTopTokenRealOwners(this.topTokenRealOwners)); + } + return ""; + } + @Override public boolean isSecurityEnabled() { return UserGroupInformation.isSecurityEnabled(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RouterMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RouterMBean.java index daec1548b57a9..087c5b4bacfb9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RouterMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RouterMBean.java @@ -101,4 +101,11 @@ public interface RouterMBean { * @return true, if security is enabled. */ boolean isSecurityEnabled(); + + /** + * Get the top delegation token owners(realUser). + * + * @return Json string of owners to token counts + */ + String getTopTokenRealOwners(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java index adc97209ece4d..7b06ca428bd38 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java @@ -79,6 +79,10 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic { public static final Class DFS_ROUTER_METRICS_CLASS_DEFAULT = FederationRPCPerformanceMonitor.class; + public static final String DFS_ROUTER_METRICS_TOP_NUM_TOKEN_OWNERS_KEY = + FEDERATION_ROUTER_PREFIX + "top.num.token.realowners"; + public static final int + DFS_ROUTER_METRICS_TOP_NUM_TOKEN_OWNERS_KEY_DEFAULT = 10; // HDFS Router heartbeat public static final String DFS_ROUTER_HEARTBEAT_ENABLE = diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java index 2d55026c807af..a83be71e59d83 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java @@ 
-30,8 +30,6 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; import java.io.IOException; import java.util.HashSet; import java.util.List; @@ -197,6 +195,7 @@ private void rebuildTokenCache(boolean initial) throws IOException { } } } + syncTokenOwnerStats(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index deab5494cd6ab..4bd2ac36b1377 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -657,4 +657,14 @@ + + dfs.federation.router.top.num.token.realowners + 10 + + The number of top real owners by tokens count to report in the JMX metrics. + Real owners are the effective users whose cretential are used to generate + the tokens. + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java index b88fd147ac6d8..d62837ccb13d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterSecurityManager.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.server.federation.router.Router; import org.apache.hadoop.hdfs.server.federation.router.security.token.ZKDelegationTokenSecretManagerImpl; import org.apache.hadoop.io.Text; +import org.apache.hadoop.metrics2.util.Metrics2Util.NameValuePair; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; @@ -50,6 +51,7 @@ import org.hamcrest.core.StringContains; import java.io.IOException; +import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -124,6 +126,72 @@ public void testDelegationTokens() throws IOException { securityManager.renewDelegationToken(token); } + @Test + public void testDelgationTokenTopOwners() throws Exception { + UserGroupInformation.reset(); + List topOwners; + + UserGroupInformation user = UserGroupInformation + .createUserForTesting("abc", new String[]{"router_group"}); + UserGroupInformation.setLoginUser(user); + Token dt = securityManager.getDelegationToken(new Text("abc")); + topOwners = securityManager.getSecretManager().getTopTokenRealOwners(2); + assertEquals(1, topOwners.size()); + assertEquals("abc", topOwners.get(0).getName()); + assertEquals(1, topOwners.get(0).getValue()); + + securityManager.renewDelegationToken(dt); + topOwners = securityManager.getSecretManager().getTopTokenRealOwners(2); + assertEquals(1, topOwners.size()); + assertEquals("abc", topOwners.get(0).getName()); + assertEquals(1, topOwners.get(0).getValue()); + + securityManager.cancelDelegationToken(dt); + topOwners = securityManager.getSecretManager().getTopTokenRealOwners(2); + assertEquals(0, topOwners.size()); + + + // Use proxy user - the code should use the proxy user as the real owner + UserGroupInformation routerUser = + UserGroupInformation.createRemoteUser("router"); + UserGroupInformation proxyUser = UserGroupInformation + 
.createProxyUserForTesting("abc", + routerUser, + new String[]{"router_group"}); + UserGroupInformation.setLoginUser(proxyUser); + + Token proxyDT = securityManager.getDelegationToken(new Text("router")); + topOwners = securityManager.getSecretManager().getTopTokenRealOwners(2); + assertEquals(1, topOwners.size()); + assertEquals("router", topOwners.get(0).getName()); + assertEquals(1, topOwners.get(0).getValue()); + + // router to renew tokens + UserGroupInformation.setLoginUser(routerUser); + securityManager.renewDelegationToken(proxyDT); + topOwners = securityManager.getSecretManager().getTopTokenRealOwners(2); + assertEquals(1, topOwners.size()); + assertEquals("router", topOwners.get(0).getName()); + assertEquals(1, topOwners.get(0).getValue()); + + securityManager.cancelDelegationToken(proxyDT); + topOwners = securityManager.getSecretManager().getTopTokenRealOwners(2); + assertEquals(0, topOwners.size()); + + + // check rank by more users + securityManager.getDelegationToken(new Text("router")); + securityManager.getDelegationToken(new Text("router")); + UserGroupInformation.setLoginUser(user); + securityManager.getDelegationToken(new Text("router")); + topOwners = securityManager.getSecretManager().getTopTokenRealOwners(2); + assertEquals(2, topOwners.size()); + assertEquals("router", topOwners.get(0).getName()); + assertEquals(2, topOwners.get(0).getValue()); + assertEquals("abc", topOwners.get(1).getName()); + assertEquals(1, topOwners.get(1).getValue()); + } + @Test public void testVerifyToken() throws IOException { UserGroupInformation.reset(); From b97fea65e70bee4f5ea81c544396f8e9fa860ab0 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Mon, 13 Jul 2020 14:12:48 +0800 Subject: [PATCH 100/131] HDFS-14498 LeaseManager can loop forever on the file for which create has failed. Contributed by Stephen O'Donnell. --- .../hdfs/server/namenode/FSNamesystem.java | 11 ++ .../apache/hadoop/hdfs/TestLeaseRecovery.java | 107 ++++++++++++++++++ 2 files changed, 118 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 5e50b581a183a..993c2832dce94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -3677,6 +3677,17 @@ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip, " internalReleaseLease: Committed blocks are minimally" + " replicated, lease removed, file" + src + " closed."); return true; // closed! + } else if (penultimateBlockMinStorage && lastBlock.getNumBytes() == 0) { + // HDFS-14498 - this is a file with a final block of zero bytes and was + // likely left in this state by a client which exited unexpectedly + pendingFile.removeLastBlock(lastBlock); + finalizeINodeFileUnderConstruction(src, pendingFile, + iip.getLatestSnapshotId(), false); + NameNode.stateChangeLog.warn("BLOCK*" + + " internalReleaseLease: Committed last block is zero bytes with" + + " insufficient replicas. Final block removed, lease removed, file " + + src + " closed."); + return true; } // Cannot close file right now, since some blocks // are not yet minimally replicated. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java index c82b47cec94df..a1cce3effa448 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java @@ -24,15 +24,18 @@ import java.util.EnumSet; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -43,6 +46,7 @@ import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Test; @@ -314,4 +318,107 @@ public void testLeaseRecoveryAndAppend() throws Exception { } } } + + /** + * HDFS-14498 - test lease can be recovered for a file where the final + * block was never registered with the DNs, and hence the IBRs will never + * be received. In this case the final block should be zero bytes and can + * be removed. + */ + @Test + public void testLeaseRecoveryEmptyCommittedLastBlock() throws Exception { + Configuration conf = new Configuration(); + DFSClient client = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + DistributedFileSystem dfs = cluster.getFileSystem(); + client = + new DFSClient(cluster.getNameNode().getServiceRpcAddress(), conf); + String file = "/test/f1"; + Path filePath = new Path(file); + + createCommittedNotCompleteFile(client, file); + + // Ensure a different client cannot append the file + try { + dfs.append(filePath); + fail("Append to a file(lease is held by another client) should fail"); + } catch (RemoteException e) { + assertTrue(e.getMessage().contains("file lease is currently owned")); + } + + // Ensure the lease can be recovered on the first try + boolean recovered = client.recoverLease(file); + assertEquals(true, recovered); + + // Ensure the recovered file can now be written + FSDataOutputStream append = dfs.append(filePath); + append.write("test".getBytes()); + append.close(); + } finally { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + if (client != null) { + client.close(); + } + } + } + + /** + * HDFS-14498 - similar to testLeaseRecoveryEmptyCommittedLastBlock except + * we wait for the lease manager to recover the lease automatically. 
+ */ + @Test + public void testLeaseManagerRecoversEmptyCommittedLastBlock() + throws Exception { + Configuration conf = new Configuration(); + DFSClient client = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + client = + new DFSClient(cluster.getNameNode().getServiceRpcAddress(), conf); + String file = "/test/f1"; + + createCommittedNotCompleteFile(client, file); + waitLeaseRecovery(cluster); + + GenericTestUtils.waitFor(() -> { + String holder = NameNodeAdapter + .getLeaseHolderForPath(cluster.getNameNode(), file); + return holder == null; + }, 100, 10000); + + } finally { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + if (client != null) { + client.close(); + } + } + } + + private void createCommittedNotCompleteFile(DFSClient client, String file) + throws IOException { + HdfsFileStatus stat = client.getNamenode() + .create(file, new FsPermission("777"), "test client", + new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), + true, (short) 1, 1024 * 1024 * 128L, + new CryptoProtocolVersion[0], null, null); + // Add a block to the file + LocatedBlock blk = client.getNamenode() + .addBlock(file, "test client", null, + new DatanodeInfo[0], stat.getFileId(), new String[0], null); + // Without writing anything to the file, or setting up the DN pipeline + // attempt to close the file. This will fail (return false) as the NN will + // be expecting the registered block to be reported from the DNs via IBR, + // but that will never happen, as the pipeline was never established + boolean closed = client.getNamenode().complete( + file, "test client", blk.getBlock(), stat.getFileId()); + assertEquals(false, closed); + } + } From b9fa5e0182c19adc4ff4cd2d9265a36ce9913178 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Mon, 13 Jul 2020 13:30:02 +0100 Subject: [PATCH 101/131] HDFS-13934. Multipart uploaders to be created through FileSystem/FileContext. Contributed by Steve Loughran. 
Change-Id: Iebd34140c1a0aa71f44a3f4d0fee85f6bdf123a3 --- .../apache/hadoop/fs/AbstractFileSystem.java | 30 ++ .../hadoop/fs/CommonPathCapabilities.java | 8 + .../org/apache/hadoop/fs/FileContext.java | 27 ++ .../java/org/apache/hadoop/fs/FileSystem.java | 32 +- .../apache/hadoop/fs/FilterFileSystem.java | 13 +- .../java/org/apache/hadoop/fs/FilterFs.java | 6 + .../apache/hadoop/fs/InternalOperations.java | 36 +- .../apache/hadoop/fs/MultipartUploader.java | 123 ++--- .../hadoop/fs/MultipartUploaderBuilder.java | 83 ++++ .../hadoop/fs/MultipartUploaderFactory.java | 76 ---- .../fs/impl/AbstractMultipartUploader.java | 142 ++++++ .../FileSystemMultipartUploader.java | 158 +++++-- .../FileSystemMultipartUploaderBuilder.java | 90 ++++ .../hadoop/fs/impl/FutureIOSupport.java | 28 +- .../fs/impl/MultipartUploaderBuilderImpl.java | 215 +++++++++ ....apache.hadoop.fs.MultipartUploaderFactory | 16 - .../markdown/filesystem/multipartuploader.md | 137 ++++-- .../hadoop/fs/TestFilterFileSystem.java | 23 + .../apache/hadoop/fs/TestHarFileSystem.java | 3 + ...AbstractContractMultipartUploaderTest.java | 348 ++++++++++----- .../TestLocalFSContractMultipartUploader.java | 61 --- .../hadoop/hdfs/DistributedFileSystem.java | 8 + .../hdfs/client/DfsPathCapabilities.java | 1 + .../hadoop/hdfs/web/WebHdfsFileSystem.java | 8 + ....apache.hadoop.fs.MultipartUploaderFactory | 16 - .../apache/hadoop/fs/s3a/S3AFileSystem.java | 59 ++- .../hadoop/fs/s3a/S3AInstrumentation.java | 9 +- .../hadoop/fs/s3a/S3AMultipartUploader.java | 216 --------- .../org/apache/hadoop/fs/s3a/Statistic.java | 24 +- .../hadoop/fs/s3a/WriteOperationHelper.java | 22 +- .../apache/hadoop/fs/s3a/WriteOperations.java | 335 ++++++++++++++ .../hadoop/fs/s3a/impl/ContextAccessors.java | 8 + .../fs/s3a/impl/S3AMultipartUploader.java | 420 ++++++++++++++++++ .../s3a/impl/S3AMultipartUploaderBuilder.java | 66 +++ .../hadoop/fs/s3a/impl/StoreContext.java | 37 +- .../fs/s3a/impl/StoreContextBuilder.java | 189 ++++++++ .../S3AMultipartUploaderStatistics.java | 39 ++ .../S3AMultipartUploaderStatisticsImpl.java | 98 ++++ .../fs/s3a/s3guard/BulkOperationState.java | 4 + .../fs/s3a/s3guard/DynamoDBMetadataStore.java | 20 +- .../org.apache.hadoop.fs.MultipartUploader | 2 +- ....apache.hadoop.fs.MultipartUploaderFactory | 15 - .../ITestS3AContractMultipartUploader.java | 52 +-- .../s3a/impl/TestPartialDeleteFailures.java | 46 +- .../TestS3AMultipartUploaderSupport.java | 48 +- 45 files changed, 2603 insertions(+), 794 deletions(-) rename hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java => hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java (51%) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java rename hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/{ => impl}/FileSystemMultipartUploader.java (52%) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java delete mode 100644 
hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory delete mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory delete mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploaderBuilder.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/statistics/S3AMultipartUploaderStatistics.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/statistics/S3AMultipartUploaderStatisticsImpl.java delete mode 100644 hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory rename hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/{ => impl}/TestS3AMultipartUploaderSupport.java (56%) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 32926d55de874..ec346b482a452 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -1382,4 +1382,34 @@ public boolean hasPathCapability(final Path path, return false; } } + + /** + * Create a multipart uploader. + * @param basePath file path under which all files are uploaded + * @return a MultipartUploaderBuilder object to build the uploader + * @throws IOException if some early checks cause IO failures. + * @throws UnsupportedOperationException if support is checked early. + */ + @InterfaceStability.Unstable + public MultipartUploaderBuilder createMultipartUploader(Path basePath) + throws IOException { + methodNotSupported(); + return null; + } + + /** + * Helper method that throws an {@link UnsupportedOperationException} for the + * current {@link FileSystem} method being called. + */ + protected final void methodNotSupported() { + // The order of the stacktrace elements is (from top to bottom): + // - java.lang.Thread.getStackTrace + // - org.apache.hadoop.fs.FileSystem.methodNotSupported + // - + // therefore, to find out the current method name, we use the element at + // index 2. 
+ String name = Thread.currentThread().getStackTrace()[2].getMethodName(); + throw new UnsupportedOperationException(getClass().getCanonicalName() + + " does not support method " + name); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java index fb46ef81e36fa..539b3e27c0351 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java @@ -131,4 +131,12 @@ private CommonPathCapabilities() { @InterfaceStability.Unstable public static final String FS_EXPERIMENTAL_BATCH_LISTING = "fs.capability.batch.listing"; + + /** + * Does the store support multipart uploading? + * Value: {@value}. + */ + public static final String FS_MULTIPART_UPLOADER = + "fs.capability.multipart.uploader"; + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index e9d8ea4a4ec1f..e5f4ef3809f18 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -2957,4 +2957,31 @@ public boolean hasPathCapability(Path path, String capability) (fs, p) -> fs.hasPathCapability(p, capability)); } + /** + * Return a set of server default configuration values based on path. + * @param path path to fetch server defaults + * @return server default configuration values for path + * @throws IOException an I/O error occurred + */ + public FsServerDefaults getServerDefaults(final Path path) + throws IOException { + return FsLinkResolution.resolve(this, + fixRelativePart(path), + (fs, p) -> fs.getServerDefaults(p)); + } + + /** + * Create a multipart uploader. + * @param basePath file path under which all files are uploaded + * @return a MultipartUploaderBuilder object to build the uploader + * @throws IOException if some early checks cause IO failures. + * @throws UnsupportedOperationException if support is checked early. + */ + @InterfaceStability.Unstable + public MultipartUploaderBuilder createMultipartUploader(Path basePath) + throws IOException { + return FsLinkResolution.resolve(this, + fixRelativePart(basePath), + (fs, p) -> fs.createMultipartUploader(p)); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 8136993b6c78a..ab5040486dffc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -132,22 +132,35 @@ * New methods may be marked as Unstable or Evolving for their initial release, * as a warning that they are new and may change based on the * experience of use in applications. + *

* Important note for developers - * - * If you're making changes here to the public API or protected methods, + *

+ * If you are making changes here to the public API or protected methods, * you must review the following subclasses and make sure that * they are filtering/passing through new methods as appropriate. + *

* - * {@link FilterFileSystem}: methods are passed through. + * {@link FilterFileSystem}: methods are passed through. If not, + * then {@code TestFilterFileSystem.MustNotImplement} must be + * updated with the unsupported interface. + * Furthermore, if the new API's support is probed for via + * {@link #hasPathCapability(Path, String)} then + * {@link FilterFileSystem#hasPathCapability(Path, String)} + * must return false, always. + *

* {@link ChecksumFileSystem}: checksums are created and * verified. + *

* {@code TestHarFileSystem} will need its {@code MustNotImplement} * interface updated. + *

* * There are some external places your changes will break things. * Do co-ordinate changes here. + *

* * HBase: HBoss + *

* Hive: HiveShim23 * {@code shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java} * @@ -4644,4 +4657,17 @@ public CompletableFuture build() throws IOException { } + /** + * Create a multipart uploader. + * @param basePath file path under which all files are uploaded + * @return a MultipartUploaderBuilder object to build the uploader + * @throws IOException if some early checks cause IO failures. + * @throws UnsupportedOperationException if support is checked early. + */ + @InterfaceStability.Unstable + public MultipartUploaderBuilder createMultipartUploader(Path basePath) + throws IOException { + methodNotSupported(); + return null; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index cf12ea3898a7f..42410974db17c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -41,6 +41,8 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.Progressable; +import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; + /**************************************************************** * A FilterFileSystem contains * some other file system, which it uses as @@ -728,7 +730,16 @@ protected CompletableFuture openFileWithOptions( @Override public boolean hasPathCapability(final Path path, final String capability) throws IOException { - return fs.hasPathCapability(path, capability); + switch (validatePathCapabilityArgs(makeQualified(path), capability)) { + case CommonPathCapabilities.FS_MULTIPART_UPLOADER: + case CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING: + // operations known to be unsupported, irrespective of what + // the wrapped class implements. + return false; + default: + // the feature is not implemented. 
+ return fs.hasPathCapability(path, capability); + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java index e197506edc88b..27e75d8a25df6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java @@ -448,4 +448,10 @@ public boolean hasPathCapability(final Path path, throws IOException { return myFs.hasPathCapability(path, capability); } + + @Override + public MultipartUploaderBuilder createMultipartUploader(final Path basePath) + throws IOException { + return myFs.createMultipartUploader(basePath); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java similarity index 51% rename from hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java index e9959c192df83..2db33eead9288 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,26 +15,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileSystemMultipartUploader; -import org.apache.hadoop.fs.MultipartUploader; -import org.apache.hadoop.fs.MultipartUploaderFactory; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +package org.apache.hadoop.fs; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; + /** - * Support for HDFS multipart uploads, built on - * {@link FileSystem#concat(Path, Path[])}. + * This method allows access to Package-scoped operations from classes + * in org.apache.hadoop.fs.impl and other file system implementations + * in the hadoop modules. + * This is absolutely not for used by any other application or library. 
*/ -public class DFSMultipartUploaderFactory extends MultipartUploaderFactory { - protected MultipartUploader createMultipartUploader(FileSystem fs, - Configuration conf) { - if (fs.getScheme().equals(HdfsConstants.HDFS_URI_SCHEME)) { - return new FileSystemMultipartUploader(fs); - } - return null; +@InterfaceAudience.Private +public class InternalOperations { + + @SuppressWarnings("deprecation") // rename w/ OVERWRITE + public void rename(FileSystem fs, final Path src, final Path dst, + final Options.Rename...options) throws IOException { + fs.rename(src, dst, options); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java index 7ed987eed90dd..89848dc29ded0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,45 +15,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package org.apache.hadoop.fs; import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.concurrent.CompletableFuture; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import static com.google.common.base.Preconditions.checkArgument; - /** * MultipartUploader is an interface for copying files multipart and across - * multiple nodes. Users should: - *
- *   1. Initialize an upload.
- *   2. Upload parts in any order.
- *   3. Complete the upload in order to have it materialize in the destination
- *      FS.
- *
+ * multiple nodes. */ -@InterfaceAudience.Private +@InterfaceAudience.Public @InterfaceStability.Unstable -public abstract class MultipartUploader implements Closeable { - public static final Logger LOG = - LoggerFactory.getLogger(MultipartUploader.class); +public interface MultipartUploader extends Closeable { - /** - * Perform any cleanup. - * The upload is not required to support any operations after this. - * @throws IOException problems on close. - */ - @Override - public void close() throws IOException { - } /** * Initialize a multipart upload. @@ -61,94 +42,64 @@ public void close() throws IOException { * @return unique identifier associating part uploads. * @throws IOException IO failure */ - public abstract UploadHandle initialize(Path filePath) throws IOException; + CompletableFuture startUpload(Path filePath) + throws IOException; /** * Put part as part of a multipart upload. * It is possible to have parts uploaded in any order (or in parallel). - * @param filePath Target path for upload (same as {@link #initialize(Path)}). + * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param partNumber Index of the part relative to others. + * @param filePath Target path for upload (as {@link #startUpload(Path)}). * @param inputStream Data for this part. Implementations MUST close this * stream after reading in the data. - * @param partNumber Index of the part relative to others. - * @param uploadId Identifier from {@link #initialize(Path)}. * @param lengthInBytes Target length to read from the stream. * @return unique PartHandle identifier for the uploaded part. * @throws IOException IO failure */ - public abstract PartHandle putPart(Path filePath, InputStream inputStream, - int partNumber, UploadHandle uploadId, long lengthInBytes) + CompletableFuture putPart( + UploadHandle uploadId, + int partNumber, + Path filePath, + InputStream inputStream, + long lengthInBytes) throws IOException; /** * Complete a multipart upload. - * @param filePath Target path for upload (same as {@link #initialize(Path)}. + * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param filePath Target path for upload (as {@link #startUpload(Path)}. * @param handles non-empty map of part number to part handle. - * from {@link #putPart(Path, InputStream, int, UploadHandle, long)}. - * @param multipartUploadId Identifier from {@link #initialize(Path)}. + * from {@link #putPart(UploadHandle, int, Path, InputStream, long)}. * @return unique PathHandle identifier for the uploaded file. * @throws IOException IO failure */ - public abstract PathHandle complete(Path filePath, - Map handles, - UploadHandle multipartUploadId) + CompletableFuture complete( + UploadHandle uploadId, + Path filePath, + Map handles) throws IOException; /** * Aborts a multipart upload. - * @param filePath Target path for upload (same as {@link #initialize(Path)}. - * @param multipartUploadId Identifier from {@link #initialize(Path)}. + * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param filePath Target path for upload (same as {@link #startUpload(Path)}. * @throws IOException IO failure + * @return a future; the operation will have completed */ - public abstract void abort(Path filePath, UploadHandle multipartUploadId) + CompletableFuture abort(UploadHandle uploadId, Path filePath) throws IOException; /** - * Utility method to validate uploadIDs. 
- * @param uploadId Upload ID - * @throws IllegalArgumentException invalid ID - */ - protected void checkUploadId(byte[] uploadId) - throws IllegalArgumentException { - checkArgument(uploadId != null, "null uploadId"); - checkArgument(uploadId.length > 0, - "Empty UploadId is not valid"); - } - - /** - * Utility method to validate partHandles. - * @param partHandles handles - * @throws IllegalArgumentException if the parts are invalid + * Best effort attempt to aborts multipart uploads under a path. + * Not all implementations support this, and those which do may + * be vulnerable to eventually consistent listings of current uploads + * -some may be missed. + * @param path path to abort uploads under. + * @return a future to the number of entries aborted; + * -1 if aborting is unsupported + * @throws IOException IO failure */ - protected void checkPartHandles(Map partHandles) { - checkArgument(!partHandles.isEmpty(), - "Empty upload"); - partHandles.keySet() - .stream() - .forEach(key -> - checkArgument(key > 0, - "Invalid part handle index %s", key)); - } + CompletableFuture abortUploadsUnderPath(Path path) throws IOException; - /** - * Check all the arguments to the - * {@link #putPart(Path, InputStream, int, UploadHandle, long)} operation. - * @param filePath Target path for upload (same as {@link #initialize(Path)}). - * @param inputStream Data for this part. Implementations MUST close this - * stream after reading in the data. - * @param partNumber Index of the part relative to others. - * @param uploadId Identifier from {@link #initialize(Path)}. - * @param lengthInBytes Target length to read from the stream. - * @throws IllegalArgumentException invalid argument - */ - protected void checkPutArguments(Path filePath, - InputStream inputStream, - int partNumber, - UploadHandle uploadId, - long lengthInBytes) throws IllegalArgumentException { - checkArgument(filePath != null, "null filePath"); - checkArgument(inputStream != null, "null inputStream"); - checkArgument(partNumber > 0, "Invalid part number: %d", partNumber); - checkArgument(uploadId != null, "null uploadId"); - checkArgument(lengthInBytes >= 0, "Invalid part length: %d", lengthInBytes); - } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java new file mode 100644 index 0000000000000..381bfaa07f6d1 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs; + +import javax.annotation.Nonnull; +import java.io.IOException; + +import org.apache.hadoop.fs.permission.FsPermission; + +/** + * Builder interface for Multipart readers. + * @param + * @param + */ +public interface MultipartUploaderBuilder> + extends FSBuilder { + + /** + * Set permission for the file. + */ + B permission(@Nonnull FsPermission perm); + + /** + * Set the size of the buffer to be used. + */ + B bufferSize(int bufSize); + + /** + * Set replication factor. + */ + B replication(short replica); + + /** + * Set block size. + */ + B blockSize(long blkSize); + + /** + * Create an FSDataOutputStream at the specified path. + */ + B create(); + + /** + * Set to true to overwrite the existing file. + * Set it to false, an exception will be thrown when calling {@link #build()} + * if the file exists. + */ + B overwrite(boolean overwrite); + + /** + * Append to an existing file (optional operation). + */ + B append(); + + /** + * Set checksum opt. + */ + B checksumOpt(@Nonnull Options.ChecksumOpt chksumOpt); + + /** + * Create the FSDataOutputStream to write on the file system. + * + * @throws IllegalArgumentException if the parameters are not valid. + * @throws IOException on errors when file system creates or appends the file. + */ + S build() throws IllegalArgumentException, IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java deleted file mode 100644 index e35b6bf18bbd6..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Iterator; -import java.util.ServiceLoader; - -/** - * {@link ServiceLoader}-driven uploader API for storage services supporting - * multipart uploads. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public abstract class MultipartUploaderFactory { - public static final Logger LOG = - LoggerFactory.getLogger(MultipartUploaderFactory.class); - - /** - * Multipart Uploaders listed as services. - */ - private static ServiceLoader serviceLoader = - ServiceLoader.load(MultipartUploaderFactory.class, - MultipartUploaderFactory.class.getClassLoader()); - - // Iterate through the serviceLoader to avoid lazy loading. 
- // Lazy loading would require synchronization in concurrent use cases. - static { - Iterator iterServices = serviceLoader.iterator(); - while (iterServices.hasNext()) { - iterServices.next(); - } - } - - /** - * Get the multipart loader for a specific filesystem. - * @param fs filesystem - * @param conf configuration - * @return an uploader, or null if one was found. - * @throws IOException failure during the creation process. - */ - public static MultipartUploader get(FileSystem fs, Configuration conf) - throws IOException { - MultipartUploader mpu = null; - for (MultipartUploaderFactory factory : serviceLoader) { - mpu = factory.createMultipartUploader(fs, conf); - if (mpu != null) { - break; - } - } - return mpu; - } - - protected abstract MultipartUploader createMultipartUploader(FileSystem fs, - Configuration conf) throws IOException; -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java new file mode 100644 index 0000000000000..d8b7fe0744087 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.impl; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; + +import com.google.common.base.Preconditions; + +import org.apache.hadoop.fs.MultipartUploader; +import org.apache.hadoop.fs.PartHandle; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UploadHandle; + +import static com.google.common.base.Preconditions.checkArgument; + +/** + * Standard base class for Multipart Uploaders. + */ +public abstract class AbstractMultipartUploader implements MultipartUploader { + + /** + * Base path of upload. + */ + private final Path basePath; + + /** + * Instantiate. + * @param basePath base path + */ + protected AbstractMultipartUploader(final Path basePath) { + this.basePath = Objects.requireNonNull(basePath, "null path"); + } + + /** + * Perform any cleanup. + * The upload is not required to support any operations after this. + * @throws IOException problems on close. + */ + @Override + public void close() throws IOException { + } + + protected Path getBasePath() { + return basePath; + } + + /** + * Validate a path. + * @param path path to check. 
+ */ + protected void checkPath(Path path) { + Objects.requireNonNull(path, "null path"); + Preconditions.checkArgument(path.toString().startsWith(basePath.toString()), + "Path %s is not under %s", path, basePath); + } + + /** + * Utility method to validate uploadIDs. + * @param uploadId Upload ID + * @throws IllegalArgumentException invalid ID + */ + protected void checkUploadId(byte[] uploadId) + throws IllegalArgumentException { + checkArgument(uploadId != null, "null uploadId"); + checkArgument(uploadId.length > 0, + "Empty UploadId is not valid"); + } + + /** + * Utility method to validate partHandles. + * @param partHandles handles + * @throws IllegalArgumentException if the parts are invalid + */ + protected void checkPartHandles(Map partHandles) { + checkArgument(!partHandles.isEmpty(), + "Empty upload"); + partHandles.keySet() + .stream() + .forEach(key -> + checkArgument(key > 0, + "Invalid part handle index %s", key)); + } + + /** + * Check all the arguments to the + * {@link MultipartUploader#putPart(UploadHandle, int, Path, InputStream, long)} + * operation. + * @param filePath Target path for upload (as {@link #startUpload(Path)}). + * @param inputStream Data for this part. Implementations MUST close this + * stream after reading in the data. + * @param partNumber Index of the part relative to others. + * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param lengthInBytes Target length to read from the stream. + * @throws IllegalArgumentException invalid argument + */ + protected void checkPutArguments(Path filePath, + InputStream inputStream, + int partNumber, + UploadHandle uploadId, + long lengthInBytes) throws IllegalArgumentException { + checkPath(filePath); + checkArgument(inputStream != null, "null inputStream"); + checkArgument(partNumber > 0, "Invalid part number: %d", partNumber); + checkArgument(uploadId != null, "null uploadId"); + checkArgument(lengthInBytes >= 0, "Invalid part length: %d", lengthInBytes); + } + + /** + * {@inheritDoc}. + * @param path path to abort uploads under. + * @return a future to -1. + * @throws IOException + */ + public CompletableFuture abortUploadsUnderPath(Path path) + throws IOException { + checkPath(path); + CompletableFuture f = new CompletableFuture<>(); + f.complete(-1); + return f; + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java similarity index 52% rename from hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java index b77c244220a9e..ae0def0e378d4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java @@ -14,24 +14,42 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.fs; + +package org.apache.hadoop.fs.impl; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Comparator; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; +import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; import com.google.common.base.Charsets; +import com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.commons.compress.utils.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BBPartHandle; +import org.apache.hadoop.fs.BBUploadHandle; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FSDataOutputStreamBuilder; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.InternalOperations; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.PartHandle; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathHandle; +import org.apache.hadoop.fs.UploadHandle; import org.apache.hadoop.fs.permission.FsPermission; import static org.apache.hadoop.fs.Path.mergePaths; @@ -50,40 +68,82 @@ */ @InterfaceAudience.Private @InterfaceStability.Unstable -public class FileSystemMultipartUploader extends MultipartUploader { +public class FileSystemMultipartUploader extends AbstractMultipartUploader { + + private static final Logger LOG = LoggerFactory.getLogger( + FileSystemMultipartUploader.class); private final FileSystem fs; - public FileSystemMultipartUploader(FileSystem fs) { + private final FileSystemMultipartUploaderBuilder builder; + + private final FsPermission permission; + + private final long blockSize; + + private final Options.ChecksumOpt checksumOpt; + + public FileSystemMultipartUploader( + final FileSystemMultipartUploaderBuilder builder, + FileSystem fs) { + super(builder.getPath()); + this.builder = builder; this.fs = fs; + blockSize = builder.getBlockSize(); + checksumOpt = builder.getChecksumOpt(); + permission = builder.getPermission(); } @Override - public UploadHandle initialize(Path filePath) throws IOException { - Path collectorPath = createCollectorPath(filePath); - fs.mkdirs(collectorPath, FsPermission.getDirDefault()); + public CompletableFuture startUpload(Path filePath) + throws IOException { + checkPath(filePath); + return FutureIOSupport.eval(() -> { + Path collectorPath = createCollectorPath(filePath); + fs.mkdirs(collectorPath, FsPermission.getDirDefault()); - ByteBuffer byteBuffer = ByteBuffer.wrap( - collectorPath.toString().getBytes(Charsets.UTF_8)); - return BBUploadHandle.from(byteBuffer); + ByteBuffer byteBuffer = ByteBuffer.wrap( + collectorPath.toString().getBytes(Charsets.UTF_8)); + return BBUploadHandle.from(byteBuffer); + }); } @Override - public PartHandle putPart(Path filePath, InputStream inputStream, - int partNumber, UploadHandle uploadId, long lengthInBytes) + public CompletableFuture putPart(UploadHandle uploadId, + int partNumber, Path filePath, + InputStream inputStream, + long lengthInBytes) throws IOException { checkPutArguments(filePath, inputStream, partNumber, uploadId, lengthInBytes); + return FutureIOSupport.eval(() -> innerPutPart(filePath, + inputStream, partNumber, uploadId, lengthInBytes)); + } + + private PartHandle 
innerPutPart(Path filePath, + InputStream inputStream, + int partNumber, + UploadHandle uploadId, + long lengthInBytes) + throws IOException { byte[] uploadIdByteArray = uploadId.toByteArray(); checkUploadId(uploadIdByteArray); Path collectorPath = new Path(new String(uploadIdByteArray, 0, uploadIdByteArray.length, Charsets.UTF_8)); Path partPath = mergePaths(collectorPath, mergePaths(new Path(Path.SEPARATOR), - new Path(Integer.toString(partNumber) + ".part"))); - try(FSDataOutputStream fsDataOutputStream = - fs.createFile(partPath).build()) { - IOUtils.copy(inputStream, fsDataOutputStream, 4096); + new Path(partNumber + ".part"))); + final FSDataOutputStreamBuilder fileBuilder = fs.createFile(partPath); + if (checksumOpt != null) { + fileBuilder.checksumOpt(checksumOpt); + } + if (permission != null) { + fileBuilder.permission(permission); + } + try (FSDataOutputStream fsDataOutputStream = + fileBuilder.blockSize(blockSize).build()) { + IOUtils.copy(inputStream, fsDataOutputStream, + this.builder.getBufferSize()); } finally { cleanupWithLogger(LOG, inputStream); } @@ -106,16 +166,36 @@ private PathHandle getPathHandle(Path filePath) throws IOException { private long totalPartsLen(List partHandles) throws IOException { long totalLen = 0; - for (Path p: partHandles) { + for (Path p : partHandles) { totalLen += fs.getFileStatus(p).getLen(); } return totalLen; } @Override - @SuppressWarnings("deprecation") // rename w/ OVERWRITE - public PathHandle complete(Path filePath, Map handleMap, - UploadHandle multipartUploadId) throws IOException { + public CompletableFuture complete( + UploadHandle uploadId, + Path filePath, + Map handleMap) throws IOException { + + checkPath(filePath); + return FutureIOSupport.eval(() -> + innerComplete(uploadId, filePath, handleMap)); + } + + /** + * The upload complete operation. + * @param multipartUploadId the ID of the upload + * @param filePath path + * @param handleMap map of handles + * @return the path handle + * @throws IOException failure + */ + private PathHandle innerComplete( + UploadHandle multipartUploadId, Path filePath, + Map handleMap) throws IOException { + + checkPath(filePath); checkUploadId(multipartUploadId.toByteArray()); @@ -133,6 +213,13 @@ public PathHandle complete(Path filePath, Map handleMap, }) .collect(Collectors.toList()); + int count = partHandles.size(); + // built up to identify duplicates -if the size of this set is + // below that of the number of parts, then there's a duplicate entry. 
+ Set values = new HashSet<>(count); + values.addAll(partHandles); + Preconditions.checkArgument(values.size() == count, + "Duplicate PartHandles"); byte[] uploadIdByteArray = multipartUploadId.toByteArray(); Path collectorPath = new Path(new String(uploadIdByteArray, 0, uploadIdByteArray.length, Charsets.UTF_8)); @@ -146,35 +233,30 @@ public PathHandle complete(Path filePath, Map handleMap, fs.create(filePathInsideCollector).close(); fs.concat(filePathInsideCollector, partHandles.toArray(new Path[handles.size()])); - fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE); + new InternalOperations() + .rename(fs, filePathInsideCollector, filePath, + Options.Rename.OVERWRITE); } fs.delete(collectorPath, true); return getPathHandle(filePath); } @Override - public void abort(Path filePath, UploadHandle uploadId) throws IOException { + public CompletableFuture abort(UploadHandle uploadId, + Path filePath) + throws IOException { + checkPath(filePath); byte[] uploadIdByteArray = uploadId.toByteArray(); checkUploadId(uploadIdByteArray); Path collectorPath = new Path(new String(uploadIdByteArray, 0, uploadIdByteArray.length, Charsets.UTF_8)); - // force a check for a file existing; raises FNFE if not found - fs.getFileStatus(collectorPath); - fs.delete(collectorPath, true); - } - - /** - * Factory for creating MultipartUploaderFactory objects for file:// - * filesystems. - */ - public static class Factory extends MultipartUploaderFactory { - protected MultipartUploader createMultipartUploader(FileSystem fs, - Configuration conf) { - if (fs.getScheme().equals("file")) { - return new FileSystemMultipartUploader(fs); - } + return FutureIOSupport.eval(() -> { + // force a check for a file existing; raises FNFE if not found + fs.getFileStatus(collectorPath); + fs.delete(collectorPath, true); return null; - } + }); } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java new file mode 100644 index 0000000000000..7c4d995c69d1b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.impl; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.EnumSet; + +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; + +/** + * Builder for {@link FileSystemMultipartUploader}. 
+ */ +public class FileSystemMultipartUploaderBuilder extends + MultipartUploaderBuilderImpl { + + public FileSystemMultipartUploaderBuilder( + @Nonnull final FileSystem fileSystem, + @Nonnull final Path path) { + super(fileSystem, path); + } + + @Override + public FileSystemMultipartUploaderBuilder getThisBuilder() { + return this; + } + + @Override + public FileSystemMultipartUploader build() + throws IllegalArgumentException, IOException { + return new FileSystemMultipartUploader(this, getFS()); + } + + @Override + public FileSystem getFS() { + return super.getFS(); + } + + @Override + public FsPermission getPermission() { + return super.getPermission(); + } + + @Override + public int getBufferSize() { + return super.getBufferSize(); + } + + @Override + public short getReplication() { + return super.getReplication(); + } + + @Override + public EnumSet getFlags() { + return super.getFlags(); + } + + @Override + public Options.ChecksumOpt getChecksumOpt() { + return super.getChecksumOpt(); + } + + @Override + protected long getBlockSize() { + return super.getBlockSize(); + } + + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java index 26856e5b935e0..f13d701803d7e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.InterruptedIOException; import java.util.Map; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -52,7 +53,7 @@ private FutureIOSupport() { * @throws IOException if something went wrong * @throws RuntimeException any nested RTE thrown */ - public static T awaitFuture(final Future future) + public static T awaitFuture(final Future future) throws InterruptedIOException, IOException, RuntimeException { try { return future.get(); @@ -224,4 +225,29 @@ public static void propagateOptions( } } } + + /** + * Evaluate a CallableRaisingIOE in the current thread, + * converting IOEs to RTEs and propagating. + * @param callable callable to invoke + * @param Return type. + * @return the evaluated result. 
+ * @throws UnsupportedOperationException fail fast if unsupported + * @throws IllegalArgumentException invalid argument + */ + public static CompletableFuture eval( + FunctionsRaisingIOE.CallableRaisingIOE callable) { + CompletableFuture result = new CompletableFuture<>(); + try { + result.complete(callable.apply()); + } catch (UnsupportedOperationException | IllegalArgumentException tx) { + // fail fast here + throw tx; + } catch (Throwable tx) { + // fail lazily here to ensure callers expect all File IO operations to + // surface later + result.completeExceptionally(tx); + } + return result; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java new file mode 100644 index 0000000000000..6c3336e6882b3 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java @@ -0,0 +1,215 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.impl; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.EnumSet; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.MultipartUploader; +import org.apache.hadoop.fs.MultipartUploaderBuilder; +import org.apache.hadoop.fs.Options.ChecksumOpt; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; + +import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; + +/** + * Builder for {@link MultipartUploader} implementations. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class MultipartUploaderBuilderImpl + > + extends AbstractFSBuilderImpl + implements MultipartUploaderBuilder { + + private final FileSystem fs; + + private FsPermission permission; + + private int bufferSize; + + private short replication; + + private long blockSize; + + private final EnumSet flags = EnumSet.noneOf(CreateFlag.class); + + private ChecksumOpt checksumOpt; + + /** + * Return the concrete implementation of the builder instance. + */ + public abstract B getThisBuilder(); + + /** + * Construct from a {@link FileContext}. + * + * @param fc FileContext + * @param p path. 
+ * @throws IOException failure + */ + protected MultipartUploaderBuilderImpl(@Nonnull FileContext fc, + @Nonnull Path p) throws IOException { + super(checkNotNull(p)); + checkNotNull(fc); + this.fs = null; + + FsServerDefaults defaults = fc.getServerDefaults(p); + bufferSize = defaults.getFileBufferSize(); + replication = defaults.getReplication(); + blockSize = defaults.getBlockSize(); + } + + /** + * Constructor. + */ + protected MultipartUploaderBuilderImpl(@Nonnull FileSystem fileSystem, + @Nonnull Path p) { + super(fileSystem.makeQualified(checkNotNull(p))); + checkNotNull(fileSystem); + fs = fileSystem; + bufferSize = fs.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, + IO_FILE_BUFFER_SIZE_DEFAULT); + replication = fs.getDefaultReplication(p); + blockSize = fs.getDefaultBlockSize(p); + } + + protected FileSystem getFS() { + checkNotNull(fs); + return fs; + } + + protected FsPermission getPermission() { + if (permission == null) { + permission = FsPermission.getFileDefault(); + } + return permission; + } + + /** + * Set permission for the file. + */ + @Override + public B permission(@Nonnull final FsPermission perm) { + checkNotNull(perm); + permission = perm; + return getThisBuilder(); + } + + protected int getBufferSize() { + return bufferSize; + } + + /** + * Set the size of the buffer to be used. + */ + @Override + public B bufferSize(int bufSize) { + bufferSize = bufSize; + return getThisBuilder(); + } + + protected short getReplication() { + return replication; + } + + /** + * Set replication factor. + */ + @Override + public B replication(short replica) { + replication = replica; + return getThisBuilder(); + } + + protected long getBlockSize() { + return blockSize; + } + + /** + * Set block size. + */ + @Override + public B blockSize(long blkSize) { + blockSize = blkSize; + return getThisBuilder(); + } + + protected EnumSet getFlags() { + return flags; + } + + /** + * Create an FSDataOutputStream at the specified path. + */ + @Override + public B create() { + flags.add(CreateFlag.CREATE); + return getThisBuilder(); + } + + /** + * Set to true to overwrite the existing file. + * Set it to false, an exception will be thrown when calling {@link #build()} + * if the file exists. + */ + @Override + public B overwrite(boolean overwrite) { + if (overwrite) { + flags.add(CreateFlag.OVERWRITE); + } else { + flags.remove(CreateFlag.OVERWRITE); + } + return getThisBuilder(); + } + + /** + * Append to an existing file (optional operation). + */ + @Override + public B append() { + flags.add(CreateFlag.APPEND); + return getThisBuilder(); + } + + protected ChecksumOpt getChecksumOpt() { + return checksumOpt; + } + + /** + * Set checksum opt. + */ + @Override + public B checksumOpt(@Nonnull final ChecksumOpt chksumOpt) { + checkNotNull(chksumOpt); + checksumOpt = chksumOpt; + return getThisBuilder(); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory deleted file mode 100644 index f0054fedb8e1c..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.apache.hadoop.fs.FileSystemMultipartUploader$Factory diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md index 629c0c418fdf2..906c592eea09d 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md @@ -14,14 +14,14 @@ - + -# class `org.apache.hadoop.fs.MultipartUploader` +# interface `org.apache.hadoop.fs.MultipartUploader` -The abstract `MultipartUploader` class is the original class to upload a file +The `MultipartUploader` can upload a file using multiple parts to Hadoop-supported filesystems. The benefits of a multipart upload is that the file can be uploaded from multiple clients or processes in parallel and the results will not be visible to other clients until @@ -30,13 +30,12 @@ the `complete` function is called. When implemented by an object store, uploaded data may incur storage charges, even before it is visible in the filesystems. Users of this API must be diligent and always perform best-effort attempts to complete or abort the upload. +The `abortUploadsUnderPath(path)` operation can help here. ## Invariants -All the requirements of a valid MultipartUploader are considered implicit +All the requirements of a valid `MultipartUploader` are considered implicit econditions and postconditions: -all operations on a valid MultipartUploader MUST result in a new -MultipartUploader that is also valid. The operations of a single multipart upload may take place across different instance of a multipart uploader, across different processes and hosts. @@ -45,16 +44,28 @@ It is therefore a requirement that: 1. All state needed to upload a part, complete an upload or abort an upload must be contained within or retrievable from an upload handle. -1. If an upload handle is marshalled to another process, then, if the -receiving process has the correct permissions, it may participate in the -upload, by uploading one or more parts, by completing an upload, and/or by -aborting the upload. +1. That handle MUST be serializable; it MUST be deserializable to different +processes executing the exact same version of Hadoop. + +1. different hosts/processes MAY upload different parts, sequentially or +simultaneously. The order in which they are uploaded to the filesystem +MUST NOT constrain the order in which the data is stored in the final file. + +1. An upload MAY be completed on a different instance than any which uploaded +parts. + +1. The output of an upload MUST NOT be visible at the final destination +until the upload may complete. + +1. It is not an error if a single multipart uploader instance initiates +or completes multiple uploads files to the same destination sequentially, +irrespective of whether or not the store supports concurrent uploads. 
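The following is a non-normative sketch of how a client might exercise these requirements end to end. Everything in it is illustrative: the class, method and variable names are invented, the payload is an in-memory byte array, and `awaitFuture()` refers to the blocking helper in `org.apache.hadoop.fs.impl.FutureIOSupport` that the contract tests also use.

```java
import java.io.ByteArrayInputStream;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;

import static org.apache.hadoop.fs.impl.FutureIOSupport.awaitFuture;

/** Illustrative sketch only; not part of the specification. */
public class MultipartUploadSketch {

  /** Upload {@code data} to {@code dest} as a one-part multipart upload. */
  public PathHandle upload(FileSystem fs, Path dest, byte[] data)
      throws Exception {
    // Probe the store first: not every filesystem supports the API.
    if (!fs.hasPathCapability(dest,
        CommonPathCapabilities.FS_MULTIPART_UPLOADER)) {
      throw new UnsupportedOperationException(
          "no multipart upload support under " + dest);
    }
    // Build an uploader; the base path must be at or above every
    // destination used through this instance. The uploader is Closeable.
    try (MultipartUploader uploader =
             fs.createMultipartUploader(dest).build()) {
      UploadHandle upload = awaitFuture(uploader.startUpload(dest));
      try {
        // Parts may be uploaded in any order, from any process holding
        // the (serializable) upload handle; here there is a single part.
        PartHandle part = awaitFuture(uploader.putPart(upload, 1, dest,
            new ByteArrayInputStream(data), data.length));
        Map<Integer, PartHandle> parts = new HashMap<>();
        parts.put(1, part);
        // Nothing is visible at dest until complete() succeeds.
        return awaitFuture(uploader.complete(upload, dest, parts));
      } catch (Exception e) {
        // Abort so that pending parts do not linger and, on object
        // stores, do not keep incurring storage charges.
        awaitFuture(uploader.abort(upload, dest));
        throw e;
      }
    }
  }
}
```

The capability probe and the `abort()` on the failure path are conventions of this sketch, not additional guarantees of the API; they simply follow the guidance above about being diligent in completing or aborting uploads.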
## Concurrency Multiple processes may upload parts of a multipart upload simultaneously. -If a call is made to `initialize(path)` to a destination where an active +If a call is made to `startUpload(path)` to a destination where an active upload is in progress, implementations MUST perform one of the two operations. * Reject the call as a duplicate. @@ -70,9 +81,17 @@ the in-progress upload, if it has not completed, must not be included in the final file, in whole or in part. Implementations SHOULD raise an error in the `putPart()` operation. +# Serialization Compatibility + +Users MUST NOT expect that serialized PathHandle versions are compatible across +* different multipart uploader implementations. +* different versions of the same implementation. + +That is: all clients MUST use the exact same version of Hadoop. + ## Model -A File System which supports Multipart Uploads extends the existing model +A FileSystem/FileContext which supports Multipart Uploads extends the existing model `(Directories, Files, Symlinks)` to one of `(Directories, Files, Symlinks, Uploads)` `Uploads` of type `Map[UploadHandle -> Map[PartHandle -> UploadPart]`. @@ -112,11 +131,40 @@ However, if Part Handles are rapidly recycled, there is a risk that the nominall idempotent operation `abort(FS, uploadHandle)` could unintentionally cancel a successor operation which used the same Upload Handle. +## Asynchronous API + +All operations return `CompletableFuture<>` types which must be +subsequently evaluated to get their return values. + +1. The execution of the operation MAY be a blocking operation in on the call thread. +1. If not, it SHALL be executed in a separate thread and MUST complete by the time the +future evaluation returns. +1. Some/All preconditions MAY be evaluated at the time of initial invocation, +1. All those which are not evaluated at that time, MUST Be evaluated during the execution +of the future. + + +What this means is that when an implementation interacts with a fast file system/store all preconditions +including the existence of files MAY be evaluated early, whereas and implementation interacting with a +remote object store whose probes are slow MAY verify preconditions in the asynchronous phase -especially +those which interact with the remote store. + +Java CompletableFutures do not work well with checked exceptions. The Hadoop codease is still evolving the +details of the exception handling here, as more use is made of the asynchronous APIs. Assume that any +precondition failure which declares that an `IOException` MUST be raised may have that operation wrapped in a +`RuntimeException` of some form if evaluated in the future; this also holds for any other `IOException` +raised during the operations. + +### `close()` + +Applications MUST call `close()` after using an uploader; this is so it may release other +objects, update statistics, etc. + ## State Changing Operations -### `UploadHandle initialize(Path path)` +### `CompletableFuture startUpload(Path)` -Initialized a Multipart Upload, returning an upload handle for use in +Starts a Multipart Upload, ultimately returning an `UploadHandle` for use in subsequent operations. 
#### Preconditions @@ -128,17 +176,15 @@ if exists(FS, path) and not isFile(FS, path) raise PathIsDirectoryException, IOE ``` If a filesystem does not support concurrent uploads to a destination, -then the following precondition is added +then the following precondition is added: ```python if path in values(FS.Uploads) raise PathExistsException, IOException - ``` - #### Postconditions -The outcome of this operation is that the filesystem state is updated with a new +Once the initialization operation completes, the filesystem state is updated with a new active upload, with a new handle, this handle being returned to the caller. ```python @@ -147,9 +193,10 @@ FS' = FS where FS'.Uploads(handle') == {} result = handle' ``` -### `PartHandle putPart(Path path, InputStream inputStream, int partNumber, UploadHandle uploadHandle, long lengthInBytes)` +### `CompletableFuture putPart(UploadHandle uploadHandle, int partNumber, Path filePath, InputStream inputStream, long lengthInBytes)` -Upload a part for the multipart upload. +Upload a part for the specific multipart upload, eventually being returned an opaque part handle +represting this part of the specified upload. #### Preconditions @@ -170,10 +217,12 @@ FS' = FS where FS'.uploads(uploadHandle).parts(partHandle') == data' result = partHandle' ``` -The data is stored in the filesystem, pending completion. +The data is stored in the filesystem, pending completion. It MUST NOT be visible at the destination path. +It MAY be visible in a temporary path somewhere in the file system; +This is implementation-specific and MUST NOT be relied upon. -### `PathHandle complete(Path path, Map parts, UploadHandle multipartUploadId)` +### ` CompletableFuture complete(UploadHandle uploadId, Path filePath, Map handles)` Complete the multipart upload. @@ -188,11 +237,23 @@ uploadHandle in keys(FS.Uploads) else raise FileNotFoundException FS.Uploads(uploadHandle).path == path if exists(FS, path) and not isFile(FS, path) raise PathIsDirectoryException, IOException parts.size() > 0 +forall k in keys(parts): k > 0 +forall k in keys(parts): + not exists(k2 in keys(parts)) where (parts[k] == parts[k2]) ``` -If there are handles in the MPU which aren't included in the map, then the omitted -parts will not be a part of the resulting file. It is up to the implementation -of the MultipartUploader to make sure the leftover parts are cleaned up. +All keys MUST be greater than zero, and there MUST not be any duplicate +references to the same parthandle. +These validations MAY be performed at any point during the operation. +After a failure, there is no guarantee that a `complete()` call for this +upload with a valid map of paths will complete. +Callers SHOULD invoke `abort()` after any such failure to ensure cleanup. + +if `putPart()` operations For this `uploadHandle` were performed But whose +`PathHandle` Handles were not included in this request -the omitted +parts SHALL NOT be a part of the resulting file. + +The MultipartUploader MUST clean up any such outstanding entries. 
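As a purely illustrative aside (the helper below is hypothetical and not part of any Hadoop API), a client can pre-check its part map against the rules just listed before calling `complete()`:

```java
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.fs.PartHandle;

/** Hypothetical client-side check mirroring the complete() preconditions. */
final class PartMapValidator {

  private PartMapValidator() {
  }

  static void validate(Map<Integer, PartHandle> parts) {
    if (parts.isEmpty()) {
      throw new IllegalArgumentException("at least one part is required");
    }
    Set<PartHandle> seen = new HashSet<>(parts.size());
    for (Map.Entry<Integer, PartHandle> entry : parts.entrySet()) {
      if (entry.getKey() <= 0) {
        throw new IllegalArgumentException(
            "part numbers must be greater than zero: " + entry.getKey());
      }
      if (!seen.add(entry.getValue())) {
        throw new IllegalArgumentException(
            "the same part handle appears under two part numbers");
      }
    }
    // Parts uploaded via putPart() but omitted from this map are simply
    // left out of the final file; that is legal, not an error.
  }
}
```

Implementations perform equivalent checks themselves (see `checkPartHandles()` in `AbstractMultipartUploader` and the duplicate detection added to `FileSystemMultipartUploader` in this patch); the sketch only spells out what will be rejected.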
In the case of backing stores that support directories (local filesystem, HDFS, etc), if, at the point of completion, there is now a directory at the @@ -206,14 +267,14 @@ exists(FS', path') and result = PathHandle(path') FS' = FS where FS.Files(path) == UploadData' and not uploadHandle in keys(FS'.uploads) ``` -The PathHandle is returned by the complete operation so subsequent operations +The `PathHandle` is returned by the complete operation so subsequent operations will be able to identify that the data has not changed in the meantime. The order of parts in the uploaded by file is that of the natural order of -parts: part 1 is ahead of part 2, etc. +parts in the map: part 1 is ahead of part 2, etc. -### `void abort(Path path, UploadHandle multipartUploadId)` +### `CompletableFuture abort(UploadHandle uploadId, Path filePath)` Abort a multipart upload. The handle becomes invalid and not subject to reuse. @@ -233,3 +294,23 @@ FS' = FS where not uploadHandle in keys(FS'.uploads) ``` A subsequent call to `abort()` with the same handle will fail, unless the handle has been recycled. + +### `CompletableFuture abortUploadsUnderPath(Path path)` + +Perform a best-effort cleanup of all uploads under a path. + +returns a future which resolves to. + + -1 if unsuppported + >= 0 if supported + +Because it is best effort a strict postcondition isn't possible. +The ideal postcondition is all uploads under the path are aborted, +and the count is the number of uploads aborted: + +```python +FS'.uploads forall upload in FS.uploads: + not isDescendant(FS, path, upload.path) +return len(forall upload in FS.uploads: + isDescendant(FS, path, upload.path)) +``` diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java index f0057a6c6d902..6cd450610b390 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java @@ -137,6 +137,12 @@ public Token[] addDelegationTokens(String renewer, Credentials creds) void setQuota(Path f, long namespaceQuota, long storagespaceQuota); void setQuotaByStorageType(Path f, StorageType type, long quota); StorageStatistics getStorageStatistics(); + + /* + Not passed through as the inner implementation will miss features + of the filter such as checksums. + */ + MultipartUploaderBuilder createMultipartUploader(Path basePath); } @Test @@ -278,6 +284,23 @@ public void testRenameOptions() throws Exception { verify(mockFs).rename(eq(src), eq(dst), eq(opt)); } + /** + * Verify that filterFS always returns false, even if local/rawlocal + * ever implement multipart uploads. 
+ */ + @Test + public void testFilterPathCapabilites() throws Exception { + try (FilterFileSystem flfs = new FilterLocalFileSystem()) { + flfs.initialize(URI.create("filter:/"), conf); + Path src = new Path("/src"); + assertFalse( + "hasPathCapability(FS_MULTIPART_UPLOADER) should have failed for " + + flfs, + flfs.hasPathCapability(src, + CommonPathCapabilities.FS_MULTIPART_UPLOADER)); + } + } + private void checkInit(FilterFileSystem fs, boolean expectInit) throws Exception { URI uri = URI.create("filter:/"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java index 2097633839112..8050ce6b4427d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java @@ -248,6 +248,9 @@ CompletableFuture openFileWithOptions( CompletableFuture openFileWithOptions( Path path, OpenFileParameters parameters) throws IOException; + + MultipartUploaderBuilder createMultipartUploader(Path basePath) + throws IOException; } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java index 7a8f0830eda37..31926964c897c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java @@ -26,8 +26,10 @@ import java.util.HashMap; import java.util.Map; import java.util.Random; +import java.util.concurrent.CompletableFuture; import com.google.common.base.Charsets; +import org.assertj.core.api.Assertions; import org.junit.Assume; import org.junit.Test; import org.slf4j.Logger; @@ -35,22 +37,31 @@ import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.io.IOUtils; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BBUploadHandle; +import org.apache.hadoop.fs.CommonPathCapabilities; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.MultipartUploader; -import org.apache.hadoop.fs.MultipartUploaderFactory; import org.apache.hadoop.fs.PartHandle; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathHandle; import org.apache.hadoop.fs.UploadHandle; +import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.DurationInfo; import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyPathExists; +import static org.apache.hadoop.fs.impl.FutureIOSupport.awaitFuture; import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; import static org.apache.hadoop.test.LambdaTestUtils.eventually; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +/** + * Tests of multipart uploads. + *

+ * Note: some of the tests get a random uploader between + * the two which are available. If tests fail intermittently, + * it may be because different uploaders are being selected. + */ public abstract class AbstractContractMultipartUploaderTest extends AbstractFSContractTestBase { @@ -63,36 +74,44 @@ public abstract class AbstractContractMultipartUploaderTest extends */ protected static final int SMALL_FILE = 100; - private MultipartUploader mpu; - private MultipartUploader mpu2; + protected static final int CONSISTENCY_INTERVAL = 1000; + + private MultipartUploader uploader0; + private MultipartUploader uploader1; private final Random random = new Random(); private UploadHandle activeUpload; private Path activeUploadPath; - protected String getMethodName() { - return methodName.getMethodName(); - } - @Override public void setup() throws Exception { super.setup(); - Configuration conf = getContract().getConf(); - mpu = MultipartUploaderFactory.get(getFileSystem(), conf); - mpu2 = MultipartUploaderFactory.get(getFileSystem(), conf); + + final FileSystem fs = getFileSystem(); + Path testPath = getContract().getTestPath(); + uploader0 = fs.createMultipartUploader(testPath).build(); + uploader1 = fs.createMultipartUploader(testPath).build(); } @Override public void teardown() throws Exception { - if (mpu!= null && activeUpload != null) { + MultipartUploader uploader = getUploader(1); + if (uploader != null) { + if (activeUpload != null) { + abortUploadQuietly(activeUpload, activeUploadPath); + } try { - mpu.abort(activeUploadPath, activeUpload); - } catch (FileNotFoundException ignored) { - /* this is fine */ + // round off with an abort of all uploads + Path teardown = getContract().getTestPath(); + LOG.info("Teardown: aborting outstanding uploads under {}", teardown); + CompletableFuture f + = uploader.abortUploadsUnderPath(teardown); + f.get(); } catch (Exception e) { - LOG.info("in teardown", e); + LOG.warn("Exeception in teardown", e); } } - cleanupWithLogger(LOG, mpu, mpu2); + + cleanupWithLogger(LOG, uploader0, uploader1); super.teardown(); } @@ -192,16 +211,16 @@ protected int timeToBecomeConsistentMillis() { * @param index index of upload * @return an uploader */ - protected MultipartUploader mpu(int index) { - return (index % 2 == 0) ? mpu : mpu2; + protected MultipartUploader getUploader(int index) { + return (index % 2 == 0) ? uploader0 : uploader1; } /** * Pick a multipart uploader at random. * @return an uploader */ - protected MultipartUploader randomMpu() { - return mpu(random.nextInt(10)); + protected MultipartUploader getRandomUploader() { + return getUploader(random.nextInt(10)); } /** @@ -211,39 +230,71 @@ protected MultipartUploader randomMpu() { @Test public void testSingleUpload() throws Exception { Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); MessageDigest origDigest = DigestUtils.getMd5Digest(); int size = SMALL_FILE; byte[] payload = generatePayload(1, size); origDigest.update(payload); + // use a single uploader + // note: the same is used here as it found a bug in the S3Guard + // DDB bulk operation state upload -the previous operation had + // added an entry to the ongoing state; this second call + // was interpreted as an inconsistent write. 
+ MultipartUploader completer = uploader0; + // and upload with uploader 1 to validate cross-uploader uploads PartHandle partHandle = putPart(file, uploadHandle, 1, payload); partHandles.put(1, partHandle); - PathHandle fd = completeUpload(file, uploadHandle, partHandles, - origDigest, - size); + PathHandle fd = complete(completer, uploadHandle, file, + partHandles); + + validateUpload(file, origDigest, size); + // verify that if the implementation processes data immediately + // then a second attempt at the upload will fail. if (finalizeConsumesUploadIdImmediately()) { intercept(FileNotFoundException.class, - () -> mpu.complete(file, partHandles, uploadHandle)); + () -> complete(completer, uploadHandle, file, partHandles)); } else { - PathHandle fd2 = mpu.complete(file, partHandles, uploadHandle); + // otherwise, the same or other uploader can try again. + PathHandle fd2 = complete(completer, uploadHandle, file, partHandles); assertArrayEquals("Path handles differ", fd.toByteArray(), fd2.toByteArray()); } } /** - * Initialize an upload. + * Complete IO for a specific uploader; await the response. + * @param uploader uploader + * @param uploadHandle Identifier + * @param file Target path for upload + * @param partHandles handles map of part number to part handle + * @return unique PathHandle identifier for the uploaded file. + */ + protected PathHandle complete( + final MultipartUploader uploader, + final UploadHandle uploadHandle, + final Path file, + final Map partHandles) + throws IOException { + try (DurationInfo d = + new DurationInfo(LOG, "Complete upload to %s", file)) { + return awaitFuture( + uploader.complete(uploadHandle, file, partHandles)); + } + } + + /** + * start an upload. * This saves the path and upload handle as the active * upload, for aborting in teardown * @param dest destination * @return the handle * @throws IOException failure to initialize */ - protected UploadHandle initializeUpload(final Path dest) throws IOException { + protected UploadHandle startUpload(final Path dest) throws IOException { activeUploadPath = dest; - activeUpload = randomMpu().initialize(dest); + activeUpload = awaitFuture(getRandomUploader().startUpload(dest)); return activeUpload; } @@ -283,12 +334,17 @@ protected PartHandle putPart(final Path file, final int index, final byte[] payload) throws IOException { ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); - PartHandle partHandle = mpu(index) - .putPart(file, - new ByteArrayInputStream(payload), - index, - uploadHandle, - payload.length); + PartHandle partHandle; + try (DurationInfo d = + new DurationInfo(LOG, "Put part %d (size %s) %s", + index, + payload.length, + file)) { + partHandle = awaitFuture(getUploader(index) + .putPart(uploadHandle, index, file, + new ByteArrayInputStream(payload), + payload.length)); + } timer.end("Uploaded part %s", index); LOG.info("Upload bandwidth {} MB/s", timer.bandwidthDescription(payload.length)); @@ -296,7 +352,7 @@ protected PartHandle putPart(final Path file, } /** - * Complete an upload with the active MPU instance. + * Complete an upload with a random uploader. 
* @param file destination * @param uploadHandle handle * @param partHandles map of handles @@ -312,36 +368,64 @@ private PathHandle completeUpload(final Path file, final int expectedLength) throws IOException { PathHandle fd = complete(file, uploadHandle, partHandles); - FileStatus status = verifyPathExists(getFileSystem(), - "Completed file", file); - assertEquals("length of " + status, - expectedLength, status.getLen()); + validateUpload(file, origDigest, expectedLength); + return fd; + } + + /** + * Complete an upload with a random uploader. + * @param file destination + * @param origDigest digest of source data (may be null) + * @param expectedLength expected length of result. + * @throws IOException IO failure + */ + private void validateUpload(final Path file, + final MessageDigest origDigest, + final int expectedLength) throws IOException { + verifyPathExists(getFileSystem(), + "Completed file", file); + verifyFileLength(file, expectedLength); if (origDigest != null) { verifyContents(file, origDigest, expectedLength); } - return fd; } /** * Verify the contents of a file. * @param file path * @param origDigest digest - * @param expectedLength expected length (for logging B/W) + * @param expectedLength expected length (for logging download bandwidth) * @throws IOException IO failure */ protected void verifyContents(final Path file, final MessageDigest origDigest, final int expectedLength) throws IOException { ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer(); - assertArrayEquals("digest of source and " + file - + " differ", - origDigest.digest(), digest(file)); + Assertions.assertThat(digest(file)) + .describedAs("digest of uploaded file %s", file) + .isEqualTo(origDigest.digest()); timer2.end("Completed digest", file); LOG.info("Download bandwidth {} MB/s", timer2.bandwidthDescription(expectedLength)); } + /** + * Verify the length of a file. + * @param file path + * @param expectedLength expected length + * @throws IOException IO failure + */ + private void verifyFileLength(final Path file, final long expectedLength) + throws IOException { + FileStatus st = getFileSystem().getFileStatus(file); + Assertions.assertThat(st) + .describedAs("Uploaded file %s", st) + .matches(FileStatus::isFile) + .extracting(FileStatus::getLen) + .isEqualTo(expectedLength); + } + /** * Perform the inner complete without verification. * @param file destination path @@ -353,21 +437,37 @@ protected void verifyContents(final Path file, private PathHandle complete(final Path file, final UploadHandle uploadHandle, final Map partHandles) throws IOException { - ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); - PathHandle fd = randomMpu().complete(file, partHandles, uploadHandle); - timer.end("Completed upload to %s", file); - return fd; + return complete(getRandomUploader(), uploadHandle, file, + partHandles); } /** * Abort an upload. - * @param file path * @param uploadHandle handle + * @param file path * @throws IOException failure */ - private void abortUpload(final Path file, UploadHandle uploadHandle) + private void abortUpload(UploadHandle uploadHandle, + final Path file) throws IOException { - randomMpu().abort(file, uploadHandle); + try (DurationInfo d = + new DurationInfo(LOG, "Abort upload to %s", file)) { + awaitFuture(getRandomUploader().abort(uploadHandle, file)); + } + } + + /** + * Abort an upload; swallows exceptions. 
+ * @param uploadHandle handle + * @param file path + */ + private void abortUploadQuietly(UploadHandle uploadHandle, Path file) { + try { + abortUpload(uploadHandle, file); + } catch (FileNotFoundException ignored) { + } catch (Exception e) { + LOG.info("aborting {}: {}", file, e.toString()); + } } /** @@ -377,10 +477,10 @@ private void abortUpload(final Path file, UploadHandle uploadHandle) @Test public void testMultipartUpload() throws Exception { Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); MessageDigest origDigest = DigestUtils.getMd5Digest(); - final int payloadCount = getTestPayloadCount(); + int payloadCount = getTestPayloadCount(); for (int i = 1; i <= payloadCount; ++i) { PartHandle partHandle = buildAndPutPart(file, uploadHandle, i, origDigest); @@ -400,16 +500,16 @@ public void testMultipartUploadEmptyPart() throws Exception { FileSystem fs = getFileSystem(); Path file = path("testMultipartUpload"); try (MultipartUploader uploader = - MultipartUploaderFactory.get(fs, null)) { - UploadHandle uploadHandle = uploader.initialize(file); + fs.createMultipartUploader(file).build()) { + UploadHandle uploadHandle = uploader.startUpload(file).get(); Map partHandles = new HashMap<>(); MessageDigest origDigest = DigestUtils.getMd5Digest(); byte[] payload = new byte[0]; origDigest.update(payload); InputStream is = new ByteArrayInputStream(payload); - PartHandle partHandle = uploader.putPart(file, is, 1, uploadHandle, - payload.length); + PartHandle partHandle = awaitFuture( + uploader.putPart(uploadHandle, 1, file, is, payload.length)); partHandles.put(1, partHandle); completeUpload(file, uploadHandle, partHandles, origDigest, 0); } @@ -422,7 +522,7 @@ public void testMultipartUploadEmptyPart() throws Exception { @Test public void testUploadEmptyBlock() throws Exception { Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); partHandles.put(1, putPart(file, uploadHandle, 1, new byte[0])); completeUpload(file, uploadHandle, partHandles, null, 0); @@ -435,10 +535,10 @@ public void testUploadEmptyBlock() throws Exception { @Test public void testMultipartUploadReverseOrder() throws Exception { Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); MessageDigest origDigest = DigestUtils.getMd5Digest(); - final int payloadCount = getTestPayloadCount(); + int payloadCount = getTestPayloadCount(); for (int i = 1; i <= payloadCount; ++i) { byte[] payload = generatePayload(i); origDigest.update(payload); @@ -459,7 +559,7 @@ public void testMultipartUploadReverseOrderNonContiguousPartNumbers() throws Exception { describe("Upload in reverse order and the part numbers are not contiguous"); Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); MessageDigest origDigest = DigestUtils.getMd5Digest(); int payloadCount = 2 * getTestPayloadCount(); for (int i = 2; i <= payloadCount; i += 2) { @@ -482,22 +582,22 @@ public void testMultipartUploadReverseOrderNonContiguousPartNumbers() public void testMultipartUploadAbort() throws Exception { describe("Upload and then abort it before completing"); Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); - int end = 10; + UploadHandle 
uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); for (int i = 12; i > 10; i--) { partHandles.put(i, buildAndPutPart(file, uploadHandle, i, null)); } - abortUpload(file, uploadHandle); + abortUpload(uploadHandle, file); String contents = "ThisIsPart49\n"; int len = contents.getBytes(Charsets.UTF_8).length; InputStream is = IOUtils.toInputStream(contents, "UTF-8"); intercept(IOException.class, - () -> mpu.putPart(file, is, 49, uploadHandle, len)); + () -> awaitFuture( + uploader0.putPart(uploadHandle, 49, file, is, len))); intercept(IOException.class, - () -> mpu.complete(file, partHandles, uploadHandle)); + () -> complete(uploader0, uploadHandle, file, partHandles)); assertPathDoesNotExist("Uploaded file should not exist", file); @@ -505,9 +605,9 @@ public void testMultipartUploadAbort() throws Exception { // consumed by finalization operations (complete, abort). if (finalizeConsumesUploadIdImmediately()) { intercept(FileNotFoundException.class, - () -> abortUpload(file, uploadHandle)); + () -> abortUpload(uploadHandle, file)); } else { - abortUpload(file, uploadHandle); + abortUpload(uploadHandle, file); } } @@ -519,31 +619,55 @@ public void testAbortUnknownUpload() throws Exception { Path file = methodPath(); ByteBuffer byteBuffer = ByteBuffer.wrap( "invalid-handle".getBytes(Charsets.UTF_8)); - UploadHandle uploadHandle = BBUploadHandle.from(byteBuffer); intercept(FileNotFoundException.class, - () -> abortUpload(file, uploadHandle)); + () -> abortUpload(BBUploadHandle.from(byteBuffer), file)); } /** - * Trying to abort with a handle of size 0 must fail. + * Trying to abort an upload with no data does not create a file. */ @Test public void testAbortEmptyUpload() throws Exception { describe("initialize upload and abort before uploading data"); Path file = methodPath(); - abortUpload(file, initializeUpload(file)); + abortUpload(startUpload(file), file); assertPathDoesNotExist("Uploaded file should not exist", file); } + + /** + * Trying to abort an upload with no data does not create a file. + */ + @Test + public void testAbortAllPendingUploads() throws Exception { + describe("initialize upload and abort the pending upload"); + Path path = methodPath(); + Path file = new Path(path, "child"); + UploadHandle upload = startUpload(file); + try { + CompletableFuture oF + = getRandomUploader().abortUploadsUnderPath(path.getParent()); + int abortedUploads = awaitFuture(oF); + if (abortedUploads >= 0) { + // uploads can be aborted + Assertions.assertThat(abortedUploads) + .describedAs("Number of uploads aborted") + .isGreaterThanOrEqualTo(1); + assertPathDoesNotExist("Uploaded file should not exist", file); + } + } finally { + abortUploadQuietly(upload, file); + } + } + /** * Trying to abort with a handle of size 0 must fail. 
*/ @Test public void testAbortEmptyUploadHandle() throws Exception { ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[0]); - UploadHandle uploadHandle = BBUploadHandle.from(byteBuffer); intercept(IllegalArgumentException.class, - () -> abortUpload(methodPath(), uploadHandle)); + () -> abortUpload(BBUploadHandle.from(byteBuffer), methodPath())); } /** @@ -553,10 +677,10 @@ public void testAbortEmptyUploadHandle() throws Exception { public void testCompleteEmptyUpload() throws Exception { describe("Expect an empty MPU to fail, but still be abortable"); Path dest = methodPath(); - UploadHandle handle = initializeUpload(dest); + UploadHandle handle = startUpload(dest); intercept(IllegalArgumentException.class, - () -> mpu.complete(dest, new HashMap<>(), handle)); - abortUpload(dest, handle); + () -> complete(uploader0, handle, dest, new HashMap<>())); + abortUpload(handle, dest); } /** @@ -571,7 +695,7 @@ public void testPutPartEmptyUploadID() throws Exception { byte[] payload = generatePayload(1); InputStream is = new ByteArrayInputStream(payload); intercept(IllegalArgumentException.class, - () -> mpu.putPart(dest, is, 1, emptyHandle, payload.length)); + () -> uploader0.putPart(emptyHandle, 1, dest, is, payload.length)); } /** @@ -581,7 +705,7 @@ public void testPutPartEmptyUploadID() throws Exception { public void testCompleteEmptyUploadID() throws Exception { describe("Expect IllegalArgumentException when complete uploadID is empty"); Path dest = methodPath(); - UploadHandle realHandle = initializeUpload(dest); + UploadHandle realHandle = startUpload(dest); UploadHandle emptyHandle = BBUploadHandle.from(ByteBuffer.wrap(new byte[0])); Map partHandles = new HashMap<>(); @@ -590,14 +714,14 @@ public void testCompleteEmptyUploadID() throws Exception { partHandles.put(1, partHandle); intercept(IllegalArgumentException.class, - () -> mpu.complete(dest, partHandles, emptyHandle)); + () -> complete(uploader0, emptyHandle, dest, partHandles)); // and, while things are setup, attempt to complete with // a part index of 0 partHandles.clear(); partHandles.put(0, partHandle); intercept(IllegalArgumentException.class, - () -> mpu.complete(dest, partHandles, realHandle)); + () -> complete(uploader0, realHandle, dest, partHandles)); } /** @@ -610,7 +734,7 @@ public void testCompleteEmptyUploadID() throws Exception { public void testDirectoryInTheWay() throws Exception { FileSystem fs = getFileSystem(); Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); int size = SMALL_FILE; PartHandle partHandle = putPart(file, uploadHandle, 1, @@ -622,7 +746,7 @@ public void testDirectoryInTheWay() throws Exception { () -> completeUpload(file, uploadHandle, partHandles, null, size)); // abort should still work - abortUpload(file, uploadHandle); + abortUpload(uploadHandle, file); } @Test @@ -630,46 +754,44 @@ public void testConcurrentUploads() throws Throwable { // if the FS doesn't support concurrent uploads, this test is // required to fail during the second initialization. 
- final boolean concurrent = supportsConcurrentUploadsToSamePath(); + boolean concurrent = supportsConcurrentUploadsToSamePath(); describe("testing concurrent uploads, MPU support for this is " + concurrent); - final FileSystem fs = getFileSystem(); - final Path file = methodPath(); - final int size1 = SMALL_FILE; - final int partId1 = 1; - final byte[] payload1 = generatePayload(partId1, size1); - final MessageDigest digest1 = DigestUtils.getMd5Digest(); + Path file = methodPath(); + int size1 = SMALL_FILE; + int partId1 = 1; + byte[] payload1 = generatePayload(partId1, size1); + MessageDigest digest1 = DigestUtils.getMd5Digest(); digest1.update(payload1); - final UploadHandle upload1 = initializeUpload(file); - final Map partHandles1 = new HashMap<>(); + UploadHandle upload1 = startUpload(file); + Map partHandles1 = new HashMap<>(); // initiate part 2 // by using a different size, it's straightforward to see which // version is visible, before reading/digesting the contents - final int size2 = size1 * 2; - final int partId2 = 2; - final byte[] payload2 = generatePayload(partId1, size2); - final MessageDigest digest2 = DigestUtils.getMd5Digest(); + int size2 = size1 * 2; + int partId2 = 2; + byte[] payload2 = generatePayload(partId1, size2); + MessageDigest digest2 = DigestUtils.getMd5Digest(); digest2.update(payload2); - final UploadHandle upload2; + UploadHandle upload2; try { - upload2 = initializeUpload(file); + upload2 = startUpload(file); Assume.assumeTrue( "The Filesystem is unexpectedly supporting concurrent uploads", concurrent); } catch (IOException e) { if (!concurrent) { // this is expected, so end the test - LOG.debug("Expected exception raised on concurrent uploads {}", e); + LOG.debug("Expected exception raised on concurrent uploads", e); return; } else { throw e; } } - final Map partHandles2 = new HashMap<>(); - + Map partHandles2 = new HashMap<>(); assertNotEquals("Upload handles match", upload1, upload2); @@ -686,13 +808,21 @@ public void testConcurrentUploads() throws Throwable { // now upload part 2. complete(file, upload2, partHandles2); // and await the visible length to match - eventually(timeToBecomeConsistentMillis(), 500, - () -> { - FileStatus status = fs.getFileStatus(file); - assertEquals("File length in " + status, - size2, status.getLen()); - }); + eventually(timeToBecomeConsistentMillis(), + () -> verifyFileLength(file, size2), + new LambdaTestUtils.ProportionalRetryInterval( + CONSISTENCY_INTERVAL, + timeToBecomeConsistentMillis())); verifyContents(file, digest2, size2); } + + @Test + public void testPathCapabilities() throws Throwable { + FileSystem fs = getFileSystem(); + Assertions.assertThat(fs.hasPathCapability(getContract().getTestPath(), + CommonPathCapabilities.FS_MULTIPART_UPLOADER)) + .describedAs("fs %s, lacks multipart upload capability", fs) + .isTrue(); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java deleted file mode 100644 index f675ddfa0db82..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.contract.localfs; - -import org.junit.Assume; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -/** - * Test the FileSystemMultipartUploader on local file system. - */ -public class TestLocalFSContractMultipartUploader - extends AbstractContractMultipartUploaderTest { - - @Override - public void setup() throws Exception { - Assume.assumeTrue("Skipping until HDFS-13934", false); - super.setup(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new LocalFSContract(conf); - } - - /** - * There is no real need to upload any particular size. - * @return 1 kilobyte - */ - @Override - protected int partSizeInBytes() { - return 1024; - } - - @Override - protected boolean finalizeConsumesUploadIdImmediately() { - return true; - } - - @Override - protected boolean supportsConcurrentUploadsToSamePath() { - return true; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 55e228d34ebb8..450862b777078 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -52,6 +52,7 @@ import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider; import org.apache.hadoop.fs.InvalidPathHandleException; import org.apache.hadoop.fs.PartialListing; +import org.apache.hadoop.fs.MultipartUploaderBuilder; import org.apache.hadoop.fs.PathHandle; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Options; @@ -66,6 +67,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.impl.FileSystemMultipartUploaderBuilder; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -3615,4 +3617,10 @@ public boolean hasPathCapability(final Path path, final String capability) return super.hasPathCapability(p, capability); } + + @Override + public MultipartUploaderBuilder createMultipartUploader(final Path basePath) + throws IOException { + return new FileSystemMultipartUploaderBuilder(this, basePath); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java index 6cad69a46c4e8..30e7e00653bcc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java @@ -47,6 +47,7 @@ public static Optional hasPathCapability(final Path path, case CommonPathCapabilities.FS_CHECKSUMS: case CommonPathCapabilities.FS_CONCAT: case CommonPathCapabilities.FS_LIST_CORRUPT_FILE_BLOCKS: + case CommonPathCapabilities.FS_MULTIPART_UPLOADER: case CommonPathCapabilities.FS_PATHHANDLES: case CommonPathCapabilities.FS_PERMISSIONS: case CommonPathCapabilities.FS_SNAPSHOTS: diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index df3f7eaf7f055..25e7f7373226b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -76,10 +76,12 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.GlobalStorageStatistics; import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider; +import org.apache.hadoop.fs.MultipartUploaderBuilder; import org.apache.hadoop.fs.QuotaUsage; import org.apache.hadoop.fs.PathCapabilities; import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.impl.FileSystemMultipartUploaderBuilder; import org.apache.hadoop.fs.permission.FsCreateModes; import org.apache.hadoop.hdfs.DFSOpsCountStatistics; import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType; @@ -2125,6 +2127,12 @@ public boolean hasPathCapability(final Path path, final String capability) return super.hasPathCapability(p, capability); } + @Override + public MultipartUploaderBuilder createMultipartUploader(final Path basePath) + throws IOException { + return new FileSystemMultipartUploaderBuilder(this, basePath); + } + /** * This class is used for opening, reading, and seeking files while using the * WebHdfsFileSystem. This class will invoke the retry policy when performing diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory deleted file mode 100644 index b153fd9924381..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.apache.hadoop.hdfs.DFSMultipartUploaderFactory diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index fa0251aa73aa2..e5b08f1bb903e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -108,8 +108,11 @@ import org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport; import org.apache.hadoop.fs.s3a.impl.OperationCallbacks; import org.apache.hadoop.fs.s3a.impl.RenameOperation; +import org.apache.hadoop.fs.s3a.impl.S3AMultipartUploaderBuilder; import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.fs.s3a.impl.StoreContextBuilder; +import org.apache.hadoop.fs.s3a.impl.statistics.S3AMultipartUploaderStatisticsImpl; import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; import org.apache.hadoop.fs.s3a.select.InternalSelectConstants; import org.apache.hadoop.io.IOUtils; @@ -4493,6 +4496,9 @@ public boolean hasPathCapability(final Path path, final String capability) return getConf().getBoolean(ETAG_CHECKSUM_ENABLED, ETAG_CHECKSUM_ENABLED_DEFAULT); + case CommonPathCapabilities.FS_MULTIPART_UPLOADER: + return true; + default: return super.hasPathCapability(p, capability); } @@ -4722,6 +4728,18 @@ public CompletableFuture openFileWithOptions( return result; } + @Override + public S3AMultipartUploaderBuilder createMultipartUploader( + final Path basePath) + throws IOException { + StoreContext ctx = createStoreContext(); + return new S3AMultipartUploaderBuilder(this, + getWriteOperationHelper(), + ctx, + basePath, + new S3AMultipartUploaderStatisticsImpl(ctx::incrementStatistic)); + } + /** * Build an immutable store context. 
* If called while the FS is being initialized, @@ -4731,24 +4749,24 @@ public CompletableFuture openFileWithOptions( */ @InterfaceAudience.Private public StoreContext createStoreContext() { - return new StoreContext( - getUri(), - getBucket(), - getConf(), - getUsername(), - owner, - boundedThreadPool, - executorCapacity, - invoker, - getInstrumentation(), - getStorageStatistics(), - getInputPolicy(), - changeDetectionPolicy, - enableMultiObjectsDelete, - metadataStore, - useListV1, - new ContextAccessorsImpl(), - getTtlTimeProvider()); + return new StoreContextBuilder().setFsURI(getUri()) + .setBucket(getBucket()) + .setConfiguration(getConf()) + .setUsername(getUsername()) + .setOwner(owner) + .setExecutor(boundedThreadPool) + .setExecutorCapacity(executorCapacity) + .setInvoker(invoker) + .setInstrumentation(getInstrumentation()) + .setStorageStatistics(getStorageStatistics()) + .setInputPolicy(getInputPolicy()) + .setChangeDetectionPolicy(changeDetectionPolicy) + .setMultiObjectDeleteEnabled(enableMultiObjectsDelete) + .setMetadataStore(metadataStore) + .setUseListV1(useListV1) + .setContextAccessors(new ContextAccessorsImpl()) + .setTimeProvider(getTtlTimeProvider()) + .build(); } /** @@ -4776,5 +4794,10 @@ public File createTempFile(final String prefix, final long size) public String getBucketLocation() throws IOException { return S3AFileSystem.this.getBucketLocation(); } + + @Override + public Path makeQualified(final Path path) { + return S3AFileSystem.this.makeQualified(path); + } } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java index b9918b5098946..cb0a43430dbd9 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java @@ -193,7 +193,14 @@ public class S3AInstrumentation implements Closeable, MetricsSource { S3GUARD_METADATASTORE_AUTHORITATIVE_DIRECTORIES_UPDATED, STORE_IO_THROTTLED, DELEGATION_TOKENS_ISSUED, - FILES_DELETE_REJECTED + FILES_DELETE_REJECTED, + MULTIPART_INSTANTIATED, + MULTIPART_PART_PUT, + MULTIPART_PART_PUT_BYTES, + MULTIPART_UPLOAD_ABORTED, + MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED, + MULTIPART_UPLOAD_COMPLETED, + MULTIPART_UPLOAD_STARTED }; private static final Statistic[] GAUGES_TO_CREATE = { diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java deleted file mode 100644 index cf58751ea446c..0000000000000 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.s3a; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; - -import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; -import com.amazonaws.services.s3.model.PartETag; -import com.amazonaws.services.s3.model.UploadPartRequest; -import com.amazonaws.services.s3.model.UploadPartResult; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Charsets; -import com.google.common.base.Preconditions; - -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.BBPartHandle; -import org.apache.hadoop.fs.BBUploadHandle; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.MultipartUploader; -import org.apache.hadoop.fs.MultipartUploaderFactory; -import org.apache.hadoop.fs.PartHandle; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathHandle; -import org.apache.hadoop.fs.UploadHandle; - -import static org.apache.hadoop.fs.s3a.Constants.FS_S3A; - -/** - * MultipartUploader for S3AFileSystem. This uses the S3 multipart - * upload mechanism. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public class S3AMultipartUploader extends MultipartUploader { - - private final S3AFileSystem s3a; - - /** Header for Parts: {@value}. 
*/ - - public static final String HEADER = "S3A-part01"; - - public S3AMultipartUploader(FileSystem fs, Configuration conf) { - Preconditions.checkArgument(fs instanceof S3AFileSystem, - "Wrong filesystem: expected S3A but got %s", fs); - s3a = (S3AFileSystem) fs; - } - - @Override - public UploadHandle initialize(Path filePath) throws IOException { - final WriteOperationHelper writeHelper = s3a.getWriteOperationHelper(); - String key = s3a.pathToKey(filePath); - String uploadId = writeHelper.initiateMultiPartUpload(key); - return BBUploadHandle.from(ByteBuffer.wrap( - uploadId.getBytes(Charsets.UTF_8))); - } - - @Override - public PartHandle putPart(Path filePath, InputStream inputStream, - int partNumber, UploadHandle uploadId, long lengthInBytes) - throws IOException { - checkPutArguments(filePath, inputStream, partNumber, uploadId, - lengthInBytes); - byte[] uploadIdBytes = uploadId.toByteArray(); - checkUploadId(uploadIdBytes); - String key = s3a.pathToKey(filePath); - final WriteOperationHelper writeHelper = s3a.getWriteOperationHelper(); - String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, - Charsets.UTF_8); - UploadPartRequest request = writeHelper.newUploadPartRequest(key, - uploadIdString, partNumber, (int) lengthInBytes, inputStream, null, 0L); - UploadPartResult result = writeHelper.uploadPart(request); - String eTag = result.getETag(); - return BBPartHandle.from( - ByteBuffer.wrap( - buildPartHandlePayload(eTag, lengthInBytes))); - } - - @Override - public PathHandle complete(Path filePath, - Map handleMap, - UploadHandle uploadId) - throws IOException { - byte[] uploadIdBytes = uploadId.toByteArray(); - checkUploadId(uploadIdBytes); - - checkPartHandles(handleMap); - List> handles = - new ArrayList<>(handleMap.entrySet()); - handles.sort(Comparator.comparingInt(Map.Entry::getKey)); - final WriteOperationHelper writeHelper = s3a.getWriteOperationHelper(); - String key = s3a.pathToKey(filePath); - - String uploadIdStr = new String(uploadIdBytes, 0, uploadIdBytes.length, - Charsets.UTF_8); - ArrayList eTags = new ArrayList<>(); - eTags.ensureCapacity(handles.size()); - long totalLength = 0; - for (Map.Entry handle : handles) { - byte[] payload = handle.getValue().toByteArray(); - Pair result = parsePartHandlePayload(payload); - totalLength += result.getLeft(); - eTags.add(new PartETag(handle.getKey(), result.getRight())); - } - AtomicInteger errorCount = new AtomicInteger(0); - CompleteMultipartUploadResult result = writeHelper.completeMPUwithRetries( - key, uploadIdStr, eTags, totalLength, errorCount); - - byte[] eTag = result.getETag().getBytes(Charsets.UTF_8); - return (PathHandle) () -> ByteBuffer.wrap(eTag); - } - - @Override - public void abort(Path filePath, UploadHandle uploadId) throws IOException { - final byte[] uploadIdBytes = uploadId.toByteArray(); - checkUploadId(uploadIdBytes); - final WriteOperationHelper writeHelper = s3a.getWriteOperationHelper(); - String key = s3a.pathToKey(filePath); - String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, - Charsets.UTF_8); - writeHelper.abortMultipartCommit(key, uploadIdString); - } - - /** - * Factory for creating MultipartUploader objects for s3a:// FileSystems. 
- */ - public static class Factory extends MultipartUploaderFactory { - @Override - protected MultipartUploader createMultipartUploader(FileSystem fs, - Configuration conf) { - if (FS_S3A.equals(fs.getScheme())) { - return new S3AMultipartUploader(fs, conf); - } - return null; - } - } - - /** - * Build the payload for marshalling. - * @param eTag upload etag - * @param len length - * @return a byte array to marshall. - * @throws IOException error writing the payload - */ - @VisibleForTesting - static byte[] buildPartHandlePayload(String eTag, long len) - throws IOException { - Preconditions.checkArgument(StringUtils.isNotEmpty(eTag), - "Empty etag"); - Preconditions.checkArgument(len >= 0, - "Invalid length"); - - ByteArrayOutputStream bytes = new ByteArrayOutputStream(); - try(DataOutputStream output = new DataOutputStream(bytes)) { - output.writeUTF(HEADER); - output.writeLong(len); - output.writeUTF(eTag); - } - return bytes.toByteArray(); - } - - /** - * Parse the payload marshalled as a part handle. - * @param data handle data - * @return the length and etag - * @throws IOException error reading the payload - */ - @VisibleForTesting - static Pair parsePartHandlePayload(byte[] data) - throws IOException { - - try(DataInputStream input = - new DataInputStream(new ByteArrayInputStream(data))) { - final String header = input.readUTF(); - if (!HEADER.equals(header)) { - throw new IOException("Wrong header string: \"" + header + "\""); - } - final long len = input.readLong(); - final String etag = input.readUTF(); - if (len < 0) { - throw new IOException("Negative length"); - } - return Pair.of(len, etag); - } - } - -} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java index 1d3d4758028c6..8153169054ba9 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java @@ -234,7 +234,29 @@ public enum Statistic { "Rate of S3 request throttling"), DELEGATION_TOKENS_ISSUED("delegation_tokens_issued", - "Number of delegation tokens issued"); + "Number of delegation tokens issued"), + + MULTIPART_INSTANTIATED( + "multipart_instantiated", + "Multipart Uploader Instantiated"), + MULTIPART_PART_PUT( + "multipart_part_put", + "Multipart Part Put Operation"), + MULTIPART_PART_PUT_BYTES( + "multipart_part_put_bytes", + "Multipart Part Put Bytes"), + MULTIPART_UPLOAD_ABORTED( + "multipart_upload_aborted", + "Multipart Upload Aborted"), + MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED( + "multipart_upload_abort_under_path_invoked", + "Multipart Upload Abort Udner Path Invoked"), + MULTIPART_UPLOAD_COMPLETED( + "multipart_upload_completed", + "Multipart Upload Completed"), + MULTIPART_UPLOAD_STARTED( + "multipart_upload_started", + "Multipart Upload Started"); private static final Map SYMBOL_MAP = new HashMap<>(Statistic.values().length); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java index ab53486d648b2..26d0942db61d4 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java @@ -87,7 +87,7 @@ */ @InterfaceAudience.Private @InterfaceStability.Unstable -public class WriteOperationHelper { +public class 
WriteOperationHelper implements WriteOperations { private static final Logger LOG = LoggerFactory.getLogger(WriteOperationHelper.class); @@ -254,11 +254,11 @@ private CompleteMultipartUploadResult finalizeMultipartUpload( Retried retrying, @Nullable BulkOperationState operationState) throws IOException { if (partETags.isEmpty()) { - throw new IOException( - "No upload parts in multipart upload to " + destKey); + throw new PathIOException(destKey, + "No upload parts in multipart upload"); } CompleteMultipartUploadResult uploadResult = - invoker.retry("Completing multipart commit", destKey, + invoker.retry("Completing multipart upload", destKey, true, retrying, () -> { @@ -560,8 +560,20 @@ public CompleteMultipartUploadResult commitUpload( */ public BulkOperationState initiateCommitOperation( Path path) throws IOException { + return initiateOperation(path, BulkOperationState.OperationType.Commit); + } + + /** + * Initiate a commit operation through any metastore. + * @param path path under which the writes will all take place. + * @param operationType operation to initiate + * @return an possibly null operation state from the metastore. + * @throws IOException failure to instantiate. + */ + public BulkOperationState initiateOperation(final Path path, + final BulkOperationState.OperationType operationType) throws IOException { return S3Guard.initiateBulkWrite(owner.getMetadataStore(), - BulkOperationState.OperationType.Commit, path); + operationType, path); } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java new file mode 100644 index 0000000000000..95cbd7e70b73b --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java @@ -0,0 +1,335 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a; + +import javax.annotation.Nullable; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.MultipartUpload; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.PutObjectResult; +import com.amazonaws.services.s3.model.SelectObjectContentRequest; +import com.amazonaws.services.s3.model.SelectObjectContentResult; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import com.amazonaws.services.s3.transfer.model.UploadResult; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; + +/** + * Operations to update the store. + * This is effectively a private internal API for classes used as part + * of the S3A implementation. + * New extension points SHOULD use this interface -provided there + * is no plan to backport to previous versions. In those situations, + * use `WriteOperationHelper` directly. + * @since Hadoop 3.3.0 + */ +public interface WriteOperations { + + /** + * Execute a function with retry processing. + * @param action action to execute (used in error messages) + * @param path path of work (used in error messages) + * @param idempotent does the operation have semantics + * which mean that it can be retried even if was already executed? + * @param operation operation to execute + * @param type of return value + * @return the result of the call + * @throws IOException any IOE raised, or translated exception + */ + T retry(String action, + String path, + boolean idempotent, + Invoker.Operation operation) + throws IOException; + + /** + * Create a {@link PutObjectRequest} request against the specific key. + * @param destKey destination key + * @param inputStream source data. + * @param length size, if known. Use -1 for not known + * @return the request + */ + PutObjectRequest createPutObjectRequest(String destKey, + InputStream inputStream, long length); + + /** + * Create a {@link PutObjectRequest} request to upload a file. + * @param dest key to PUT to. + * @param sourceFile source file + * @return the request + */ + PutObjectRequest createPutObjectRequest(String dest, + File sourceFile); + + /** + * Callback on a successful write. + * @param length length of the write + */ + void writeSuccessful(long length); + + /** + * Callback on a write failure. + * @param ex Any exception raised which triggered the failure. + */ + void writeFailed(Exception ex); + + /** + * Create a new object metadata instance. + * Any standard metadata headers are added here, for example: + * encryption. + * @param length size, if known. Use -1 for not known + * @return a new metadata instance + */ + ObjectMetadata newObjectMetadata(long length); + + /** + * Start the multipart upload process. + * Retry policy: retrying, translated. 
+ * @param destKey destination of upload + * @return the upload result containing the ID + * @throws IOException IO problem + */ + @Retries.RetryTranslated + String initiateMultiPartUpload(String destKey) throws IOException; + + /** + * This completes a multipart upload to the destination key via + * {@code finalizeMultipartUpload()}. + * Retry policy: retrying, translated. + * Retries increment the {@code errorCount} counter. + * @param destKey destination + * @param uploadId multipart operation Id + * @param partETags list of partial uploads + * @param length length of the upload + * @param errorCount a counter incremented by 1 on every error; for + * use in statistics + * @return the result of the operation. + * @throws IOException if problems arose which could not be retried, or + * the retry count was exceeded + */ + @Retries.RetryTranslated + CompleteMultipartUploadResult completeMPUwithRetries( + String destKey, + String uploadId, + List partETags, + long length, + AtomicInteger errorCount) + throws IOException; + + /** + * Abort a multipart upload operation. + * @param destKey destination key of the upload + * @param uploadId multipart operation Id + * @param retrying callback invoked on every retry + * @throws IOException failure to abort + * @throws FileNotFoundException if the abort ID is unknown + */ + @Retries.RetryTranslated + void abortMultipartUpload(String destKey, String uploadId, + Invoker.Retried retrying) + throws IOException; + + /** + * Abort a multipart commit operation. + * @param upload upload to abort. + * @throws IOException on problems. + */ + @Retries.RetryTranslated + void abortMultipartUpload(MultipartUpload upload) + throws IOException; + + /** + * Abort multipart uploads under a path: limited to the first + * few hundred. + * @param prefix prefix for uploads to abort + * @return a count of aborts + * @throws IOException trouble; FileNotFoundExceptions are swallowed. + */ + @Retries.RetryTranslated + int abortMultipartUploadsUnderPath(String prefix) + throws IOException; + + /** + * Abort a multipart commit operation. + * @param destKey destination key of ongoing operation + * @param uploadId multipart operation Id + * @throws IOException on problems. + * @throws FileNotFoundException if the abort ID is unknown + */ + @Retries.RetryTranslated + void abortMultipartCommit(String destKey, String uploadId) + throws IOException; + + /** + * Create and initialize a part request of a multipart upload. + * Exactly one of: {@code uploadStream} or {@code sourceFile} + * must be specified. + * A subset of the file may be posted, by providing the starting point + * in {@code offset} and a length of block in {@code size} equal to + * or less than the remaining bytes. + * @param destKey destination key of ongoing operation + * @param uploadId ID of ongoing upload + * @param partNumber current part number of the upload + * @param size amount of data + * @param uploadStream source of data to upload + * @param sourceFile optional source file. + * @param offset offset in file to start reading. + * @return the request. + * @throws IllegalArgumentException if the parameters are invalid -including + * @throws PathIOException if the part number is out of range. + */ + UploadPartRequest newUploadPartRequest( + String destKey, + String uploadId, + int partNumber, + int size, + InputStream uploadStream, + File sourceFile, + Long offset) throws PathIOException; + + /** + * PUT an object directly (i.e. not via the transfer manager). 
+ * Byte length is calculated from the file length, or, if there is no + * file, from the content length of the header. + * @param putObjectRequest the request + * @return the upload initiated + * @throws IOException on problems + */ + @Retries.RetryTranslated + PutObjectResult putObject(PutObjectRequest putObjectRequest) + throws IOException; + + /** + * PUT an object via the transfer manager. + * @param putObjectRequest the request + * @return the result of the operation + * @throws IOException on problems + */ + @Retries.RetryTranslated + UploadResult uploadObject(PutObjectRequest putObjectRequest) + throws IOException; + + /** + * Revert a commit by deleting the file. + * Relies on retry code in filesystem + * @throws IOException on problems + * @param destKey destination key + * @param operationState operational state for a bulk update + */ + @Retries.OnceTranslated + void revertCommit(String destKey, + @Nullable BulkOperationState operationState) throws IOException; + + /** + * This completes a multipart upload to the destination key via + * {@code finalizeMultipartUpload()}. + * Retry policy: retrying, translated. + * Retries increment the {@code errorCount} counter. + * @param destKey destination + * @param uploadId multipart operation Id + * @param partETags list of partial uploads + * @param length length of the upload + * @param operationState operational state for a bulk update + * @return the result of the operation. + * @throws IOException if problems arose which could not be retried, or + * the retry count was exceeded + */ + @Retries.RetryTranslated + CompleteMultipartUploadResult commitUpload( + String destKey, + String uploadId, + List partETags, + long length, + @Nullable BulkOperationState operationState) + throws IOException; + + /** + * Initiate a commit operation through any metastore. + * @param path path under which the writes will all take place. + * @return an possibly null operation state from the metastore. + * @throws IOException failure to instantiate. + */ + BulkOperationState initiateCommitOperation( + Path path) throws IOException; + + /** + * Initiate a commit operation through any metastore. + * @param path path under which the writes will all take place. + * @param operationType operation to initiate + * @return an possibly null operation state from the metastore. + * @throws IOException failure to instantiate. + */ + BulkOperationState initiateOperation(Path path, + BulkOperationState.OperationType operationType) throws IOException; + + /** + * Upload part of a multi-partition file. + * @param request request + * @return the result of the operation. + * @throws IOException on problems + */ + @Retries.RetryTranslated + UploadPartResult uploadPart(UploadPartRequest request) + throws IOException; + + /** + * Get the configuration of this instance; essentially the owning + * filesystem configuration. + * @return the configuration. + */ + Configuration getConf(); + + /** + * Create a S3 Select request for the destination path. + * This does not build the query. + * @param path pre-qualified path for query + * @return the request + */ + SelectObjectContentRequest newSelectRequest(Path path); + + /** + * Execute an S3 Select operation. + * On a failure, the request is only logged at debug to avoid the + * select exception being printed. + * @param source source for selection + * @param request Select request to issue. 
+ * @param action the action for use in exception creation + * @return response + * @throws IOException failure + */ + @Retries.RetryTranslated + SelectObjectContentResult select( + Path source, + SelectObjectContentRequest request, + String action) + throws IOException; +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java index b10cc6d857ad9..d39c649df2e22 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java @@ -73,4 +73,12 @@ public interface ContextAccessors { */ @Retries.RetryTranslated String getBucketLocation() throws IOException; + + /** + * Qualify a path. + * + * @param path path to qualify/normalize + * @return possibly new path. + */ + Path makeQualified(Path path); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java new file mode 100644 index 0000000000000..9f131dd3989d5 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java @@ -0,0 +1,420 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.fs.s3a.impl;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+
+import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
+import com.amazonaws.services.s3.model.PartETag;
+import com.amazonaws.services.s3.model.UploadPartRequest;
+import com.amazonaws.services.s3.model.UploadPartResult;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.BBPartHandle;
+import org.apache.hadoop.fs.BBUploadHandle;
+import org.apache.hadoop.fs.PartHandle;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathHandle;
+import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.fs.UploadHandle;
+import org.apache.hadoop.fs.impl.AbstractMultipartUploader;
+import org.apache.hadoop.fs.s3a.WriteOperations;
+import org.apache.hadoop.fs.s3a.impl.statistics.S3AMultipartUploaderStatistics;
+import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState;
+
+/**
+ * MultipartUploader for S3AFileSystem. This uses the S3 multipart
+ * upload mechanism.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+class S3AMultipartUploader extends AbstractMultipartUploader {
+
+  private final S3AMultipartUploaderBuilder builder;
+
+  /** Header for serialized Parts: {@value}. */
+
+  public static final String HEADER = "S3A-part01";
+
+  private final WriteOperations writeOperations;
+
+  private final StoreContext context;
+
+  private final S3AMultipartUploaderStatistics statistics;
+
+  /**
+   * Bulk state; demand created and then retained.
+   */
+  private BulkOperationState operationState;
+
+  /**
+   * Was an operation state requested but not returned?
+   */
+  private boolean noOperationState;
+
+  /**
+   * Instantiate; this is called by the builder.
+   * @param builder builder
+   * @param writeOperations writeOperations
+   * @param context s3a context
+   * @param statistics statistics callbacks
+   */
+  S3AMultipartUploader(
+      final S3AMultipartUploaderBuilder builder,
+      final WriteOperations writeOperations,
+      final StoreContext context,
+      final S3AMultipartUploaderStatistics statistics) {
+    super(context.makeQualified(builder.getPath()));
+    this.builder = builder;
+    this.writeOperations = writeOperations;
+    this.context = context;
+    this.statistics = statistics;
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (operationState != null) {
+      operationState.close();
+    }
+    super.close();
+  }
+
+  /**
+   * Retrieve the operation state; create one on demand if needed
+   * and there has been no unsuccessful attempt to create one.
+   * @return an active operation state.
+ * @throws IOException failure + */ + private synchronized BulkOperationState retrieveOperationState() + throws IOException { + if (operationState == null && !noOperationState) { + operationState = writeOperations.initiateOperation(getBasePath(), + BulkOperationState.OperationType.Upload); + noOperationState = operationState != null; + } + return operationState; + } + + @Override + public CompletableFuture startUpload( + final Path filePath) + throws IOException { + Path dest = context.makeQualified(filePath); + checkPath(dest); + String key = context.pathToKey(dest); + return context.submit(new CompletableFuture<>(), + () -> { + String uploadId = writeOperations.initiateMultiPartUpload(key); + statistics.uploadStarted(); + return BBUploadHandle.from(ByteBuffer.wrap( + uploadId.getBytes(Charsets.UTF_8))); + }); + } + + @Override + public CompletableFuture putPart( + final UploadHandle uploadId, + final int partNumber, + final Path filePath, + final InputStream inputStream, + final long lengthInBytes) + throws IOException { + Path dest = context.makeQualified(filePath); + checkPutArguments(dest, inputStream, partNumber, uploadId, + lengthInBytes); + byte[] uploadIdBytes = uploadId.toByteArray(); + checkUploadId(uploadIdBytes); + String key = context.pathToKey(dest); + String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8); + return context.submit(new CompletableFuture<>(), + () -> { + UploadPartRequest request = writeOperations.newUploadPartRequest(key, + uploadIdString, partNumber, (int) lengthInBytes, inputStream, + null, 0L); + UploadPartResult result = writeOperations.uploadPart(request); + statistics.partPut(lengthInBytes); + String eTag = result.getETag(); + return BBPartHandle.from( + ByteBuffer.wrap( + buildPartHandlePayload( + filePath.toUri().toString(), + uploadIdString, + result.getPartNumber(), + eTag, + lengthInBytes))); + }); + } + + @Override + public CompletableFuture complete( + final UploadHandle uploadHandle, + final Path filePath, + final Map handleMap) + throws IOException { + Path dest = context.makeQualified(filePath); + checkPath(dest); + byte[] uploadIdBytes = uploadHandle.toByteArray(); + checkUploadId(uploadIdBytes); + checkPartHandles(handleMap); + List> handles = + new ArrayList<>(handleMap.entrySet()); + handles.sort(Comparator.comparingInt(Map.Entry::getKey)); + int count = handles.size(); + String key = context.pathToKey(dest); + + String uploadIdStr = new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8); + ArrayList eTags = new ArrayList<>(); + eTags.ensureCapacity(handles.size()); + long totalLength = 0; + // built up to identify duplicates -if the size of this set is + // below that of the number of parts, then there's a duplicate entry. + Set ids = new HashSet<>(count); + + for (Map.Entry handle : handles) { + PartHandlePayload payload = parsePartHandlePayload( + handle.getValue().toByteArray()); + payload.validate(uploadIdStr, filePath); + ids.add(payload.getPartNumber()); + totalLength += payload.getLen(); + eTags.add(new PartETag(handle.getKey(), payload.getEtag())); + } + Preconditions.checkArgument(ids.size() == count, + "Duplicate PartHandles"); + + // retrieve/create operation state for scalability of completion. 
+ final BulkOperationState state = retrieveOperationState(); + long finalLen = totalLength; + return context.submit(new CompletableFuture<>(), + () -> { + CompleteMultipartUploadResult result = + writeOperations.commitUpload( + key, + uploadIdStr, + eTags, + finalLen, + state); + + byte[] eTag = result.getETag().getBytes(Charsets.UTF_8); + statistics.uploadCompleted(); + return (PathHandle) () -> ByteBuffer.wrap(eTag); + }); + } + + @Override + public CompletableFuture abort( + final UploadHandle uploadId, + final Path filePath) + throws IOException { + Path dest = context.makeQualified(filePath); + checkPath(dest); + final byte[] uploadIdBytes = uploadId.toByteArray(); + checkUploadId(uploadIdBytes); + String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8); + return context.submit(new CompletableFuture<>(), + () -> { + writeOperations.abortMultipartCommit( + context.pathToKey(dest), + uploadIdString); + statistics.uploadAborted(); + return null; + }); + } + + /** + * Upload all MPUs under the path. + * @param path path to abort uploads under. + * @return a future which eventually returns the number of entries found + * @throws IOException submission failure + */ + @Override + public CompletableFuture abortUploadsUnderPath(final Path path) + throws IOException { + statistics.abortUploadsUnderPathInvoked(); + return context.submit(new CompletableFuture<>(), + () -> + writeOperations.abortMultipartUploadsUnderPath( + context.pathToKey(path))); + } + + /** + * Build the payload for marshalling. + * + * @param partNumber part number from response + * @param etag upload etag + * @param len length + * @return a byte array to marshall. + * @throws IOException error writing the payload + */ + @VisibleForTesting + static byte[] buildPartHandlePayload( + final String path, + final String uploadId, + final int partNumber, + final String etag, + final long len) + throws IOException { + + return new PartHandlePayload(path, uploadId, partNumber, len, etag) + .toBytes(); + } + + /** + * Parse the payload marshalled as a part handle. + * @param data handle data + * @return the length and etag + * @throws IOException error reading the payload + */ + @VisibleForTesting + static PartHandlePayload parsePartHandlePayload( + final byte[] data) + throws IOException { + + try (DataInputStream input = + new DataInputStream(new ByteArrayInputStream(data))) { + final String header = input.readUTF(); + if (!HEADER.equals(header)) { + throw new IOException("Wrong header string: \"" + header + "\""); + } + final String path = input.readUTF(); + final String uploadId = input.readUTF(); + final int partNumber = input.readInt(); + final long len = input.readLong(); + final String etag = input.readUTF(); + if (len < 0) { + throw new IOException("Negative length"); + } + return new PartHandlePayload(path, uploadId, partNumber, len, etag); + } + } + + /** + * Payload of a part handle; serializes + * the fields using DataInputStream and DataOutputStream. 
+ */ + @VisibleForTesting + static final class PartHandlePayload { + + private final String path; + + private final String uploadId; + + private final int partNumber; + + private final long len; + + private final String etag; + + private PartHandlePayload( + final String path, + final String uploadId, + final int partNumber, + final long len, + final String etag) { + Preconditions.checkArgument(StringUtils.isNotEmpty(etag), + "Empty etag"); + Preconditions.checkArgument(StringUtils.isNotEmpty(path), + "Empty path"); + Preconditions.checkArgument(StringUtils.isNotEmpty(uploadId), + "Empty uploadId"); + Preconditions.checkArgument(len >= 0, + "Invalid length"); + + this.path = path; + this.uploadId = uploadId; + this.partNumber = partNumber; + this.len = len; + this.etag = etag; + } + + public String getPath() { + return path; + } + + public int getPartNumber() { + return partNumber; + } + + public long getLen() { + return len; + } + + public String getEtag() { + return etag; + } + + public String getUploadId() { + return uploadId; + } + + public byte[] toBytes() + throws IOException { + Preconditions.checkArgument(StringUtils.isNotEmpty(etag), + "Empty etag"); + Preconditions.checkArgument(len >= 0, + "Invalid length"); + + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + try (DataOutputStream output = new DataOutputStream(bytes)) { + output.writeUTF(HEADER); + output.writeUTF(path); + output.writeUTF(uploadId); + output.writeInt(partNumber); + output.writeLong(len); + output.writeUTF(etag); + } + return bytes.toByteArray(); + } + + public void validate(String uploadIdStr, Path filePath) + throws PathIOException { + String destUri = filePath.toUri().toString(); + if (!destUri.equals(path)) { + throw new PathIOException(destUri, + "Multipart part path mismatch: " + path); + } + if (!uploadIdStr.equals(uploadId)) { + throw new PathIOException(destUri, + "Multipart part ID mismatch: " + uploadId); + } + } + } + + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploaderBuilder.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploaderBuilder.java new file mode 100644 index 0000000000000..3bf1a7ddd919a --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploaderBuilder.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.impl; + +import javax.annotation.Nonnull; +import java.io.IOException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.impl.MultipartUploaderBuilderImpl; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.WriteOperations; +import org.apache.hadoop.fs.s3a.impl.statistics.S3AMultipartUploaderStatistics; + +/** + * Builder for S3A multipart uploaders. + */ +public class S3AMultipartUploaderBuilder extends + MultipartUploaderBuilderImpl { + + private final WriteOperations writeOperations; + + private final StoreContext context; + + private final S3AMultipartUploaderStatistics statistics; + + public S3AMultipartUploaderBuilder( + @Nonnull final S3AFileSystem fileSystem, + @Nonnull final WriteOperations writeOperations, + @Nonnull final StoreContext context, + @Nonnull final Path p, + @Nonnull final S3AMultipartUploaderStatistics statistics) { + super(fileSystem, p); + this.writeOperations = writeOperations; + this.context = context; + this.statistics = statistics; + } + + @Override + public S3AMultipartUploaderBuilder getThisBuilder() { + return this; + } + + @Override + public S3AMultipartUploader build() + throws IllegalArgumentException, IOException { + return new S3AMultipartUploader(this, writeOperations, context, statistics); + } + + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java index 88480db753515..e307c8db9bf17 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java @@ -21,6 +21,8 @@ import java.io.File; import java.io.IOException; import java.net.URI; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; import com.google.common.util.concurrent.ListeningExecutorService; @@ -37,6 +39,7 @@ import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.LambdaUtils; import org.apache.hadoop.util.SemaphoredDelegatingExecutor; /** @@ -49,9 +52,10 @@ * their own. * * Warning: this really is private and unstable. Do not use - * outside the org.apache.hadoop.fs.s3a package. + * outside the org.apache.hadoop.fs.s3a package, or in extension points + * such as DelegationTokens. */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate("S3A Filesystem and extensions") @InterfaceStability.Unstable public class StoreContext { @@ -114,8 +118,7 @@ public class StoreContext { /** * Instantiate. - * No attempt to use a builder here as outside tests - * this should only be created in the S3AFileSystem. + * @deprecated as public method: use {@link StoreContextBuilder}. */ public StoreContext( final URI fsURI, @@ -226,6 +229,16 @@ public String pathToKey(Path path) { return contextAccessors.pathToKey(path); } + /** + * Qualify a path. + * + * @param path path to qualify/normalize + * @return possibly new path. + */ + public Path makeQualified(Path path) { + return contextAccessors.makeQualified(path); + } + /** * Get the storage statistics of this filesystem. * @return the storage statistics @@ -351,4 +364,20 @@ public String fullKey(final S3AFileStatus stat) { ? 
k + "/" : k; } + + /** + * Submit a closure for execution in the executor + * returned by {@link #getExecutor()}. + * @param type of future + * @param future future for the result. + * @param call callable to invoke. + * @return the future passed in + */ + public CompletableFuture submit( + final CompletableFuture future, + final Callable call) { + getExecutor().submit(() -> + LambdaUtils.eval(future, call)); + return future; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java new file mode 100644 index 0000000000000..a5e0dadf379f5 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java @@ -0,0 +1,189 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.net.URI; + +import com.google.common.util.concurrent.ListeningExecutorService; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.s3a.Invoker; +import org.apache.hadoop.fs.s3a.S3AInputPolicy; +import org.apache.hadoop.fs.s3a.S3AInstrumentation; +import org.apache.hadoop.fs.s3a.S3AStorageStatistics; +import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; +import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * Builder for the store context. 
+ */ +public class StoreContextBuilder { + + private URI fsURI; + + private String bucket; + + private Configuration configuration; + + private String username; + + private UserGroupInformation owner; + + private ListeningExecutorService executor; + + private int executorCapacity; + + private Invoker invoker; + + private S3AInstrumentation instrumentation; + + private S3AStorageStatistics storageStatistics; + + private S3AInputPolicy inputPolicy = S3AInputPolicy.Normal; + + private ChangeDetectionPolicy changeDetectionPolicy; + + private boolean multiObjectDeleteEnabled = true; + + private MetadataStore metadataStore; + + private boolean useListV1 = false; + + private ContextAccessors contextAccessors; + + private ITtlTimeProvider timeProvider; + + public StoreContextBuilder setFsURI(final URI fsURI) { + this.fsURI = fsURI; + return this; + } + + public StoreContextBuilder setBucket(final String b) { + this.bucket = b; + return this; + } + + public StoreContextBuilder setConfiguration(final Configuration conf) { + this.configuration = conf; + return this; + } + + public StoreContextBuilder setUsername(final String user) { + this.username = user; + return this; + } + + public StoreContextBuilder setOwner(final UserGroupInformation ugi) { + this.owner = ugi; + return this; + } + + public StoreContextBuilder setExecutor( + final ListeningExecutorService ex) { + this.executor = ex; + return this; + } + + public StoreContextBuilder setExecutorCapacity( + final int capacity) { + this.executorCapacity = capacity; + return this; + } + + public StoreContextBuilder setInvoker(final Invoker invoke) { + this.invoker = invoke; + return this; + } + + public StoreContextBuilder setInstrumentation( + final S3AInstrumentation instr) { + this.instrumentation = instr; + return this; + } + + public StoreContextBuilder setStorageStatistics( + final S3AStorageStatistics sstats) { + this.storageStatistics = sstats; + return this; + } + + public StoreContextBuilder setInputPolicy( + final S3AInputPolicy policy) { + this.inputPolicy = policy; + return this; + } + + public StoreContextBuilder setChangeDetectionPolicy( + final ChangeDetectionPolicy policy) { + this.changeDetectionPolicy = policy; + return this; + } + + public StoreContextBuilder setMultiObjectDeleteEnabled( + final boolean enabled) { + this.multiObjectDeleteEnabled = enabled; + return this; + } + + public StoreContextBuilder setMetadataStore( + final MetadataStore store) { + this.metadataStore = store; + return this; + } + + public StoreContextBuilder setUseListV1( + final boolean useV1) { + this.useListV1 = useV1; + return this; + } + + public StoreContextBuilder setContextAccessors( + final ContextAccessors accessors) { + this.contextAccessors = accessors; + return this; + } + + public StoreContextBuilder setTimeProvider( + final ITtlTimeProvider provider) { + this.timeProvider = provider; + return this; + } + + @SuppressWarnings("deprecation") + public StoreContext build() { + return new StoreContext(fsURI, + bucket, + configuration, + username, + owner, + executor, + executorCapacity, + invoker, + instrumentation, + storageStatistics, + inputPolicy, + changeDetectionPolicy, + multiObjectDeleteEnabled, + metadataStore, + useListV1, + contextAccessors, + timeProvider); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/statistics/S3AMultipartUploaderStatistics.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/statistics/S3AMultipartUploaderStatistics.java new file mode 100644 index 
0000000000000..2cd74ff9cf5da --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/statistics/S3AMultipartUploaderStatistics.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl.statistics; + +import java.io.Closeable; + +/** + * Statistics for the S3A multipart uploader. + */ +public interface S3AMultipartUploaderStatistics extends Closeable { + + void instantiated(); + + void uploadStarted(); + + void partPut(long lengthInBytes); + + void uploadCompleted(); + + void uploadAborted(); + + void abortUploadsUnderPathInvoked(); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/statistics/S3AMultipartUploaderStatisticsImpl.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/statistics/S3AMultipartUploaderStatisticsImpl.java new file mode 100644 index 0000000000000..70e4785f62e19 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/statistics/S3AMultipartUploaderStatisticsImpl.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl.statistics; + + +import java.io.IOException; +import java.util.function.BiConsumer; + +import org.apache.hadoop.fs.s3a.Statistic; + +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_INSTANTIATED; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_PART_PUT; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_PART_PUT_BYTES; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_ABORTED; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_COMPLETED; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_STARTED; + +/** + * Implementation of the uploader statistics. 
+ * This takes a function to update some counter and will update + * this value when things change, so it can be bonded to arbitrary + * statistic collectors. + */ +public final class S3AMultipartUploaderStatisticsImpl implements + S3AMultipartUploaderStatistics { + + /** + * The operation to increment a counter/statistic by a value. + */ + private final BiConsumer incrementCallback; + + /** + * Constructor. + * @param incrementCallback The operation to increment a + * counter/statistic by a value. + */ + public S3AMultipartUploaderStatisticsImpl( + final BiConsumer incrementCallback) { + this.incrementCallback = incrementCallback; + } + + private void inc(Statistic op, long count) { + incrementCallback.accept(op, count); + } + + @Override + public void instantiated() { + inc(MULTIPART_INSTANTIATED, 1); + } + + @Override + public void uploadStarted() { + inc(MULTIPART_UPLOAD_STARTED, 1); + } + + @Override + public void partPut(final long lengthInBytes) { + inc(MULTIPART_PART_PUT, 1); + inc(MULTIPART_PART_PUT_BYTES, lengthInBytes); + } + + @Override + public void uploadCompleted() { + inc(MULTIPART_UPLOAD_COMPLETED, 1); + } + + @Override + public void uploadAborted() { + inc(MULTIPART_UPLOAD_ABORTED, 1); + } + + @Override + public void abortUploadsUnderPathInvoked() { + inc(MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED, 1); + } + + @Override + public void close() throws IOException { + + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java index fcb3dce4d0b45..b4974b7356514 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java @@ -102,5 +102,9 @@ public enum OperationType { * Mkdir operation. */ Mkdir, + /** + * Multipart upload operation. + */ + Upload } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java index 38b38fb7f93e7..b131320a1b7ab 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java @@ -912,17 +912,27 @@ private Collection completeAncestry( DDBPathMetadata oldEntry = ancestorState.put(path, entry); boolean addAncestors = true; if (oldEntry != null) { - if (!oldEntry.getFileStatus().isDirectory() - || !entry.getFileStatus().isDirectory()) { - // check for and warn if the existing bulk operation overwrote it. - // this should never occur outside tests explicitly creating it + // check for and warn if the existing bulk operation has an inconsistent + // entry. + // two directories or two files are both allowed. + // file-over-file can happen in multipart uploaders when the same + // uploader is overwriting file entries to the same destination as + // part of its bulk operation. 
+ boolean oldWasDir = oldEntry.getFileStatus().isDirectory(); + boolean newIsDir = entry.getFileStatus().isDirectory(); + if ((oldWasDir && !newIsDir) + || (!oldWasDir && newIsDir)) { LOG.warn("Overwriting a S3Guard file created in the operation: {}", oldEntry); LOG.warn("With new entry: {}", entry); // restore the old state ancestorState.put(path, oldEntry); // then raise an exception - throw new PathIOException(path.toString(), E_INCONSISTENT_UPDATE); + throw new PathIOException(path.toString(), + String.format("%s old %s new %s", + E_INCONSISTENT_UPDATE, + oldEntry, + entry)); } else { // a directory is already present. Log and continue. LOG.debug("Directory at {} being updated with value {}", diff --git a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader index d16846b25b52f..68a4c79250611 100644 --- a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader +++ b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -org.apache.hadoop.fs.s3a.S3AMultipartUploader +org.apache.hadoop.fs.s3a.impl.S3AMultipartUploader diff --git a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory deleted file mode 100644 index 2e4bc241d0c94..0000000000000 --- a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -org.apache.hadoop.fs.s3a.S3AMultipartUploader$Factory diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java index 059312a8103a3..8222fff614598 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java @@ -15,25 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.fs.contract.s3a; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +package org.apache.hadoop.fs.contract.s3a; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest; -import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.s3a.S3AFileSystem; -import org.apache.hadoop.fs.s3a.WriteOperationHelper; -import static org.apache.hadoop.fs.s3a.S3ATestConstants.*; -import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; +import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; +import static org.apache.hadoop.fs.s3a.S3ATestConstants.DEFAULT_SCALE_TESTS_ENABLED; +import static org.apache.hadoop.fs.s3a.S3ATestConstants.KEY_HUGE_PARTITION_SIZE; +import static org.apache.hadoop.fs.s3a.S3ATestConstants.KEY_SCALE_TESTS_ENABLED; +import static org.apache.hadoop.fs.s3a.S3ATestConstants.SCALE_TEST_TIMEOUT_MILLIS; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestPropertyBool; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestPropertyBytes; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeEnableS3Guard; import static org.apache.hadoop.fs.s3a.scale.AbstractSTestS3AHugeFiles.DEFAULT_HUGE_PARTITION_SIZE; /** * Test MultipartUploader with S3A. + *

* Although not an S3A Scale test subclass, it uses the -Dscale option * to enable it, and partition size option to control the size of * parts uploaded. @@ -41,14 +44,11 @@ public class ITestS3AContractMultipartUploader extends AbstractContractMultipartUploaderTest { - private static final Logger LOG = - LoggerFactory.getLogger(ITestS3AContractMultipartUploader.class); - private int partitionSize; /** * S3 requires a minimum part size of 5MB (except the last part). - * @return 5MB + * @return 5MB+ value */ @Override protected int partSizeInBytes() { @@ -126,37 +126,15 @@ public void setup() throws Exception { DEFAULT_HUGE_PARTITION_SIZE); } - /** - * Extend superclass teardown with actions to help clean up the S3 store, - * including aborting uploads under the test path. - */ - @Override - public void teardown() throws Exception { - Path teardown = path("teardown").getParent(); - S3AFileSystem fs = getFileSystem(); - if (fs != null) { - WriteOperationHelper helper = fs.getWriteOperationHelper(); - try { - LOG.info("Teardown: aborting outstanding uploads under {}", teardown); - int count = helper.abortMultipartUploadsUnderPath( - fs.pathToKey(teardown)); - LOG.info("Found {} incomplete uploads", count); - } catch (Exception e) { - LOG.warn("Exeception in teardown", e); - } - } - super.teardown(); - } - /** * S3 has no concept of directories, so this test does not apply. */ public void testDirectoryInTheWay() throws Exception { - // no-op + skip("unsupported"); } @Override public void testMultipartUploadReverseOrder() throws Exception { - ContractTestUtils.skip("skipped for speed"); + skip("skipped for speed"); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java index 244d2eed324c7..c9d872e591f41 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java @@ -203,29 +203,31 @@ private StoreContext createMockStoreContext(boolean multiDelete, OperationTrackingStore store) throws URISyntaxException, IOException { URI name = new URI("s3a://bucket"); Configuration conf = new Configuration(); - return new StoreContext( - name, - "bucket", - conf, - "alice", - UserGroupInformation.getCurrentUser(), - BlockingThreadPoolExecutorService.newInstance( + return new StoreContextBuilder().setFsURI(name) + .setBucket("bucket") + .setConfiguration(conf) + .setUsername("alice") + .setOwner(UserGroupInformation.getCurrentUser()) + .setExecutor(BlockingThreadPoolExecutorService.newInstance( 4, 4, 10, TimeUnit.SECONDS, - "s3a-transfer-shared"), - Constants.DEFAULT_EXECUTOR_CAPACITY, - new Invoker(RetryPolicies.TRY_ONCE_THEN_FAIL, Invoker.LOG_EVENT), - new S3AInstrumentation(name), - new S3AStorageStatistics(), - S3AInputPolicy.Normal, - ChangeDetectionPolicy.createPolicy(ChangeDetectionPolicy.Mode.None, - ChangeDetectionPolicy.Source.ETag, false), - multiDelete, - store, - false, - CONTEXT_ACCESSORS, - new S3Guard.TtlTimeProvider(conf)); + "s3a-transfer-shared")) + .setExecutorCapacity(Constants.DEFAULT_EXECUTOR_CAPACITY) + .setInvoker( + new Invoker(RetryPolicies.TRY_ONCE_THEN_FAIL, Invoker.LOG_EVENT)) + .setInstrumentation(new S3AInstrumentation(name)) + .setStorageStatistics(new S3AStorageStatistics()) + .setInputPolicy(S3AInputPolicy.Normal) + .setChangeDetectionPolicy( + 
ChangeDetectionPolicy.createPolicy(ChangeDetectionPolicy.Mode.None, + ChangeDetectionPolicy.Source.ETag, false)) + .setMultiObjectDeleteEnabled(multiDelete) + .setMetadataStore(store) + .setUseListV1(false) + .setContextAccessors(CONTEXT_ACCESSORS) + .setTimeProvider(new S3Guard.TtlTimeProvider(conf)) + .build(); } private static class MinimalContextAccessor implements ContextAccessors { @@ -251,6 +253,10 @@ public String getBucketLocation() throws IOException { return null; } + @Override + public Path makeQualified(final Path path) { + return path; + } } /** * MetadataStore which tracks what is deleted and added. diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AMultipartUploaderSupport.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestS3AMultipartUploaderSupport.java similarity index 56% rename from hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AMultipartUploaderSupport.java rename to hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestS3AMultipartUploaderSupport.java index 4825d26eeb068..71305aa6633e0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AMultipartUploaderSupport.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestS3AMultipartUploaderSupport.java @@ -16,51 +16,60 @@ * limitations under the License. */ -package org.apache.hadoop.fs.s3a; +package org.apache.hadoop.fs.s3a.impl; import java.io.EOFException; import java.io.IOException; import org.junit.Test; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.test.HadoopTestBase; -import static org.apache.hadoop.fs.s3a.S3AMultipartUploader.*; -import static org.apache.hadoop.fs.s3a.S3AMultipartUploader.parsePartHandlePayload; +import static org.apache.hadoop.fs.s3a.impl.S3AMultipartUploader.PartHandlePayload; +import static org.apache.hadoop.fs.s3a.impl.S3AMultipartUploader.buildPartHandlePayload; +import static org.apache.hadoop.fs.s3a.impl.S3AMultipartUploader.parsePartHandlePayload; import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** - * Test multipart upload support methods and classes. + * Unit test of multipart upload support methods and classes. 
*/ public class TestS3AMultipartUploaderSupport extends HadoopTestBase { + public static final String PATH = "s3a://bucket/path"; + + public static final String UPLOAD = "01"; + @Test public void testRoundTrip() throws Throwable { - Pair result = roundTrip("tag", 1); - assertEquals("tag", result.getRight()); - assertEquals(1, result.getLeft().longValue()); + PartHandlePayload result = roundTrip(999, "tag", 1); + assertEquals(PATH, result.getPath()); + assertEquals(UPLOAD, result.getUploadId()); + assertEquals(999, result.getPartNumber()); + assertEquals("tag", result.getEtag()); + assertEquals(1, result.getLen()); } @Test public void testRoundTrip2() throws Throwable { long len = 1L + Integer.MAX_VALUE; - Pair result = roundTrip("11223344", - len); - assertEquals("11223344", result.getRight()); - assertEquals(len, result.getLeft().longValue()); + PartHandlePayload result = + roundTrip(1, "11223344", len); + assertEquals(1, result.getPartNumber()); + assertEquals("11223344", result.getEtag()); + assertEquals(len, result.getLen()); } @Test public void testNoEtag() throws Throwable { intercept(IllegalArgumentException.class, - () -> buildPartHandlePayload("", 1)); + () -> buildPartHandlePayload(PATH, UPLOAD, + 0, "", 1)); } @Test public void testNoLen() throws Throwable { intercept(IllegalArgumentException.class, - () -> buildPartHandlePayload("tag", -1)); + () -> buildPartHandlePayload(PATH, UPLOAD, 0, "tag", -1)); } @Test @@ -71,14 +80,17 @@ public void testBadPayload() throws Throwable { @Test public void testBadHeader() throws Throwable { - byte[] bytes = buildPartHandlePayload("tag", 1); - bytes[2]='f'; + byte[] bytes = buildPartHandlePayload(PATH, UPLOAD, 0, "tag", 1); + bytes[2] = 'f'; intercept(IOException.class, "header", () -> parsePartHandlePayload(bytes)); } - private Pair roundTrip(final String tag, final long len) throws IOException { - byte[] bytes = buildPartHandlePayload(tag, len); + private PartHandlePayload roundTrip( + int partNumber, + String tag, + long len) throws IOException { + byte[] bytes = buildPartHandlePayload(PATH, UPLOAD, partNumber, tag, len); return parsePartHandlePayload(bytes); } } From 806d84b79c97cd0bbed324f6a324d7c110a6fd87 Mon Sep 17 00:00:00 2001 From: jimmy-zuber-amzn <67486813+jimmy-zuber-amzn@users.noreply.github.com> Date: Mon, 13 Jul 2020 11:07:48 -0700 Subject: [PATCH 102/131] HADOOP-17105. S3AFS - Do not attempt to resolve symlinks in globStatus (#2113) Contributed by Jimmy Zuber. --- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 10 +++-- .../fs/s3a/ITestS3AFileOperationCost.java | 44 +++++++++++++++++++ 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index e5b08f1bb903e..f123f6b3376b6 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -3980,6 +3980,8 @@ public boolean isMagicCommitPath(Path path) { /** * Increments the statistic {@link Statistic#INVOCATION_GLOB_STATUS}. + * Override superclass so as to disable symlink resolution as symlinks + * are not supported by S3A. 
* {@inheritDoc} */ @Override @@ -3988,9 +3990,9 @@ public FileStatus[] globStatus(Path pathPattern) throws IOException { } /** - * Override superclass so as to disable symlink resolution and so avoid - * some calls to the FS which may have problems when the store is being - * inconsistent. + * Increments the statistic {@link Statistic#INVOCATION_GLOB_STATUS}. + * Override superclass so as to disable symlink resolution as symlinks + * are not supported by S3A. * {@inheritDoc} */ @Override @@ -4002,7 +4004,7 @@ public FileStatus[] globStatus( return Globber.createGlobber(this) .withPathPattern(pathPattern) .withPathFiltern(filter) - .withResolveSymlinks(true) + .withResolveSymlinks(false) .build() .glob(); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java index b2b983c4d4df7..cd8d7d5d53a3a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java @@ -574,4 +574,48 @@ public void testCreateCost() throws Throwable { } } + + @Test + public void testCostOfGlobStatus() throws Throwable { + describe("Test globStatus has expected cost"); + S3AFileSystem fs = getFileSystem(); + assume("Unguarded FS only", !fs.hasMetadataStore()); + + Path basePath = path("testCostOfGlobStatus/nextFolder/"); + + // create a bunch of files + int filesToCreate = 10; + for (int i = 0; i < filesToCreate; i++) { + try (FSDataOutputStream out = fs.create(basePath.suffix("/" + i))) { + verifyOperationCount(1, 1); + } + } + + fs.globStatus(basePath.suffix("/*")); + // 2 head + 1 list from getFileStatus on path, + // plus 1 list to match the glob pattern + verifyOperationCount(2, 2); + } + + @Test + public void testCostOfGlobStatusNoSymlinkResolution() throws Throwable { + describe("Test globStatus does not attempt to resolve symlinks"); + S3AFileSystem fs = getFileSystem(); + assume("Unguarded FS only", !fs.hasMetadataStore()); + + Path basePath = path("testCostOfGlobStatusNoSymlinkResolution/f/"); + + // create a single file, globStatus returning a single file on a pattern + // triggers attempts at symlinks resolution if configured + String fileName = "/notASymlinkDOntResolveMeLikeOne"; + try (FSDataOutputStream out = fs.create(basePath.suffix(fileName))) { + verifyOperationCount(1, 1); + } + + fs.globStatus(basePath.suffix("/*")); + // unguarded: 2 head + 1 list from getFileStatus on path, + // plus 1 list to match the glob pattern + // no additional operations from symlink resolution + verifyOperationCount(2, 2); + } } From 0427100b7543d412f4fafe631b7ace289662d28c Mon Sep 17 00:00:00 2001 From: Eric E Payne Date: Mon, 13 Jul 2020 18:57:50 +0000 Subject: [PATCH 103/131] YARN-10297. TestContinuousScheduling#testFairSchedulerContinuousSchedulingInitTime fails intermittently. 
Contributed by Jim Brennan (Jim_Brennan) --- .../scheduler/fair/TestContinuousScheduling.java | 5 +++++ .../resourcemanager/scheduler/fair/TestFairScheduler.java | 1 + 2 files changed, 6 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java index 443c7963cc932..1d1a7ba6321eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java @@ -20,6 +20,7 @@ import com.google.common.base.Supplier; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -35,6 +36,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ClusterNodeTracker; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; @@ -86,6 +88,9 @@ public Configuration createConfiguration() { @SuppressWarnings("deprecation") @Before public void setup() { + QueueMetrics.clearQueueMetrics(); + DefaultMetricsSystem.setMiniClusterMode(true); + mockClock = new ControlledClock(); conf = createConfiguration(); resourceManager = new MockRM(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 05ec09e66741f..7882ba3664cd6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -146,6 +146,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { @Before public void setUp() throws IOException { + DefaultMetricsSystem.setMiniClusterMode(true); scheduler = new FairScheduler(); conf = createConfiguration(); resourceManager = new MockRM(conf); From e62d8f841275ee47a0ba911415aac9e39af291c6 Mon Sep 17 00:00:00 2001 From: Hanisha Koneru Date: Mon, 13 Jul 2020 12:55:34 -0700 Subject: [PATCH 104/131] HADOOP-17116. 
Skip Retry INFO logging on first failover from a proxy --- .../io/retry/RetryInvocationHandler.java | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java index 64824a15cd89c..6db00d724aa35 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java @@ -35,6 +35,7 @@ import java.lang.reflect.Proxy; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.Map; /** @@ -312,6 +313,8 @@ public String toString() { private volatile boolean hasSuccessfulCall = false; + private HashSet failedAtLeastOnce = new HashSet<>(); + private final RetryPolicy defaultPolicy; private final Map methodNameToPolicyMap; @@ -390,12 +393,18 @@ private RetryInfo handleException(final Method method, final int callId, private void log(final Method method, final boolean isFailover, final int failovers, final long delay, final Exception ex) { - // log info if this has made some successful calls or - // this is not the first failover - final boolean info = hasSuccessfulCall || failovers != 0 - || asyncCallHandler.hasSuccessfulCall(); - if (!info && !LOG.isDebugEnabled()) { - return; + boolean info = true; + // If this is the first failover to this proxy, skip logging at INFO level + if (!failedAtLeastOnce.contains(proxyDescriptor.getProxyInfo().toString())) + { + failedAtLeastOnce.add(proxyDescriptor.getProxyInfo().toString()); + + // If successful calls were made to this proxy, log info even for first + // failover + info = hasSuccessfulCall || asyncCallHandler.hasSuccessfulCall(); + if (!info && !LOG.isDebugEnabled()) { + return; + } } final StringBuilder b = new StringBuilder() From 48f90115b5ecb37f814af281f09bb404361b2bba Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Mon, 13 Jul 2020 23:09:12 +0000 Subject: [PATCH 105/131] YARN-10348. Allow RM to always cancel tokens after app completes. 
Contributed by Jim Brennan --- .../hadoop/yarn/conf/YarnConfiguration.java | 3 + .../src/main/resources/yarn-default.xml | 10 +++ .../security/DelegationTokenRenewer.java | 8 +- .../security/TestDelegationTokenRenewer.java | 75 ++++++++++++++++++- 4 files changed, 93 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 156943c7f6b41..0abd67eae1d8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -743,6 +743,9 @@ public static boolean isAclEnabled(Configuration conf) { RM_PREFIX + "delegation-token.max-conf-size-bytes"; public static final int DEFAULT_RM_DELEGATION_TOKEN_MAX_CONF_SIZE_BYTES = 12800; + public static final String RM_DELEGATION_TOKEN_ALWAYS_CANCEL = + RM_PREFIX + "delegation-token.always-cancel"; + public static final boolean DEFAULT_RM_DELEGATION_TOKEN_ALWAYS_CANCEL = false; public static final String RM_DT_RENEWER_THREAD_TIMEOUT = RM_PREFIX + "delegation-token-renewer.thread-timeout"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 1507296e14662..67da860cf56dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -804,6 +804,16 @@ 12800 + + If true, ResourceManager will always try to cancel delegation + tokens after the application completes, even if the client sets + shouldCancelAtEnd false. References to delegation tokens are tracked, + so they will not be canceled until all sub-tasks are done using them. + + yarn.resourcemanager.delegation-token.always-cancel + false + + If true, ResourceManager will have proxy-user privileges. 
Use case: In a secure cluster, YARN requires the user hdfs delegation-tokens to diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index fd8935debbcaf..4c21b55e24c99 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -115,6 +115,7 @@ public class DelegationTokenRenewer extends AbstractService { private volatile boolean isServiceStarted; private LinkedBlockingQueue pendingEventQueue; + private boolean alwaysCancelDelegationTokens; private boolean tokenKeepAliveEnabled; private boolean hasProxyUserPrivileges; private long credentialsValidTimeRemaining; @@ -137,6 +138,9 @@ public DelegationTokenRenewer() { @Override protected void serviceInit(Configuration conf) throws Exception { + this.alwaysCancelDelegationTokens = + conf.getBoolean(YarnConfiguration.RM_DELEGATION_TOKEN_ALWAYS_CANCEL, + YarnConfiguration.DEFAULT_RM_DELEGATION_TOKEN_ALWAYS_CANCEL); this.hasProxyUserPrivileges = conf.getBoolean(YarnConfiguration.RM_PROXY_USER_PRIVILEGES_ENABLED, YarnConfiguration.DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED); @@ -268,7 +272,7 @@ protected void serviceStop() { * */ @VisibleForTesting - protected static class DelegationTokenToRenew { + protected class DelegationTokenToRenew { public final Token token; public final Collection referringAppIds; public final Configuration conf; @@ -298,7 +302,7 @@ public DelegationTokenToRenew(Collection applicationIds, this.conf = conf; this.expirationDate = expirationDate; this.timerTask = null; - this.shouldCancelAtEnd = shouldCancelAtEnd; + this.shouldCancelAtEnd = shouldCancelAtEnd | alwaysCancelDelegationTokens; } public void setTimerTask(RenewalTimerTask tTask) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 31a87cb71bec4..01cf3b6f5bb60 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -217,6 +217,8 @@ public void setUp() throws Exception { conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); conf.set("override_token_expire_time", "3000"); + conf.setBoolean(YarnConfiguration.RM_DELEGATION_TOKEN_ALWAYS_CANCEL, + false); UserGroupInformation.setConfiguration(conf); eventQueue = new LinkedBlockingQueue(); dispatcher = new AsyncDispatcher(eventQueue); @@ -608,6 +610,77 @@ public void 
testDTRenewalWithNoCancel () throws Exception { token1.renew(conf); } + /** + * Basic idea of the test: + * 1. Verify that YarnConfiguration.RM_DELEGATION_TOKEN_ALWAYS_CANCEL = true + * overrides shouldCancelAtEnd + * 2. register a token for 2 seconds with shouldCancelAtEnd = false + * 3. cancel it immediately + * 4. check that token was canceled + * @throws IOException + * @throws URISyntaxException + */ + @Test(timeout=60000) + public void testDTRenewalWithNoCancelAlwaysCancel() throws Exception { + Configuration lconf = new Configuration(conf); + lconf.setBoolean(YarnConfiguration.RM_DELEGATION_TOKEN_ALWAYS_CANCEL, + true); + + DelegationTokenRenewer localDtr = + createNewDelegationTokenRenewer(lconf, counter); + RMContext mockContext = mock(RMContext.class); + when(mockContext.getSystemCredentialsForApps()).thenReturn( + new ConcurrentHashMap()); + ClientRMService mockClientRMService = mock(ClientRMService.class); + when(mockContext.getClientRMService()).thenReturn(mockClientRMService); + when(mockContext.getDelegationTokenRenewer()).thenReturn( + localDtr); + when(mockContext.getDispatcher()).thenReturn(dispatcher); + InetSocketAddress sockAddr = + InetSocketAddress.createUnresolved("localhost", 1234); + when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); + localDtr.setDelegationTokenRenewerPoolTracker(false); + localDtr.setRMContext(mockContext); + localDtr.init(lconf); + localDtr.start(); + + MyFS dfs = (MyFS)FileSystem.get(lconf); + LOG.info("dfs="+(Object)dfs.hashCode() + ";conf="+lconf.hashCode()); + + Credentials ts = new Credentials(); + MyToken token1 = dfs.getDelegationToken("user1"); + + //to cause this one to be set for renew in 2 secs + Renewer.tokenToRenewIn2Sec = token1; + LOG.info("token="+token1+" should be renewed for 2 secs"); + + String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0"; + ts.addToken(new Text(nn1), token1); + + ApplicationId applicationId = BuilderUtils.newApplicationId(0, 1); + localDtr.addApplicationAsync(applicationId, ts, false, "user", + new Configuration()); + waitForEventsToGetProcessed(localDtr); + localDtr.applicationFinished(applicationId); + waitForEventsToGetProcessed(localDtr); + + int numberOfExpectedRenewals = Renewer.counter; // number of renewals so far + try { + Thread.sleep(6*1000); // sleep 6 seconds, so it has time to renew + } catch (InterruptedException e) {} + LOG.info("Counter = " + Renewer.counter + ";t="+ Renewer.lastRenewed); + + // counter and the token should still be the old ones + assertEquals("renew wasn't called as many times as expected", + numberOfExpectedRenewals, Renewer.counter); + + // The token should have been cancelled at this point. Renewal will fail. + try { + token1.renew(lconf); + fail("Renewal of cancelled token should have failed"); + } catch (InvalidToken ite) {} + } + /** * Basic idea of the test: * 0. Setup token KEEP_ALIVE @@ -1616,7 +1689,7 @@ protected Token[] obtainSystemTokensForUser(String user, // Ensure incrTokenSequenceNo has been called for new token request Mockito.verify(mockContext, Mockito.times(1)).incrTokenSequenceNo(); - DelegationTokenToRenew dttr = new DelegationTokenToRenew(appIds, + DelegationTokenToRenew dttr = dtr.new DelegationTokenToRenew(appIds, expectedToken, conf, 1000, false, "user1"); dtr.requestNewHdfsDelegationTokenIfNeeded(dttr); From 380e0f4506a818d6337271ae6d996927f70b601b Mon Sep 17 00:00:00 2001 From: Anoop Sam John Date: Tue, 14 Jul 2020 18:37:27 +0530 Subject: [PATCH 106/131] HADOOP-16998. 
WASB : NativeAzureFsOutputStream#close() throwing IllegalArgumentException (#2073) Contributed by Anoop Sam John. --- .../fs/azure/SyncableDataOutputStream.java | 37 +++++++++- .../azure/TestSyncableDataOutputStream.java | 68 +++++++++++++++++++ 2 files changed, 104 insertions(+), 1 deletion(-) create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestSyncableDataOutputStream.java diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java index dcfff2fbe3784..14ddb02fc4a6b 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java @@ -22,9 +22,12 @@ import java.io.IOException; import java.io.OutputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; -import org.apache.hadoop.classification.InterfaceAudience; /** * Support the Syncable interface on top of a DataOutputStream. @@ -35,6 +38,8 @@ public class SyncableDataOutputStream extends DataOutputStream implements Syncable, StreamCapabilities { + private static final Logger LOG = LoggerFactory.getLogger(SyncableDataOutputStream.class); + public SyncableDataOutputStream(OutputStream out) { super(out); } @@ -70,4 +75,34 @@ public void hsync() throws IOException { ((Syncable) out).hsync(); } } + + @Override + public void close() throws IOException { + IOException ioeFromFlush = null; + try { + flush(); + } catch (IOException e) { + ioeFromFlush = e; + throw e; + } finally { + try { + this.out.close(); + } catch (IOException e) { + // If there was an Exception during flush(), the Azure SDK will throw back the + // same Exception when we call close on the same stream. When both try and finally + // throw, Java uses Throwable#addSuppressed on one of the Exceptions so that the + // caller gets a single exception back. If both Exceptions are the same instance, + // addSuppressed throws an IllegalArgumentException, which means the caller ends + // up with a non-IOE. The special handling below is to avoid this. + if (ioeFromFlush == e) { + // Do nothing. + // The close() call threw back the same IOE that flush() threw. Just swallow it. + LOG.debug("flush() and close() throwing back same Exception. Just swallowing the latter", e); + } else { + // Let Java handle two different Exceptions being thrown from try and finally. + throw e; + } + } + } + } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestSyncableDataOutputStream.java new file mode 100644 index 0000000000000..c8c6d93f49d9a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestSyncableDataOutputStream.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.azure; + +import java.io.IOException; +import java.io.OutputStream; + +import org.junit.Test; + +import org.apache.hadoop.test.LambdaTestUtils; + +public class TestSyncableDataOutputStream { + + @Test + public void testCloseWhenFlushThrowingIOException() throws Exception { + MockOutputStream out = new MockOutputStream(); + SyncableDataOutputStream sdos = new SyncableDataOutputStream(out); + out.flushThrowIOE = true; + LambdaTestUtils.intercept(IOException.class, "An IOE from flush", () -> sdos.close()); + MockOutputStream out2 = new MockOutputStream(); + out2.flushThrowIOE = true; + LambdaTestUtils.intercept(IOException.class, "An IOE from flush", () -> { + try (SyncableDataOutputStream sdos2 = new SyncableDataOutputStream(out2)) { + } + }); + } + + private static class MockOutputStream extends OutputStream { + + private boolean flushThrowIOE = false; + private IOException lastException = null; + + @Override + public void write(int arg0) throws IOException { + + } + + @Override + public void flush() throws IOException { + if (this.flushThrowIOE) { + this.lastException = new IOException("An IOE from flush"); + throw this.lastException; + } + } + + @Override + public void close() throws IOException { + if (this.lastException != null) { + throw this.lastException; + } + } + } +} From bdce75d737bc7d207c777bb0a9e5fc4c9a78cc0a Mon Sep 17 00:00:00 2001 From: zhaoyim Date: Tue, 14 Jul 2020 21:42:12 +0800 Subject: [PATCH 107/131] HDFS-15371. Nonstandard characters exist in NameNode.java (#2032) Contributed by zhaoyim --- .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 7c2026c1059b0..9bffaaabebeaf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -301,7 +301,7 @@ public enum OperationCategory { DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY, DFS_HA_FENCE_METHODS_KEY, - DFS_HA_ZKFC_PORT_KEY, + DFS_HA_ZKFC_PORT_KEY }; /** From 4647a60430136aa4abc18d5112b93a8b927dbd1f Mon Sep 17 00:00:00 2001 From: Mukund Thakur Date: Tue, 14 Jul 2020 15:27:35 +0100 Subject: [PATCH 108/131] HADOOP-17022. Tune S3AFileSystem.listFiles() API. Contributed by Mukund Thakur. 
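One piece of background on the SyncableDataOutputStream#close() override earlier in this patch series (HADOOP-16998): Throwable#addSuppressed refuses to suppress an exception with itself, so when the same IOException instance surfaces from both the body and the close() of a try-with-resources block, the caller receives an IllegalArgumentException ("Self-suppression not permitted") instead of the original IOException. A small self-contained demo of that JDK behaviour (class and variable names are illustrative only):

import java.io.IOException;

public class SelfSuppressionDemo {
  public static void main(String[] args) {
    final IOException failure = new IOException("flush failed");
    try {
      // The body and the resource's close() throw the *same* instance, so the
      // compiler-generated failure.addSuppressed(failure) call blows up.
      try (AutoCloseable resource = () -> { throw failure; }) {
        throw failure;
      }
    } catch (Exception e) {
      // Prints: java.lang.IllegalArgumentException: Self-suppression not permitted
      System.out.println(e);
    }
  }
}

The overridden close() above sidesteps exactly this case by swallowing the duplicate IOException from the wrapped stream and only rethrowing when the two exceptions genuinely differ.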
Change-Id: I17f5cfdcd25670ce3ddb62c13378c7e2dc06ba52 --- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 174 +++++++++++------- .../fs/s3a/ITestS3AFileOperationCost.java | 70 +++++++ .../fs/s3a/ITestS3GuardListConsistency.java | 6 +- .../hadoop/fs/s3a/auth/ITestAssumeRole.java | 4 + 4 files changed, 189 insertions(+), 65 deletions(-) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index f123f6b3376b6..286df44939312 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -4206,79 +4206,125 @@ private RemoteIterator innerListFiles( Path path = qualify(f); LOG.debug("listFiles({}, {})", path, recursive); try { - // if a status was given, that is used, otherwise - // call getFileStatus, which triggers an existence check - final S3AFileStatus fileStatus = status != null - ? status - : (S3AFileStatus) getFileStatus(path); - if (fileStatus.isFile()) { + // if a status was given and it is a file. + if (status != null && status.isFile()) { // simple case: File - LOG.debug("Path is a file"); + LOG.debug("Path is a file: {}", path); return new Listing.SingleStatusRemoteIterator( - toLocatedFileStatus(fileStatus)); - } else { - // directory: do a bulk operation - String key = maybeAddTrailingSlash(pathToKey(path)); - String delimiter = recursive ? null : "/"; - LOG.debug("Requesting all entries under {} with delimiter '{}'", - key, delimiter); - final RemoteIterator cachedFilesIterator; - final Set tombstones; - boolean allowAuthoritative = allowAuthoritative(f); - if (recursive) { - final PathMetadata pm = metadataStore.get(path, true); - // shouldn't need to check pm.isDeleted() because that will have - // been caught by getFileStatus above. - MetadataStoreListFilesIterator metadataStoreListFilesIterator = - new MetadataStoreListFilesIterator(metadataStore, pm, - allowAuthoritative); - tombstones = metadataStoreListFilesIterator.listTombstones(); - // if all of the below is true - // - authoritative access is allowed for this metadatastore for this directory, - // - all the directory listings are authoritative on the client - // - the caller does not force non-authoritative access - // return the listing without any further s3 access - if (!forceNonAuthoritativeMS && - allowAuthoritative && - metadataStoreListFilesIterator.isRecursivelyAuthoritative()) { - S3AFileStatus[] statuses = S3Guard.iteratorToStatuses( - metadataStoreListFilesIterator, tombstones); - cachedFilesIterator = listing.createProvidedFileStatusIterator( - statuses, ACCEPT_ALL, acceptor); - return listing.createLocatedFileStatusIterator(cachedFilesIterator); - } - cachedFilesIterator = metadataStoreListFilesIterator; - } else { - DirListingMetadata meta = - S3Guard.listChildrenWithTtl(metadataStore, path, ttlTimeProvider, - allowAuthoritative); - if (meta != null) { - tombstones = meta.listTombstones(); - } else { - tombstones = null; - } - cachedFilesIterator = listing.createProvidedFileStatusIterator( - S3Guard.dirMetaToStatuses(meta), ACCEPT_ALL, acceptor); - if (allowAuthoritative && meta != null && meta.isAuthoritative()) { - // metadata listing is authoritative, so return it directly - return listing.createLocatedFileStatusIterator(cachedFilesIterator); - } + toLocatedFileStatus(status)); + } + // Assuming the path to be a directory + // do a bulk operation. 
+ RemoteIterator listFilesAssumingDir = + getListFilesAssumingDir(path, + recursive, + acceptor, + collectTombstones, + forceNonAuthoritativeMS); + // If there are no list entries present, we + // fallback to file existence check as the path + // can be a file or empty directory. + if (!listFilesAssumingDir.hasNext()) { + // If file status was already passed, reuse it. + final S3AFileStatus fileStatus = status != null + ? status + : (S3AFileStatus) getFileStatus(path); + if (fileStatus.isFile()) { + return new Listing.SingleStatusRemoteIterator( + toLocatedFileStatus(fileStatus)); } - return listing.createTombstoneReconcilingIterator( - listing.createLocatedFileStatusIterator( - listing.createFileStatusListingIterator(path, - createListObjectsRequest(key, delimiter), - ACCEPT_ALL, - acceptor, - cachedFilesIterator)), - collectTombstones ? tombstones : null); } + // If we have reached here, it means either there are files + // in this directory or it is empty. + return listFilesAssumingDir; } catch (AmazonClientException e) { - // TODO S3Guard: retry on file not found exception throw translateException("listFiles", path, e); } } + /** + * List files under a path assuming the path to be a directory. + * @param path input path. + * @param recursive recursive listing? + * @param acceptor file status filter + * @param collectTombstones should tombstones be collected from S3Guard? + * @param forceNonAuthoritativeMS forces metadata store to act like non + * authoritative. This is useful when + * listFiles output is used by import tool. + * @return an iterator over listing. + * @throws IOException any exception. + */ + private RemoteIterator getListFilesAssumingDir( + Path path, + boolean recursive, Listing.FileStatusAcceptor acceptor, + boolean collectTombstones, + boolean forceNonAuthoritativeMS) throws IOException { + + String key = maybeAddTrailingSlash(pathToKey(path)); + String delimiter = recursive ? 
null : "/"; + LOG.debug("Requesting all entries under {} with delimiter '{}'", + key, delimiter); + final RemoteIterator cachedFilesIterator; + final Set tombstones; + boolean allowAuthoritative = allowAuthoritative(path); + if (recursive) { + final PathMetadata pm = metadataStore.get(path, true); + if (pm != null) { + if (pm.isDeleted()) { + OffsetDateTime deletedAt = OffsetDateTime + .ofInstant(Instant.ofEpochMilli( + pm.getFileStatus().getModificationTime()), + ZoneOffset.UTC); + throw new FileNotFoundException("Path " + path + " is recorded as " + + "deleted by S3Guard at " + deletedAt); + } + } + MetadataStoreListFilesIterator metadataStoreListFilesIterator = + new MetadataStoreListFilesIterator(metadataStore, pm, + allowAuthoritative); + tombstones = metadataStoreListFilesIterator.listTombstones(); + // if all of the below is true + // - authoritative access is allowed for this metadatastore + // for this directory, + // - all the directory listings are authoritative on the client + // - the caller does not force non-authoritative access + // return the listing without any further s3 access + if (!forceNonAuthoritativeMS && + allowAuthoritative && + metadataStoreListFilesIterator.isRecursivelyAuthoritative()) { + S3AFileStatus[] statuses = S3Guard.iteratorToStatuses( + metadataStoreListFilesIterator, tombstones); + cachedFilesIterator = listing.createProvidedFileStatusIterator( + statuses, ACCEPT_ALL, acceptor); + return listing.createLocatedFileStatusIterator(cachedFilesIterator); + } + cachedFilesIterator = metadataStoreListFilesIterator; + } else { + DirListingMetadata meta = + S3Guard.listChildrenWithTtl(metadataStore, path, ttlTimeProvider, + allowAuthoritative); + if (meta != null) { + tombstones = meta.listTombstones(); + } else { + tombstones = null; + } + cachedFilesIterator = listing.createProvidedFileStatusIterator( + S3Guard.dirMetaToStatuses(meta), ACCEPT_ALL, acceptor); + if (allowAuthoritative && meta != null && meta.isAuthoritative()) { + // metadata listing is authoritative, so return it directly + return listing.createLocatedFileStatusIterator(cachedFilesIterator); + } + } + return listing.createTombstoneReconcilingIterator( + listing.createLocatedFileStatusIterator( + listing.createFileStatusListingIterator(path, + createListObjectsRequest(key, delimiter), + ACCEPT_ALL, + acceptor, + cachedFilesIterator)), + collectTombstones ? tombstones : null); + } + /** * Override superclass so as to add statistic collection. 
* {@inheritDoc} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java index cd8d7d5d53a3a..e54fd97a6af1e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java @@ -168,6 +168,76 @@ public void testCostOfListLocatedStatusOnNonEmptyDir() throws Throwable { } } + @Test + public void testCostOfListFilesOnFile() throws Throwable { + describe("Performing listFiles() on a file"); + Path file = path(getMethodName() + ".txt"); + S3AFileSystem fs = getFileSystem(); + touch(fs, file); + resetMetricDiffs(); + fs.listFiles(file, true); + if (!fs.hasMetadataStore()) { + metadataRequests.assertDiffEquals(1); + } else { + if (fs.allowAuthoritative(file)) { + listRequests.assertDiffEquals(0); + } else { + listRequests.assertDiffEquals(1); + } + } + } + + @Test + public void testCostOfListFilesOnEmptyDir() throws Throwable { + describe("Performing listFiles() on an empty dir"); + Path dir = path(getMethodName()); + S3AFileSystem fs = getFileSystem(); + fs.mkdirs(dir); + resetMetricDiffs(); + fs.listFiles(dir, true); + if (!fs.hasMetadataStore()) { + verifyOperationCount(2, 1); + } else { + if (fs.allowAuthoritative(dir)) { + verifyOperationCount(0, 0); + } else { + verifyOperationCount(0, 1); + } + } + } + + @Test + public void testCostOfListFilesOnNonEmptyDir() throws Throwable { + describe("Performing listFiles() on a non empty dir"); + Path dir = path(getMethodName()); + S3AFileSystem fs = getFileSystem(); + fs.mkdirs(dir); + Path file = new Path(dir, "file.txt"); + touch(fs, file); + resetMetricDiffs(); + fs.listFiles(dir, true); + if (!fs.hasMetadataStore()) { + verifyOperationCount(0, 1); + } else { + if (fs.allowAuthoritative(dir)) { + verifyOperationCount(0, 0); + } else { + verifyOperationCount(0, 1); + } + } + } + + @Test + public void testCostOfListFilesOnNonExistingDir() throws Throwable { + describe("Performing listFiles() on a non existing dir"); + Path dir = path(getMethodName()); + S3AFileSystem fs = getFileSystem(); + resetMetricDiffs(); + intercept(FileNotFoundException.class, + () -> fs.listFiles(dir, true)); + verifyOperationCount(2, 2); + } + @Test public void testCostOfGetFileStatusOnFile() throws Throwable { describe("performing getFileStatus on a file"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java index 6e55796fd3ae5..3c67e252e6e69 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.contract.s3a.S3AContract; import com.google.common.collect.Lists; @@ -271,7 +272,10 @@ public void testConsistentRenameAfterDelete() throws Exception { assertTrue(fs.delete(testDirs[1], false)); assertTrue(fs.delete(testDirs[2], false)); - fs.rename(path("a"), path("a3")); + ContractTestUtils.rename(fs, path("a"), path("a3")); + 
ContractTestUtils.assertPathsDoNotExist(fs, + "Source paths shouldn't exist post rename operation", + testDirs[0], testDirs[1], testDirs[2]); FileStatus[] paths = fs.listStatus(path("a3/b")); List list = new ArrayList<>(); for (FileStatus fileState : paths) { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java index cf935d28591ba..4f6a1ff417873 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java @@ -397,6 +397,10 @@ public void testAssumeRoleRestrictedPolicyFS() throws Exception { } forbidden("", () -> fs.listStatus(ROOT)); + forbidden("", + () -> fs.listFiles(ROOT, true)); + forbidden("", + () -> fs.listLocatedStatus(ROOT)); forbidden("", () -> fs.mkdirs(path("testAssumeRoleFS"))); } From 317fe4584a51cfe553e4098d48170cd2898b9732 Mon Sep 17 00:00:00 2001 From: Erik Krogen Date: Tue, 14 Jul 2020 11:22:16 -0700 Subject: [PATCH 109/131] HADOOP-17127. Use RpcMetrics.TIMEUNIT to initialize rpc queueTime and processingTime. Contributed by Jim Brennan. --- .../org/apache/hadoop/ipc/DecayRpcScheduler.java | 5 +++-- .../java/org/apache/hadoop/ipc/RpcScheduler.java | 12 ++++++------ .../src/test/java/org/apache/hadoop/ipc/TestRPC.java | 6 +++++- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java index 3e952eb63c3ff..45cbd4e99dff8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java @@ -43,6 +43,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.metrics.DecayRpcSchedulerDetailedMetrics; +import org.apache.hadoop.ipc.metrics.RpcMetrics; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; @@ -632,8 +633,8 @@ public void addResponseTime(String callName, Schedulable schedulable, addCost(user, processingCost); int priorityLevel = schedulable.getPriorityLevel(); - long queueTime = details.get(Timing.QUEUE, TimeUnit.MILLISECONDS); - long processingTime = details.get(Timing.PROCESSING, TimeUnit.MILLISECONDS); + long queueTime = details.get(Timing.QUEUE, RpcMetrics.TIMEUNIT); + long processingTime = details.get(Timing.PROCESSING, RpcMetrics.TIMEUNIT); this.decayRpcSchedulerDetailedMetrics.addQueueTime( priorityLevel, queueTime); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java index 63812f47f2db0..5202c6b356177 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ipc; -import java.util.concurrent.TimeUnit; +import org.apache.hadoop.ipc.metrics.RpcMetrics; /** * Implement this interface to be used for RPC scheduling and backoff. 
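Both the DecayRpcScheduler hunk above and the RpcScheduler/TestRPC hunks below replace hard-coded TimeUnit.MILLISECONDS with RpcMetrics.TIMEUNIT when pulling queue and processing times out of ProcessingDetails, so scheduler-side figures stay in whatever unit the RPC metrics are recorded in. A tiny standalone illustration of the conversion, using milliseconds as an assumed stand-in for RpcMetrics.TIMEUNIT:

import java.util.concurrent.TimeUnit;

public class RpcTimeUnitDemo {
  public static void main(String[] args) {
    // Stand-in for RpcMetrics.TIMEUNIT; milliseconds is an assumption here. The
    // point is to convert through the shared constant rather than hard-code a unit.
    TimeUnit metricsUnit = TimeUnit.MILLISECONDS;
    long lockWait = metricsUnit.convert(10L, TimeUnit.SECONDS);
    System.out.println(lockWait); // 10000 when the unit is milliseconds
  }
}

The updated TestRPC gauge assertion below performs the same RpcMetrics.TIMEUNIT.convert(10L, TimeUnit.SECONDS) computation instead of assuming a fixed 10000.0.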
@@ -62,12 +62,12 @@ default void addResponseTime(String callName, Schedulable schedulable, // this interface, a default implementation is supplied which uses the old // method. All new implementations MUST override this interface and should // NOT use the other addResponseTime method. - int queueTimeMs = (int) - details.get(ProcessingDetails.Timing.QUEUE, TimeUnit.MILLISECONDS); - int processingTimeMs = (int) - details.get(ProcessingDetails.Timing.PROCESSING, TimeUnit.MILLISECONDS); + int queueTime = (int) + details.get(ProcessingDetails.Timing.QUEUE, RpcMetrics.TIMEUNIT); + int processingTime = (int) + details.get(ProcessingDetails.Timing.PROCESSING, RpcMetrics.TIMEUNIT); addResponseTime(callName, schedulable.getPriorityLevel(), - queueTimeMs, processingTimeMs); + queueTime, processingTime); } void stop(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index 640ca3d2b89ed..cd2433a8aff10 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ipc.Client.ConnectionId; import org.apache.hadoop.ipc.Server.Call; import org.apache.hadoop.ipc.Server.Connection; +import org.apache.hadoop.ipc.metrics.RpcMetrics; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto; import org.apache.hadoop.ipc.protobuf.TestProtos; @@ -81,6 +82,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; @@ -1095,7 +1097,9 @@ public TestRpcService run() { proxy.lockAndSleep(null, newSleepRequest(5)); rpcMetrics = getMetrics(server.getRpcMetrics().name()); - assertGauge("RpcLockWaitTimeAvgTime", 10000.0, rpcMetrics); + assertGauge("RpcLockWaitTimeAvgTime", + (double)(RpcMetrics.TIMEUNIT.convert(10L, TimeUnit.SECONDS)), + rpcMetrics); } finally { if (proxy2 != null) { RPC.stopProxy(proxy2); From cce5a6f6094cefd2e23b73d202cc173cf4fc2cc5 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Wed, 15 Jul 2020 10:09:48 +0530 Subject: [PATCH 110/131] HDFS-15385 Upgrade boost library to 1.72 (#2051) * Removed the asio-1.10.2 that was checked in under libhdfspp/third_party directory. 
--- BUILDING.txt | 8 + dev-support/docker/Dockerfile | 15 + dev-support/docker/Dockerfile_aarch64 | 15 + .../src/CMakeLists.txt | 2 + .../src/main/native/libhdfspp/CMakeLists.txt | 4 +- .../libhdfspp/include/hdfspp/ioservice.h | 7 +- .../libhdfspp/lib/common/CMakeLists.txt | 1 + .../libhdfspp/lib/common/async_stream.h | 21 +- .../libhdfspp/lib/common/continuation/asio.h | 9 +- .../lib/common/continuation/protobuf.h | 16 +- .../native/libhdfspp/lib/common/fsinfo.cc | 2 + .../libhdfspp/lib/common/ioservice_impl.cc | 4 +- .../libhdfspp/lib/common/ioservice_impl.h | 6 +- .../native/libhdfspp/lib/common/logging.cc | 2 +- .../native/libhdfspp/lib/common/logging.h | 4 +- .../libhdfspp/lib/common/namenode_info.cc | 10 +- .../libhdfspp/lib/common/namenode_info.h | 4 +- .../libhdfspp/lib/common/retry_policy.cc | 4 +- .../main/native/libhdfspp/lib/common/util.cc | 6 +- .../main/native/libhdfspp/lib/common/util.h | 13 +- .../lib/connection/datanodeconnection.cc | 12 +- .../lib/connection/datanodeconnection.h | 13 +- .../native/libhdfspp/lib/fs/filehandle.cc | 8 +- .../native/libhdfspp/lib/fs/filesystem.cc | 4 +- .../libhdfspp/lib/fs/namenode_operations.cc | 4 +- .../libhdfspp/lib/reader/block_reader.cc | 34 +- .../libhdfspp/lib/reader/datatransfer.h | 6 +- .../libhdfspp/lib/reader/datatransfer_impl.h | 6 +- .../libhdfspp/lib/rpc/namenode_tracker.cc | 8 +- .../libhdfspp/lib/rpc/namenode_tracker.h | 8 +- .../main/native/libhdfspp/lib/rpc/request.h | 6 +- .../native/libhdfspp/lib/rpc/rpc_connection.h | 14 +- .../libhdfspp/lib/rpc/rpc_connection_impl.cc | 12 +- .../libhdfspp/lib/rpc/rpc_connection_impl.h | 88 +- .../native/libhdfspp/lib/rpc/rpc_engine.cc | 8 +- .../native/libhdfspp/lib/rpc/rpc_engine.h | 8 +- .../native/libhdfspp/tests/CMakeLists.txt | 4 +- .../libhdfspp/tests/bad_datanode_test.cc | 21 +- .../native/libhdfspp/tests/mock_connection.cc | 4 +- .../native/libhdfspp/tests/mock_connection.h | 44 +- .../tests/remote_block_reader_test.cc | 35 +- .../native/libhdfspp/tests/rpc_engine_test.cc | 64 +- .../libhdfspp/third_party/asio-1.10.2/COPYING | 4 - .../third_party/asio-1.10.2/include/asio.hpp | 122 - .../asio-1.10.2/include/asio/async_result.hpp | 94 - .../include/asio/basic_datagram_socket.hpp | 949 ----- .../include/asio/basic_deadline_timer.hpp | 520 --- .../include/asio/basic_io_object.hpp | 240 -- .../include/asio/basic_raw_socket.hpp | 940 ----- .../include/asio/basic_seq_packet_socket.hpp | 565 --- .../include/asio/basic_serial_port.hpp | 695 ---- .../include/asio/basic_signal_set.hpp | 384 -- .../asio-1.10.2/include/asio/basic_socket.hpp | 1518 -------- .../include/asio/basic_socket_acceptor.hpp | 1136 ------ .../include/asio/basic_socket_iostream.hpp | 286 -- .../include/asio/basic_socket_streambuf.hpp | 567 --- .../include/asio/basic_stream_socket.hpp | 852 ----- .../include/asio/basic_streambuf.hpp | 369 -- .../include/asio/basic_streambuf_fwd.hpp | 33 - .../include/asio/basic_waitable_timer.hpp | 519 --- .../asio-1.10.2/include/asio/buffer.hpp | 2239 ----------- .../include/asio/buffered_read_stream.hpp | 244 -- .../include/asio/buffered_read_stream_fwd.hpp | 25 - .../include/asio/buffered_stream.hpp | 258 -- .../include/asio/buffered_stream_fwd.hpp | 25 - .../include/asio/buffered_write_stream.hpp | 236 -- .../asio/buffered_write_stream_fwd.hpp | 25 - .../include/asio/buffers_iterator.hpp | 481 --- .../include/asio/completion_condition.hpp | 218 -- .../asio-1.10.2/include/asio/connect.hpp | 823 ---- .../asio-1.10.2/include/asio/coroutine.hpp | 328 -- 
.../include/asio/datagram_socket_service.hpp | 432 --- .../include/asio/deadline_timer.hpp | 63 - .../include/asio/deadline_timer_service.hpp | 173 - .../include/asio/detail/addressof.hpp | 38 - .../asio-1.10.2/include/asio/detail/array.hpp | 38 - .../include/asio/detail/array_fwd.hpp | 34 - .../include/asio/detail/assert.hpp | 32 - .../include/asio/detail/atomic_count.hpp | 45 - .../asio/detail/base_from_completion_cond.hpp | 68 - .../include/asio/detail/bind_handler.hpp | 489 --- .../asio/detail/buffer_resize_guard.hpp | 66 - .../asio/detail/buffer_sequence_adapter.hpp | 383 -- .../asio/detail/buffered_stream_storage.hpp | 126 - .../include/asio/detail/call_stack.hpp | 125 - .../asio/detail/chrono_time_traits.hpp | 190 - .../asio/detail/completion_handler.hpp | 81 - .../include/asio/detail/config.hpp | 895 ----- .../include/asio/detail/consuming_buffers.hpp | 292 -- .../include/asio/detail/cstdint.hpp | 46 - .../include/asio/detail/date_time_fwd.hpp | 34 - .../asio/detail/deadline_timer_service.hpp | 227 -- .../include/asio/detail/dependent_type.hpp | 36 - .../include/asio/detail/descriptor_ops.hpp | 117 - .../asio/detail/descriptor_read_op.hpp | 119 - .../asio/detail/descriptor_write_op.hpp | 119 - .../include/asio/detail/dev_poll_reactor.hpp | 210 - .../include/asio/detail/epoll_reactor.hpp | 242 -- .../asio-1.10.2/include/asio/detail/event.hpp | 48 - .../detail/eventfd_select_interrupter.hpp | 83 - .../include/asio/detail/fd_set_adapter.hpp | 39 - .../include/asio/detail/fenced_block.hpp | 76 - .../include/asio/detail/function.hpp | 38 - .../asio/detail/gcc_arm_fenced_block.hpp | 89 - .../asio/detail/gcc_hppa_fenced_block.hpp | 66 - .../asio/detail/gcc_sync_fenced_block.hpp | 63 - .../asio/detail/gcc_x86_fenced_block.hpp | 89 - .../asio/detail/handler_alloc_helpers.hpp | 82 - .../asio/detail/handler_cont_helpers.hpp | 45 - .../asio/detail/handler_invoke_helpers.hpp | 57 - .../include/asio/detail/handler_tracking.hpp | 159 - .../asio/detail/handler_type_requirements.hpp | 488 --- .../include/asio/detail/hash_map.hpp | 331 -- .../detail/impl/buffer_sequence_adapter.ipp | 118 - .../asio/detail/impl/descriptor_ops.ipp | 451 --- .../asio/detail/impl/dev_poll_reactor.hpp | 78 - .../asio/detail/impl/dev_poll_reactor.ipp | 445 --- .../asio/detail/impl/epoll_reactor.hpp | 76 - .../asio/detail/impl/epoll_reactor.ipp | 662 ---- .../impl/eventfd_select_interrupter.ipp | 165 - .../asio/detail/impl/handler_tracking.ipp | 305 -- .../asio/detail/impl/kqueue_reactor.hpp | 80 - .../asio/detail/impl/kqueue_reactor.ipp | 463 --- .../detail/impl/pipe_select_interrupter.ipp | 124 - .../include/asio/detail/impl/posix_event.ipp | 47 - .../include/asio/detail/impl/posix_mutex.ipp | 46 - .../include/asio/detail/impl/posix_thread.ipp | 74 - .../asio/detail/impl/posix_tss_ptr.ipp | 46 - .../impl/reactive_descriptor_service.ipp | 208 - .../impl/reactive_serial_port_service.ipp | 151 - .../impl/reactive_socket_service_base.ipp | 267 -- .../detail/impl/resolver_service_base.ipp | 130 - .../asio/detail/impl/select_reactor.hpp | 87 - .../asio/detail/impl/select_reactor.ipp | 313 -- .../asio/detail/impl/service_registry.hpp | 88 - .../asio/detail/impl/service_registry.ipp | 188 - .../asio/detail/impl/signal_set_service.ipp | 647 ---- .../include/asio/detail/impl/socket_ops.ipp | 3394 ----------------- .../detail/impl/socket_select_interrupter.ipp | 175 - .../asio/detail/impl/strand_service.hpp | 118 - .../asio/detail/impl/strand_service.ipp | 176 - .../asio/detail/impl/task_io_service.hpp | 78 - 
.../asio/detail/impl/task_io_service.ipp | 474 --- .../include/asio/detail/impl/throw_error.ipp | 60 - .../asio/detail/impl/timer_queue_ptime.ipp | 84 - .../asio/detail/impl/timer_queue_set.ipp | 101 - .../include/asio/detail/impl/win_event.ipp | 67 - .../detail/impl/win_iocp_handle_service.ipp | 528 --- .../asio/detail/impl/win_iocp_io_service.hpp | 130 - .../asio/detail/impl/win_iocp_io_service.ipp | 531 --- .../impl/win_iocp_serial_port_service.ipp | 180 - .../impl/win_iocp_socket_service_base.ipp | 728 ---- .../include/asio/detail/impl/win_mutex.ipp | 78 - .../detail/impl/win_object_handle_service.ipp | 444 --- .../asio/detail/impl/win_static_mutex.ipp | 118 - .../include/asio/detail/impl/win_thread.ipp | 139 - .../include/asio/detail/impl/win_tss_ptr.ipp | 57 - .../impl/winrt_ssocket_service_base.ipp | 612 --- .../detail/impl/winrt_timer_scheduler.hpp | 79 - .../detail/impl/winrt_timer_scheduler.ipp | 122 - .../include/asio/detail/impl/winsock_init.ipp | 82 - .../include/asio/detail/io_control.hpp | 134 - .../include/asio/detail/keyword_tss_ptr.hpp | 70 - .../include/asio/detail/kqueue_reactor.hpp | 219 -- .../include/asio/detail/limits.hpp | 26 - .../asio/detail/local_free_on_block_exit.hpp | 57 - .../asio/detail/macos_fenced_block.hpp | 61 - .../asio-1.10.2/include/asio/detail/mutex.hpp | 48 - .../include/asio/detail/noncopyable.hpp | 43 - .../include/asio/detail/null_event.hpp | 88 - .../include/asio/detail/null_fenced_block.hpp | 45 - .../include/asio/detail/null_mutex.hpp | 64 - .../include/asio/detail/null_reactor.hpp | 67 - .../asio/detail/null_signal_blocker.hpp | 69 - .../asio/detail/null_socket_service.hpp | 497 --- .../include/asio/detail/null_static_mutex.hpp | 60 - .../include/asio/detail/null_thread.hpp | 61 - .../include/asio/detail/null_tss_ptr.hpp | 68 - .../include/asio/detail/object_pool.hpp | 146 - .../asio/detail/old_win_sdk_compat.hpp | 214 -- .../include/asio/detail/op_queue.hpp | 156 - .../include/asio/detail/operation.hpp | 38 - .../asio/detail/pipe_select_interrupter.hpp | 89 - .../include/asio/detail/pop_options.hpp | 105 - .../include/asio/detail/posix_event.hpp | 126 - .../asio/detail/posix_fd_set_adapter.hpp | 118 - .../include/asio/detail/posix_mutex.hpp | 76 - .../asio/detail/posix_signal_blocker.hpp | 85 - .../asio/detail/posix_static_mutex.hpp | 64 - .../include/asio/detail/posix_thread.hpp | 105 - .../include/asio/detail/posix_tss_ptr.hpp | 79 - .../include/asio/detail/push_options.hpp | 138 - .../detail/reactive_descriptor_service.hpp | 322 -- .../asio/detail/reactive_null_buffers_op.hpp | 88 - .../detail/reactive_serial_port_service.hpp | 234 -- .../asio/detail/reactive_socket_accept_op.hpp | 136 - .../detail/reactive_socket_connect_op.hpp | 106 - .../asio/detail/reactive_socket_recv_op.hpp | 123 - .../detail/reactive_socket_recvfrom_op.hpp | 133 - .../detail/reactive_socket_recvmsg_op.hpp | 125 - .../asio/detail/reactive_socket_send_op.hpp | 120 - .../asio/detail/reactive_socket_sendto_op.hpp | 123 - .../asio/detail/reactive_socket_service.hpp | 457 --- .../detail/reactive_socket_service_base.hpp | 450 --- .../include/asio/detail/reactor.hpp | 32 - .../include/asio/detail/reactor_fwd.hpp | 40 - .../include/asio/detail/reactor_op.hpp | 61 - .../include/asio/detail/reactor_op_queue.hpp | 168 - .../include/asio/detail/regex_fwd.hpp | 35 - .../asio/detail/resolve_endpoint_op.hpp | 121 - .../include/asio/detail/resolve_op.hpp | 131 - .../include/asio/detail/resolver_service.hpp | 129 - .../asio/detail/resolver_service_base.hpp | 129 - 
.../include/asio/detail/scoped_lock.hpp | 101 - .../include/asio/detail/scoped_ptr.hpp | 79 - .../asio/detail/select_interrupter.hpp | 46 - .../include/asio/detail/select_reactor.hpp | 219 -- .../include/asio/detail/service_registry.hpp | 156 - .../include/asio/detail/shared_ptr.hpp | 38 - .../include/asio/detail/signal_blocker.hpp | 44 - .../include/asio/detail/signal_handler.hpp | 82 - .../include/asio/detail/signal_init.hpp | 47 - .../include/asio/detail/signal_op.hpp | 49 - .../asio/detail/signal_set_service.hpp | 216 -- .../include/asio/detail/socket_holder.hpp | 98 - .../include/asio/detail/socket_ops.hpp | 334 -- .../include/asio/detail/socket_option.hpp | 316 -- .../asio/detail/socket_select_interrupter.hpp | 91 - .../include/asio/detail/socket_types.hpp | 404 -- .../asio/detail/solaris_fenced_block.hpp | 61 - .../include/asio/detail/static_mutex.hpp | 52 - .../include/asio/detail/std_event.hpp | 176 - .../include/asio/detail/std_mutex.hpp | 73 - .../include/asio/detail/std_static_mutex.hpp | 81 - .../include/asio/detail/std_thread.hpp | 65 - .../include/asio/detail/strand_service.hpp | 142 - .../include/asio/detail/task_io_service.hpp | 201 - .../asio/detail/task_io_service_operation.hpp | 76 - .../detail/task_io_service_thread_info.hpp | 40 - .../include/asio/detail/thread.hpp | 56 - .../include/asio/detail/thread_info_base.hpp | 91 - .../include/asio/detail/throw_error.hpp | 53 - .../include/asio/detail/throw_exception.hpp | 51 - .../include/asio/detail/timer_queue.hpp | 332 -- .../include/asio/detail/timer_queue_base.hpp | 68 - .../include/asio/detail/timer_queue_ptime.hpp | 93 - .../include/asio/detail/timer_queue_set.hpp | 66 - .../include/asio/detail/timer_scheduler.hpp | 35 - .../asio/detail/timer_scheduler_fwd.hpp | 40 - .../include/asio/detail/tss_ptr.hpp | 69 - .../include/asio/detail/type_traits.hpp | 58 - .../asio/detail/variadic_templates.hpp | 63 - .../include/asio/detail/wait_handler.hpp | 83 - .../include/asio/detail/wait_op.hpp | 45 - .../include/asio/detail/weak_ptr.hpp | 38 - .../include/asio/detail/win_event.hpp | 126 - .../asio/detail/win_fd_set_adapter.hpp | 149 - .../include/asio/detail/win_fenced_block.hpp | 89 - .../asio/detail/win_iocp_handle_read_op.hpp | 109 - .../asio/detail/win_iocp_handle_service.hpp | 322 -- .../asio/detail/win_iocp_handle_write_op.hpp | 101 - .../asio/detail/win_iocp_io_service.hpp | 315 -- .../asio/detail/win_iocp_null_buffers_op.hpp | 119 - .../asio/detail/win_iocp_operation.hpp | 95 - .../asio/detail/win_iocp_overlapped_op.hpp | 88 - .../asio/detail/win_iocp_overlapped_ptr.hpp | 144 - .../detail/win_iocp_serial_port_service.hpp | 228 -- .../asio/detail/win_iocp_socket_accept_op.hpp | 165 - .../detail/win_iocp_socket_connect_op.hpp | 124 - .../asio/detail/win_iocp_socket_recv_op.hpp | 115 - .../detail/win_iocp_socket_recvfrom_op.hpp | 123 - .../detail/win_iocp_socket_recvmsg_op.hpp | 116 - .../asio/detail/win_iocp_socket_send_op.hpp | 109 - .../asio/detail/win_iocp_socket_service.hpp | 525 --- .../detail/win_iocp_socket_service_base.hpp | 524 --- .../asio/detail/win_iocp_thread_info.hpp | 34 - .../include/asio/detail/win_mutex.hpp | 78 - .../asio/detail/win_object_handle_service.hpp | 183 - .../include/asio/detail/win_static_mutex.hpp | 74 - .../include/asio/detail/win_thread.hpp | 139 - .../include/asio/detail/win_tss_ptr.hpp | 79 - .../include/asio/detail/wince_thread.hpp | 116 - .../asio/detail/winrt_async_manager.hpp | 294 -- .../include/asio/detail/winrt_async_op.hpp | 65 - .../include/asio/detail/winrt_resolve_op.hpp | 117 
- .../asio/detail/winrt_resolver_service.hpp | 183 - .../asio/detail/winrt_socket_connect_op.hpp | 90 - .../asio/detail/winrt_socket_recv_op.hpp | 110 - .../asio/detail/winrt_socket_send_op.hpp | 101 - .../asio/detail/winrt_ssocket_service.hpp | 232 -- .../detail/winrt_ssocket_service_base.hpp | 355 -- .../asio/detail/winrt_timer_scheduler.hpp | 131 - .../include/asio/detail/winrt_utils.hpp | 106 - .../include/asio/detail/winsock_init.hpp | 128 - .../include/asio/detail/wrapped_handler.hpp | 291 -- .../asio-1.10.2/include/asio/error.hpp | 331 -- .../asio-1.10.2/include/asio/error_code.hpp | 188 - .../include/asio/generic/basic_endpoint.hpp | 193 - .../asio/generic/datagram_protocol.hpp | 123 - .../include/asio/generic/detail/endpoint.hpp | 133 - .../asio/generic/detail/impl/endpoint.ipp | 109 - .../include/asio/generic/raw_protocol.hpp | 121 - .../asio/generic/seq_packet_protocol.hpp | 122 - .../include/asio/generic/stream_protocol.hpp | 127 - .../include/asio/handler_alloc_hook.hpp | 81 - .../asio/handler_continuation_hook.hpp | 54 - .../include/asio/handler_invoke_hook.hpp | 85 - .../asio-1.10.2/include/asio/handler_type.hpp | 112 - .../include/asio/high_resolution_timer.hpp | 63 - .../asio/impl/buffered_read_stream.hpp | 358 -- .../asio/impl/buffered_write_stream.hpp | 338 -- .../asio-1.10.2/include/asio/impl/connect.hpp | 428 --- .../asio-1.10.2/include/asio/impl/error.ipp | 128 - .../include/asio/impl/error_code.ipp | 128 - .../include/asio/impl/handler_alloc_hook.ipp | 77 - .../include/asio/impl/io_service.hpp | 152 - .../include/asio/impl/io_service.ipp | 155 - .../asio-1.10.2/include/asio/impl/read.hpp | 753 ---- .../asio-1.10.2/include/asio/impl/read_at.hpp | 810 ---- .../include/asio/impl/read_until.hpp | 1147 ------ .../include/asio/impl/serial_port_base.hpp | 59 - .../include/asio/impl/serial_port_base.ipp | 554 --- .../asio-1.10.2/include/asio/impl/spawn.hpp | 336 -- .../asio-1.10.2/include/asio/impl/src.cpp | 25 - .../asio-1.10.2/include/asio/impl/src.hpp | 74 - .../include/asio/impl/use_future.hpp | 172 - .../asio-1.10.2/include/asio/impl/write.hpp | 765 ---- .../include/asio/impl/write_at.hpp | 825 ---- .../asio-1.10.2/include/asio/io_service.hpp | 770 ---- .../asio-1.10.2/include/asio/ip/address.hpp | 200 - .../include/asio/ip/address_v4.hpp | 241 -- .../include/asio/ip/address_v6.hpp | 246 -- .../include/asio/ip/basic_endpoint.hpp | 263 -- .../include/asio/ip/basic_resolver.hpp | 268 -- .../include/asio/ip/basic_resolver_entry.hpp | 94 - .../asio/ip/basic_resolver_iterator.hpp | 260 -- .../include/asio/ip/basic_resolver_query.hpp | 244 -- .../include/asio/ip/detail/endpoint.hpp | 139 - .../include/asio/ip/detail/impl/endpoint.ipp | 204 - .../include/asio/ip/detail/socket_option.hpp | 569 --- .../asio-1.10.2/include/asio/ip/host_name.hpp | 42 - .../asio-1.10.2/include/asio/ip/icmp.hpp | 115 - .../include/asio/ip/impl/address.hpp | 53 - .../include/asio/ip/impl/address.ipp | 226 -- .../include/asio/ip/impl/address_v4.hpp | 53 - .../include/asio/ip/impl/address_v4.ipp | 178 - .../include/asio/ip/impl/address_v6.hpp | 53 - .../include/asio/ip/impl/address_v6.ipp | 298 -- .../include/asio/ip/impl/basic_endpoint.hpp | 55 - .../include/asio/ip/impl/host_name.ipp | 54 - .../asio-1.10.2/include/asio/ip/multicast.hpp | 191 - .../include/asio/ip/resolver_query_base.hpp | 130 - .../include/asio/ip/resolver_service.hpp | 176 - .../asio-1.10.2/include/asio/ip/tcp.hpp | 155 - .../asio-1.10.2/include/asio/ip/udp.hpp | 111 - .../asio-1.10.2/include/asio/ip/unicast.hpp | 70 - 
.../asio-1.10.2/include/asio/ip/v6_only.hpp | 69 - .../include/asio/is_read_buffered.hpp | 59 - .../include/asio/is_write_buffered.hpp | 59 - .../include/asio/local/basic_endpoint.hpp | 239 -- .../include/asio/local/connect_pair.hpp | 104 - .../include/asio/local/datagram_protocol.hpp | 80 - .../include/asio/local/detail/endpoint.hpp | 133 - .../asio/local/detail/impl/endpoint.ipp | 128 - .../include/asio/local/stream_protocol.hpp | 90 - .../asio-1.10.2/include/asio/placeholders.hpp | 123 - .../include/asio/posix/basic_descriptor.hpp | 490 --- .../asio/posix/basic_stream_descriptor.hpp | 362 -- .../include/asio/posix/descriptor_base.hpp | 97 - .../include/asio/posix/stream_descriptor.hpp | 37 - .../asio/posix/stream_descriptor_service.hpp | 260 -- .../include/asio/raw_socket_service.hpp | 432 --- .../asio-1.10.2/include/asio/read.hpp | 631 --- .../asio-1.10.2/include/asio/read_at.hpp | 664 ---- .../asio-1.10.2/include/asio/read_until.hpp | 923 ----- .../asio/seq_packet_socket_service.hpp | 380 -- .../asio-1.10.2/include/asio/serial_port.hpp | 36 - .../include/asio/serial_port_base.hpp | 167 - .../include/asio/serial_port_service.hpp | 253 -- .../asio-1.10.2/include/asio/signal_set.hpp | 28 - .../include/asio/signal_set_service.hpp | 134 - .../include/asio/socket_acceptor_service.hpp | 302 -- .../asio-1.10.2/include/asio/socket_base.hpp | 520 --- .../asio-1.10.2/include/asio/spawn.hpp | 265 -- .../asio-1.10.2/include/asio/ssl.hpp | 30 - .../include/asio/ssl/basic_context.hpp | 40 - .../asio-1.10.2/include/asio/ssl/context.hpp | 787 ---- .../include/asio/ssl/context_base.hpp | 167 - .../include/asio/ssl/context_service.hpp | 40 - .../asio/ssl/detail/buffered_handshake_op.hpp | 110 - .../include/asio/ssl/detail/engine.hpp | 164 - .../include/asio/ssl/detail/handshake_op.hpp | 68 - .../include/asio/ssl/detail/impl/engine.ipp | 326 -- .../asio/ssl/detail/impl/openssl_init.ipp | 145 - .../include/asio/ssl/detail/io.hpp | 347 -- .../include/asio/ssl/detail/openssl_init.hpp | 101 - .../include/asio/ssl/detail/openssl_types.hpp | 28 - .../asio/ssl/detail/password_callback.hpp | 72 - .../include/asio/ssl/detail/read_op.hpp | 73 - .../include/asio/ssl/detail/shutdown_op.hpp | 60 - .../include/asio/ssl/detail/stream_core.hpp | 126 - .../asio/ssl/detail/verify_callback.hpp | 68 - .../include/asio/ssl/detail/write_op.hpp | 73 - .../asio-1.10.2/include/asio/ssl/error.hpp | 68 - .../include/asio/ssl/impl/context.hpp | 71 - .../include/asio/ssl/impl/context.ipp | 950 ----- .../include/asio/ssl/impl/error.ipp | 57 - .../asio/ssl/impl/rfc2818_verification.ipp | 166 - .../asio-1.10.2/include/asio/ssl/impl/src.hpp | 28 - .../include/asio/ssl/old/basic_context.hpp | 434 --- .../include/asio/ssl/old/context_service.hpp | 174 - .../old/detail/openssl_context_service.hpp | 386 -- .../asio/ssl/old/detail/openssl_operation.hpp | 524 --- .../ssl/old/detail/openssl_stream_service.hpp | 571 --- .../include/asio/ssl/old/stream.hpp | 501 --- .../include/asio/ssl/old/stream_service.hpp | 184 - .../include/asio/ssl/rfc2818_verification.hpp | 100 - .../asio-1.10.2/include/asio/ssl/stream.hpp | 756 ---- .../include/asio/ssl/stream_base.hpp | 52 - .../include/asio/ssl/stream_service.hpp | 40 - .../include/asio/ssl/verify_context.hpp | 73 - .../include/asio/ssl/verify_mode.hpp | 63 - .../asio-1.10.2/include/asio/steady_timer.hpp | 61 - .../asio-1.10.2/include/asio/strand.hpp | 251 -- .../include/asio/stream_socket_service.hpp | 376 -- .../asio-1.10.2/include/asio/streambuf.hpp | 33 - 
.../asio-1.10.2/include/asio/system_error.hpp | 131 - .../asio-1.10.2/include/asio/system_timer.hpp | 57 - .../asio-1.10.2/include/asio/thread.hpp | 92 - .../asio-1.10.2/include/asio/time_traits.hpp | 96 - .../asio-1.10.2/include/asio/unyield.hpp | 21 - .../asio-1.10.2/include/asio/use_future.hpp | 92 - .../asio-1.10.2/include/asio/version.hpp | 23 - .../asio-1.10.2/include/asio/wait_traits.hpp | 41 - .../include/asio/waitable_timer_service.hpp | 168 - .../include/asio/windows/basic_handle.hpp | 281 -- .../asio/windows/basic_object_handle.hpp | 178 - .../windows/basic_random_access_handle.hpp | 376 -- .../asio/windows/basic_stream_handle.hpp | 359 -- .../include/asio/windows/object_handle.hpp | 38 - .../asio/windows/object_handle_service.hpp | 177 - .../include/asio/windows/overlapped_ptr.hpp | 116 - .../asio/windows/random_access_handle.hpp | 37 - .../windows/random_access_handle_service.hpp | 220 -- .../include/asio/windows/stream_handle.hpp | 37 - .../asio/windows/stream_handle_service.hpp | 218 -- .../asio-1.10.2/include/asio/write.hpp | 618 --- .../asio-1.10.2/include/asio/write_at.hpp | 670 ---- .../asio-1.10.2/include/asio/yield.hpp | 23 - .../third_party/asio-1.10.2/src/asio.cpp | 11 - .../third_party/asio-1.10.2/src/asio_ssl.cpp | 11 - 451 files changed, 326 insertions(+), 87303 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/COPYING delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/async_result.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_datagram_socket.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_deadline_timer.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_io_object.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_raw_socket.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_seq_packet_socket.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_serial_port.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_signal_set.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_acceptor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_iostream.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_streambuf.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_stream_socket.hpp delete mode 
100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_streambuf.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_streambuf_fwd.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_waitable_timer.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/buffer.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/buffered_read_stream.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/buffered_read_stream_fwd.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/buffered_stream.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/buffered_stream_fwd.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/buffered_write_stream.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/buffered_write_stream_fwd.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/buffers_iterator.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/completion_condition.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/connect.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/coroutine.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/datagram_socket_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/deadline_timer.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/deadline_timer_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/addressof.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/array.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/array_fwd.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/assert.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/atomic_count.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/base_from_completion_cond.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/bind_handler.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/buffer_resize_guard.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/buffer_sequence_adapter.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/buffered_stream_storage.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/call_stack.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/chrono_time_traits.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/completion_handler.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/config.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/consuming_buffers.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/cstdint.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/date_time_fwd.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/deadline_timer_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/dependent_type.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/descriptor_ops.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/descriptor_read_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/descriptor_write_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/dev_poll_reactor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/epoll_reactor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/event.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/eventfd_select_interrupter.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/fd_set_adapter.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/fenced_block.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/function.hpp delete 
mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/gcc_arm_fenced_block.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/gcc_hppa_fenced_block.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/gcc_sync_fenced_block.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/gcc_x86_fenced_block.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/handler_alloc_helpers.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/handler_cont_helpers.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/handler_invoke_helpers.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/handler_tracking.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/handler_type_requirements.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/hash_map.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/buffer_sequence_adapter.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/descriptor_ops.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/dev_poll_reactor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/dev_poll_reactor.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/epoll_reactor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/epoll_reactor.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/eventfd_select_interrupter.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/handler_tracking.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/kqueue_reactor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/kqueue_reactor.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/pipe_select_interrupter.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/posix_event.ipp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/posix_mutex.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/posix_thread.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/posix_tss_ptr.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/reactive_descriptor_service.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/reactive_serial_port_service.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/reactive_socket_service_base.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/resolver_service_base.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/select_reactor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/select_reactor.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/service_registry.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/service_registry.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/signal_set_service.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_ops.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_select_interrupter.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/strand_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/strand_service.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/task_io_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/task_io_service.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/throw_error.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/timer_queue_ptime.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/timer_queue_set.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_event.ipp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_handle_service.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_io_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_io_service.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_serial_port_service.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_socket_service_base.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_mutex.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_object_handle_service.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_static_mutex.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_thread.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_tss_ptr.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/winrt_ssocket_service_base.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/winrt_timer_scheduler.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/winrt_timer_scheduler.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/winsock_init.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/io_control.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/keyword_tss_ptr.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/kqueue_reactor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/limits.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/local_free_on_block_exit.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/macos_fenced_block.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/mutex.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/noncopyable.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/null_event.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/null_fenced_block.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/null_mutex.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/null_reactor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/null_signal_blocker.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/null_socket_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/null_static_mutex.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/null_thread.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/null_tss_ptr.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/object_pool.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/old_win_sdk_compat.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/op_queue.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/operation.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/pipe_select_interrupter.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/pop_options.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/posix_event.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/posix_fd_set_adapter.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/posix_mutex.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/posix_signal_blocker.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/posix_static_mutex.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/posix_thread.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/posix_tss_ptr.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/push_options.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_descriptor_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_null_buffers_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_serial_port_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_socket_accept_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_socket_connect_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_socket_recv_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_socket_recvfrom_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_socket_recvmsg_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_socket_send_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_socket_sendto_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_socket_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactive_socket_service_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactor_fwd.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactor_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/reactor_op_queue.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/regex_fwd.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/resolve_endpoint_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/resolve_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/resolver_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/resolver_service_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/scoped_lock.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/scoped_ptr.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/select_interrupter.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/select_reactor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/service_registry.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/shared_ptr.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/signal_blocker.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/signal_handler.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/signal_init.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/signal_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/signal_set_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_holder.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_ops.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_option.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_select_interrupter.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/socket_types.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/solaris_fenced_block.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/static_mutex.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/std_event.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/std_mutex.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/std_static_mutex.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/std_thread.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/strand_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/task_io_service.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/task_io_service_operation.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/task_io_service_thread_info.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/thread.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/thread_info_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/throw_error.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/throw_exception.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/timer_queue.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/timer_queue_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/timer_queue_ptime.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/timer_queue_set.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/timer_scheduler.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/timer_scheduler_fwd.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/tss_ptr.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/type_traits.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/variadic_templates.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/wait_handler.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/wait_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/weak_ptr.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_event.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_fd_set_adapter.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_fenced_block.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_handle_read_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_handle_service.hpp delete 
mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_handle_write_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_io_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_null_buffers_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_operation.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_overlapped_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_overlapped_ptr.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_serial_port_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_socket_accept_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_socket_connect_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_socket_recv_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_socket_recvfrom_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_socket_recvmsg_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_socket_send_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_socket_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_socket_service_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_iocp_thread_info.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_mutex.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_object_handle_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_static_mutex.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_thread.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/win_tss_ptr.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/wince_thread.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winrt_async_manager.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winrt_async_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winrt_resolve_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winrt_resolver_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winrt_socket_connect_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winrt_socket_recv_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winrt_socket_send_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winrt_ssocket_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winrt_ssocket_service_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winrt_timer_scheduler.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winrt_utils.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/winsock_init.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/wrapped_handler.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/error.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/error_code.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/generic/basic_endpoint.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/generic/datagram_protocol.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/generic/detail/endpoint.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/generic/detail/impl/endpoint.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/generic/raw_protocol.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/generic/seq_packet_protocol.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/generic/stream_protocol.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/handler_alloc_hook.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/handler_continuation_hook.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/handler_invoke_hook.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/handler_type.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/high_resolution_timer.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/buffered_read_stream.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/buffered_write_stream.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/connect.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/error.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/error_code.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/handler_alloc_hook.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/io_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/io_service.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/read.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/read_at.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/read_until.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/serial_port_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/serial_port_base.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/spawn.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/src.cpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/src.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/use_future.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/write.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/impl/write_at.hpp 
delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/io_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/address.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/address_v4.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/address_v6.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/basic_endpoint.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/basic_resolver.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/basic_resolver_entry.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/basic_resolver_iterator.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/basic_resolver_query.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/detail/endpoint.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/detail/impl/endpoint.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/detail/socket_option.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/host_name.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/icmp.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/impl/address.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/impl/address.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/impl/address_v4.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/impl/address_v4.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/impl/address_v6.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/impl/address_v6.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/impl/basic_endpoint.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/impl/host_name.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/multicast.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/resolver_query_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/resolver_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/tcp.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/udp.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/unicast.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ip/v6_only.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/is_read_buffered.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/is_write_buffered.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/local/basic_endpoint.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/local/connect_pair.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/local/datagram_protocol.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/local/detail/endpoint.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/local/detail/impl/endpoint.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/local/stream_protocol.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/placeholders.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/posix/basic_descriptor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/posix/basic_stream_descriptor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/posix/descriptor_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/posix/stream_descriptor.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/posix/stream_descriptor_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/raw_socket_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/read.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/read_at.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/read_until.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/seq_packet_socket_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/serial_port.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/serial_port_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/serial_port_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/signal_set.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/signal_set_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/socket_acceptor_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/socket_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/spawn.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/basic_context.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/context.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/context_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/context_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/buffered_handshake_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/engine.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/handshake_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/impl/engine.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/impl/openssl_init.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/io.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/openssl_init.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/openssl_types.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/password_callback.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/read_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/shutdown_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/stream_core.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/verify_callback.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/detail/write_op.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/error.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/impl/context.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/impl/context.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/impl/error.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/impl/rfc2818_verification.ipp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/impl/src.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/old/basic_context.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/old/context_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/old/detail/openssl_context_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/old/detail/openssl_operation.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/old/detail/openssl_stream_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/old/stream.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/old/stream_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/rfc2818_verification.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/stream.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/stream_base.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/stream_service.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/verify_context.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/ssl/verify_mode.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/steady_timer.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/strand.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/stream_socket_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/streambuf.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/system_error.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/system_timer.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/thread.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/time_traits.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/unyield.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/use_future.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/version.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/wait_traits.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/waitable_timer_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/windows/basic_handle.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/windows/basic_object_handle.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/windows/basic_random_access_handle.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/windows/basic_stream_handle.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/windows/object_handle.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/windows/object_handle_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/windows/overlapped_ptr.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/windows/random_access_handle.hpp delete mode 100644 
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/windows/random_access_handle_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/windows/stream_handle.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/windows/stream_handle_service.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/write.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/write_at.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/yield.hpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/src/asio.cpp delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/src/asio_ssl.cpp diff --git a/BUILDING.txt b/BUILDING.txt index d54ce83183846..c96c851204e75 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -6,6 +6,7 @@ Requirements: * Unix System * JDK 1.8 * Maven 3.3 or later +* Boost 1.72 (if compiling native code) * Protocol Buffers 3.7.1 (if compiling native code) * CMake 3.1 or newer (if compiling native code) * Zlib devel (if compiling native code) @@ -72,6 +73,12 @@ Installing required packages for clean install of Ubuntu 14.04 LTS Desktop: && ./configure\ && make install \ && rm -rf /opt/protobuf-3.7-src +* Boost + $ curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ + && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ + && cd boost_1_72_0 \ + && ./bootstrap.sh --prefix=/usr/ \ + && ./b2 --without-python install Optional packages: @@ -468,6 +475,7 @@ Requirements: * Windows System * JDK 1.8 * Maven 3.0 or later +* Boost 1.72 * Protocol Buffers 3.7.1 * CMake 3.1 or newer * Visual Studio 2010 Professional or Higher diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index fd2d2938419c8..f72fa4659009a 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -92,6 +92,21 @@ ENV MAVEN_HOME /usr ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 ENV FINDBUGS_HOME /usr +####### +# Install Boost 1.72 (1.65 ships with Bionic) +####### +# hadolint ignore=DL3003 +RUN mkdir -p /opt/boost-library \ + && curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ + && mv boost_1_72_0.tar.bz2 /opt/boost-library \ + && cd /opt/boost-library \ + && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ + && cd /opt/boost-library/boost_1_72_0 \ + && ./bootstrap.sh --prefix=/usr/ \ + && ./b2 --without-python install \ + && cd /root \ + && rm -rf /opt/boost-library + ###### # Install Google Protobuf 3.7.1 (3.0.0 ships with Bionic) ###### diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 index ccc517dbf9fd2..5fd646fb9c08a 100644 --- a/dev-support/docker/Dockerfile_aarch64 +++ b/dev-support/docker/Dockerfile_aarch64 @@ -95,6 +95,21 @@ ENV MAVEN_HOME /usr ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-arm64 ENV FINDBUGS_HOME /usr +####### +# Install Boost 1.72 (1.65 ships with Bionic) +####### +# hadolint ignore=DL3003 +RUN mkdir -p /opt/boost-library \ + && curl -L 
https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ + && mv boost_1_72_0.tar.bz2 /opt/boost-library \ + && cd /opt/boost-library \ + && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ + && cd /opt/boost-library/boost_1_72_0 \ + && ./bootstrap.sh --prefix=/usr/ \ + && ./b2 --without-python install \ + && cd /root \ + && rm -rf /opt/boost-library + ###### # Install Google Protobuf 3.7.1 (3.0.0 ships with Bionic) ###### diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt index 626c49bf192c6..6e233fd3991d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt @@ -18,6 +18,8 @@ cmake_minimum_required(VERSION 3.1 FATAL_ERROR) +project(hadoop_hdfs_native_client) + enable_testing() list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../hadoop-common-project/hadoop-common) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt index 411320ad771e7..6a2f378d0a4bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt @@ -28,6 +28,8 @@ project (libhdfspp) cmake_minimum_required(VERSION 2.8) +find_package (Boost 1.72.0 REQUIRED) + enable_testing() include (CTest) @@ -220,7 +222,7 @@ include_directories( include_directories( SYSTEM ${PROJECT_BINARY_DIR}/lib/proto - third_party/asio-1.10.2/include + ${Boost_INCLUDE_DIRS} third_party/rapidxml-1.13 third_party/gmock-1.7.0 third_party/tr2 diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h index a6ec97ad4913f..b0bac5dd7ece0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h @@ -61,10 +61,7 @@ #include #include -// forward decl -namespace asio { - class io_service; -} +#include namespace hdfs { @@ -133,7 +130,7 @@ class IoService : public std::enable_shared_from_this * Access underlying io_service object. Only to be used in asio library calls. * After HDFS-11884 is complete only tests should need direct access to the asio::io_service. 
**/ - virtual asio::io_service& GetRaw() = 0; + virtual boost::asio::io_service& GetRaw() = 0; }; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt index 1ab04d36689c0..87779e7f8ae81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt @@ -19,6 +19,7 @@ if(NEED_LINK_DL) set(LIB_DL dl) endif() +include_directories(${Boost_INCLUDE_DIRS} ../../include) add_library(common_obj OBJECT status.cc sasl_digest_md5.cc ioservice_impl.cc options.cc configuration.cc configuration_loader.cc hdfs_configuration.cc uri.cc util.cc retry_policy.cc cancel_tracker.cc logging.cc libhdfs_events_impl.cc auth_info.cc namenode_info.cc statinfo.cc fsinfo.cc content_summary.cc locks.cc config_parser.cc) add_library(common $ $) target_link_libraries(common ${LIB_DL}) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/async_stream.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/async_stream.h index efe2e1c5db376..e9779e7558bec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/async_stream.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/async_stream.h @@ -19,15 +19,17 @@ #ifndef LIB_COMMON_ASYNC_STREAM_H_ #define LIB_COMMON_ASYNC_STREAM_H_ -#include -#include +#include +#include +#include + #include namespace hdfs { // Contiguous buffer types -typedef asio::mutable_buffers_1 MutableBuffer; -typedef asio::const_buffers_1 ConstBuffer; +typedef boost::asio::mutable_buffers_1 MutableBuffer; +typedef boost::asio::const_buffers_1 ConstBuffer; /* * asio-compatible stream implementation. 
@@ -38,13 +40,20 @@ typedef asio::const_buffers_1 ConstBuffer; */ class AsyncStream { public: + using executor_type = boost::asio::system_executor; + executor_type executor_; + virtual void async_read_some(const MutableBuffer &buf, - std::function handler) = 0; virtual void async_write_some(const ConstBuffer &buf, - std::function handler) = 0; + + executor_type get_executor() { + return executor_; + } }; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/asio.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/asio.h index 0215176e6d462..f2a3722ec182c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/asio.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/asio.h @@ -21,7 +21,10 @@ #include "continuation.h" #include "common/util.h" #include "hdfspp/status.h" -#include + +#include +#include + #include namespace hdfs { @@ -37,8 +40,8 @@ class WriteContinuation : public Continuation { virtual void Run(const Next &next) override { auto handler = - [next](const asio::error_code &ec, size_t) { next(ToStatus(ec)); }; - asio::async_write(*stream_, buffer_, handler); + [next](const boost::system::error_code &ec, size_t) { next(ToStatus(ec)); }; + boost::asio::async_write(*stream_, buffer_, handler); } private: diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/protobuf.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/protobuf.h index 21e063ed0e08c..e5be85a5005b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/protobuf.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/protobuf.h @@ -20,8 +20,10 @@ #include "common/util.h" -#include - +#include +#include +#include +#include #include #include #include @@ -39,7 +41,7 @@ struct ReadDelimitedPBMessageContinuation : public Continuation { virtual void Run(const Next &next) override { namespace pbio = google::protobuf::io; - auto handler = [this, next](const asio::error_code &ec, size_t) { + auto handler = [this, next](const boost::system::error_code &ec, size_t) { Status status; if (ec) { status = ToStatus(ec); @@ -57,15 +59,15 @@ struct ReadDelimitedPBMessageContinuation : public Continuation { } next(status); }; - asio::async_read(*stream_, - asio::buffer(buf_), + boost::asio::async_read(*stream_, + boost::asio::buffer(buf_), std::bind(&ReadDelimitedPBMessageContinuation::CompletionHandler, this, std::placeholders::_1, std::placeholders::_2), handler); } private: - size_t CompletionHandler(const asio::error_code &ec, size_t transferred) { + size_t CompletionHandler(const boost::system::error_code &ec, size_t transferred) { if (ec) { return 0; } @@ -103,7 +105,7 @@ struct WriteDelimitedPBMessageContinuation : Continuation { return; } - asio::async_write(*stream_, asio::buffer(buf_), [next](const asio::error_code &ec, size_t) { next(ToStatus(ec)); } ); + boost::asio::async_write(*stream_, boost::asio::buffer(buf_), [next](const boost::system::error_code &ec, size_t) { next(ToStatus(ec)); } ); } private: diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc index 
9f350a8f2cc76..f8f5923832711 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc @@ -17,6 +17,8 @@ */ #include + +#include #include #include diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.cc index de081ed148f54..17a4474a43319 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.cc @@ -125,7 +125,7 @@ void IoServiceImpl::Run() { // from escaping this library and crashing the process. // As recommended in http://www.boost.org/doc/libs/1_39_0/doc/html/boost_asio/reference/io_service.html#boost_asio.reference.io_service.effect_of_exceptions_thrown_from_handlers - asio::io_service::work work(io_service_); + boost::asio::io_service::work work(io_service_); while(true) { try @@ -145,7 +145,7 @@ void IoServiceImpl::Stop() { io_service_.stop(); } -asio::io_service& IoServiceImpl::GetRaw() { +boost::asio::io_service& IoServiceImpl::GetRaw() { return io_service_; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.h index a29985cf88a8b..2d627aabf23f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.h @@ -21,7 +21,7 @@ #include "hdfspp/ioservice.h" -#include +#include #include "common/new_delete.h" #include @@ -45,7 +45,7 @@ class IoServiceImpl : public IoService { void PostTask(std::function asyncTask) override; void Run() override; void Stop() override; - asio::io_service& GetRaw() override; + boost::asio::io_service& GetRaw() override; // Add a single worker thread, in the common case try to avoid this in favor // of Init[Default]Workers. 
Public for use by tests and rare cases where a @@ -57,7 +57,7 @@ class IoServiceImpl : public IoService { private: std::mutex state_lock_; - ::asio::io_service io_service_; + boost::asio::io_service io_service_; // For doing logging + resource manager updates on thread start/exit void ThreadStartHook(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.cc index 94bce83fd1e93..54048fb1201b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.cc @@ -136,7 +136,7 @@ LogMessage& LogMessage::operator<<(const std::string& str) { return *this; } -LogMessage& LogMessage::operator<<(const ::asio::ip::tcp::endpoint& endpoint) { +LogMessage& LogMessage::operator<<(const boost::asio::ip::tcp::endpoint& endpoint) { msg_buffer_ << endpoint; return *this; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.h index 4e66a93061774..8935287fe0108 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.h @@ -19,7 +19,7 @@ #ifndef LIB_COMMON_LOGGING_H_ #define LIB_COMMON_LOGGING_H_ -#include +#include #include "hdfspp/log.h" @@ -193,7 +193,7 @@ class LogMessage { LogMessage& operator<<(void *); //asio types - LogMessage& operator<<(const ::asio::ip::tcp::endpoint& endpoint); + LogMessage& operator<<(const boost::asio::ip::tcp::endpoint& endpoint); //thread and mutex types LogMessage& operator<<(const std::thread::id& tid); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.cc index a04daf1a8b7f6..92054fce07e31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.cc @@ -70,7 +70,7 @@ bool ResolveInPlace(std::shared_ptr ioservice, ResolvedNamenodeInfo & return true; } -typedef std::vector endpoint_vector; +typedef std::vector endpoint_vector; // RAII wrapper class ScopedResolver { @@ -78,8 +78,8 @@ class ScopedResolver { std::shared_ptr io_service_; std::string host_; std::string port_; - ::asio::ip::tcp::resolver::query query_; - ::asio::ip::tcp::resolver resolver_; + boost::asio::ip::tcp::resolver::query query_; + boost::asio::ip::tcp::resolver resolver_; endpoint_vector endpoints_; // Caller blocks on access if resolution isn't finished @@ -111,9 +111,9 @@ class ScopedResolver { std::shared_ptr> shared_result = result_status_; // Callback to pull a copy of endpoints out of resolver and set promise - auto callback = [this, shared_result](const asio::error_code &ec, ::asio::ip::tcp::resolver::iterator out) { + auto callback = [this, shared_result](const boost::system::error_code &ec, boost::asio::ip::tcp::resolver::iterator out) { if(!ec) { - std::copy(out, ::asio::ip::tcp::resolver::iterator(), std::back_inserter(endpoints_)); + std::copy(out, boost::asio::ip::tcp::resolver::iterator(), 
std::back_inserter(endpoints_)); } shared_result->set_value( ToStatus(ec) ); }; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.h index f43690dcf63b5..0532376e8c75f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.h @@ -19,7 +19,7 @@ #ifndef COMMON_HDFS_NAMENODE_INFO_H_ #define COMMON_HDFS_NAMENODE_INFO_H_ -#include +#include #include @@ -37,7 +37,7 @@ struct ResolvedNamenodeInfo : public NamenodeInfo { ResolvedNamenodeInfo& operator=(const NamenodeInfo &info); std::string str() const; - std::vector<::asio::ip::tcp::endpoint> endpoints; + std::vector endpoints; }; // Clear endpoints if set and resolve all of them in parallel. diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/retry_policy.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/retry_policy.cc index dca49fb66212a..eb64829601b57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/retry_policy.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/retry_policy.cc @@ -19,6 +19,8 @@ #include "common/retry_policy.h" #include "common/logging.h" +#include + #include namespace hdfs { @@ -57,7 +59,7 @@ RetryAction FixedDelayWithFailover::ShouldRetry(const Status &s, uint64_t retrie (void)max_failover_conn_retries_; LOG_TRACE(kRPC, << "FixedDelayWithFailover::ShouldRetry(retries=" << retries << ", failovers=" << failovers << ")"); - if(failovers < max_failover_retries_ && (s.code() == ::asio::error::timed_out || s.get_server_exception_type() == Status::kStandbyException) ) + if(failovers < max_failover_retries_ && (s.code() == boost::asio::error::timed_out || s.get_server_exception_type() == Status::kStandbyException) ) { // Try connecting to another NN in case this one keeps timing out // Can add the backoff wait specified by dfs.client.failover.sleep.base.millis here diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc index 6a0798737b7d4..7a4b4cf33efed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc @@ -30,7 +30,7 @@ namespace hdfs { -Status ToStatus(const ::asio::error_code &ec) { +Status ToStatus(const boost::system::error_code &ec) { if (ec) { return Status(ec.value(), ec.message().c_str()); } else { @@ -134,7 +134,7 @@ std::string Base64Encode(const std::string &src) { } -std::string SafeDisconnect(asio::ip::tcp::socket *sock) { +std::string SafeDisconnect(boost::asio::ip::tcp::socket *sock) { std::string err; if(sock && sock->is_open()) { /** @@ -147,7 +147,7 @@ std::string SafeDisconnect(asio::ip::tcp::socket *sock) { **/ try { - sock->shutdown(asio::ip::tcp::socket::shutdown_both); + sock->shutdown(boost::asio::ip::tcp::socket::shutdown_both); } catch (const std::exception &e) { err = std::string("shutdown() threw") + e.what(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h index 590ba5453b6ec..a7f4f958e79d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h @@ -24,7 +24,8 @@ #include #include -#include +#include +#include #include #include @@ -41,7 +42,7 @@ namespace hdfs { typedef std::lock_guard mutex_guard; -Status ToStatus(const ::asio::error_code &ec); +Status ToStatus(const boost::system::error_code &ec); // Determine size of buffer that needs to be allocated in order to serialize msg // in delimited format @@ -75,7 +76,7 @@ bool lock_held(T & mutex) { // Shutdown and close a socket safely; will check if the socket is open and // catch anything thrown by asio. // Returns a string containing error message on failure, otherwise an empty string. -std::string SafeDisconnect(asio::ip::tcp::socket *sock); +std::string SafeDisconnect(boost::asio::ip::tcp::socket *sock); // The following helper function is used for classes that look like the following: @@ -94,13 +95,13 @@ std::string SafeDisconnect(asio::ip::tcp::socket *sock); // it's a asio socket, and nullptr if it's anything else. template -inline asio::ip::tcp::socket *get_asio_socket_ptr(sock_t *s) { +inline boost::asio::ip::tcp::socket *get_asio_socket_ptr(sock_t *s) { (void)s; return nullptr; } template<> -inline asio::ip::tcp::socket *get_asio_socket_ptr - (asio::ip::tcp::socket *s) { +inline boost::asio::ip::tcp::socket *get_asio_socket_ptr + (boost::asio::ip::tcp::socket *s) { return s; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.cc index 41424827821ea..61df6d76d99de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.cc @@ -19,6 +19,8 @@ #include "datanodeconnection.h" #include "common/util.h" +#include + namespace hdfs { DataNodeConnection::~DataNodeConnection(){} @@ -29,7 +31,7 @@ DataNodeConnectionImpl::DataNodeConnectionImpl(std::shared_ptr io_ser const hadoop::common::TokenProto *token, LibhdfsEvents *event_handlers) : event_handlers_(event_handlers) { - using namespace ::asio::ip; + using namespace boost::asio::ip; conn_.reset(new tcp::socket(io_service->GetRaw())); auto datanode_addr = dn_proto.id(); @@ -49,8 +51,8 @@ void DataNodeConnectionImpl::Connect( // Keep the DN from being freed until we're done mutex_guard state_lock(state_lock_); auto shared_this = shared_from_this(); - asio::async_connect(*conn_, endpoints_.begin(), endpoints_.end(), - [shared_this, handler](const asio::error_code &ec, std::array::iterator it) { + boost::asio::async_connect(*conn_, endpoints_.begin(), endpoints_.end(), + [shared_this, handler](const boost::system::error_code &ec, std::array::iterator it) { (void)it; handler(ToStatus(ec), shared_this); }); } @@ -69,7 +71,7 @@ void DataNodeConnectionImpl::Cancel() { } void DataNodeConnectionImpl::async_read_some(const MutableBuffer &buf, - std::function handler) + std::function handler) { event_handlers_->call("DN_read_req", "", "", buf.end() - buf.begin()); @@ -78,7 +80,7 @@ void DataNodeConnectionImpl::async_read_some(const MutableBuffer &buf, } void 
DataNodeConnectionImpl::async_write_some(const ConstBuffer &buf, - std::function handler) + std::function handler) { event_handlers_->call("DN_write_req", "", "", buf.end() - buf.begin()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.h index a54338f17b6b5..a0cb8375a8680 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.h @@ -26,7 +26,8 @@ #include "common/util.h" #include "common/new_delete.h" -#include "asio.hpp" +#include +#include namespace hdfs { @@ -43,7 +44,7 @@ class DataNodeConnection : public AsyncStream { struct SocketDeleter { - inline void operator()(asio::ip::tcp::socket *sock) { +inline void operator()(boost::asio::ip::tcp::socket* sock) { // Cancel may have already closed the socket. std::string err = SafeDisconnect(sock); if(!err.empty()) { @@ -59,8 +60,8 @@ class DataNodeConnectionImpl : public DataNodeConnection, public std::enable_sha std::mutex state_lock_; public: MEMCHECKED_CLASS(DataNodeConnectionImpl) - std::unique_ptr conn_; - std::array endpoints_; + std::unique_ptr conn_; + std::array endpoints_; std::string uuid_; LibhdfsEvents *event_handlers_; @@ -74,10 +75,10 @@ class DataNodeConnectionImpl : public DataNodeConnection, public std::enable_sha void Cancel() override; void async_read_some(const MutableBuffer &buf, - std::function handler) override; + std::function handler) override; void async_write_some(const ConstBuffer &buf, - std::function handler) override; + std::function handler) override; }; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc index 02630fb247a6f..169def364b732 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc @@ -26,6 +26,8 @@ #include #include +#include + #define FMT_THIS_ADDR "this=" << (void*)this namespace hdfs { @@ -72,7 +74,7 @@ void FileHandleImpl::PositionRead( handler(status, bytes_read); }; - AsyncPreadSome(offset, asio::buffer(buf, buf_size), bad_node_tracker_, callback); + AsyncPreadSome(offset, boost::asio::buffer(buf, buf_size), bad_node_tracker_, callback); } Status FileHandleImpl::PositionRead(void *buf, size_t buf_size, off_t offset, size_t *bytes_read) { @@ -233,7 +235,7 @@ void FileHandleImpl::AsyncPreadSome( uint64_t offset_within_block = offset - block->offset(); uint64_t size_within_block = std::min( - block->b().numbytes() - offset_within_block, asio::buffer_size(buffer)); + block->b().numbytes() - offset_within_block, boost::asio::buffer_size(buffer)); LOG_DEBUG(kFileHandle, << "FileHandleImpl::AsyncPreadSome(" << FMT_THIS_ADDR << "), ...) 
Datanode hostname=" << dnHostName << ", IP Address=" << dnIpAddr @@ -281,7 +283,7 @@ void FileHandleImpl::AsyncPreadSome( if (status.ok()) { reader->AsyncReadBlock( client_name, *block, offset_within_block, - asio::buffer(buffer, size_within_block), read_handler); + boost::asio::buffer(buffer, size_within_block), read_handler); } else { handler(status, dn_id, 0); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc index 41cc645be5a20..ba75e86eec78d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc @@ -29,6 +29,8 @@ #include #include +#include + #define FMT_THIS_ADDR "this=" << (void*)this namespace hdfs { @@ -36,7 +38,7 @@ namespace hdfs { static const char kNamenodeProtocol[] = "org.apache.hadoop.hdfs.protocol.ClientProtocol"; static const int kNamenodeProtocolVersion = 1; -using ::asio::ip::tcp; +using boost::asio::ip::tcp; static constexpr uint16_t kDefaultPort = 8020; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc index e46faad127436..96744e5d03d2f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc @@ -19,7 +19,7 @@ #include "filesystem.h" #include "common/continuation/asio.h" -#include +#include #include #include @@ -31,7 +31,7 @@ #define FMT_THIS_ADDR "this=" << (void*)this -using ::asio::ip::tcp; +using boost::asio::ip::tcp; namespace hdfs { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/block_reader.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/block_reader.cc index 90c02f71c1d56..acecfce52374e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/block_reader.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/block_reader.cc @@ -24,6 +24,10 @@ #include +#include +#include +#include + namespace hdfs { #define FMT_CONT_AND_PARENT_ADDR "this=" << (void*)this << ", parent=" << (void*)parent_ @@ -113,7 +117,7 @@ void BlockReaderImpl::AsyncRequestBlock(const std::string &client_name, auto read_pb_message = new continuation::ReadDelimitedPBMessageContinuation(dn_, &s->response); - m->Push(asio_continuation::Write(dn_, asio::buffer(s->header))).Push(read_pb_message); + m->Push(asio_continuation::Write(dn_, boost::asio::buffer(s->header))).Push(read_pb_message); m->Run([this, handler, offset](const Status &status, const State &s) { Status stat = status; if (stat.ok()) { @@ -167,7 +171,7 @@ struct BlockReaderImpl::ReadPacketHeader : continuation::Continuation parent_->packet_data_read_bytes_ = 0; parent_->packet_len_ = 0; - auto handler = [next, this](const asio::error_code &ec, size_t) { + auto handler = [next, this](const boost::system::error_code &ec, size_t) { Status status; if (ec) { status = Status(ec.value(), ec.message().c_str()); @@ -191,7 +195,7 @@ struct BlockReaderImpl::ReadPacketHeader : continuation::Continuation next(status); }; - asio::async_read(*parent_->dn_, 
asio::buffer(buf_), + boost::asio::async_read(*parent_->dn_, boost::asio::buffer(buf_), std::bind(&ReadPacketHeader::CompletionHandler, this, std::placeholders::_1, std::placeholders::_2), handler); } @@ -215,7 +219,7 @@ struct BlockReaderImpl::ReadPacketHeader : continuation::Continuation return ntohs(*reinterpret_cast(&buf_[kHeaderLenOffset])); } - size_t CompletionHandler(const asio::error_code &ec, size_t transferred) { + size_t CompletionHandler(const boost::system::error_code &ec, size_t transferred) { if (ec) { return 0; } else if (transferred < kHeaderStart) { @@ -245,7 +249,7 @@ struct BlockReaderImpl::ReadChecksum : continuation::Continuation std::shared_ptr keep_conn_alive_ = shared_conn_; - auto handler = [parent, next, this, keep_conn_alive_](const asio::error_code &ec, size_t) + auto handler = [parent, next, this, keep_conn_alive_](const boost::system::error_code &ec, size_t) { Status status; if (ec) { @@ -266,7 +270,7 @@ struct BlockReaderImpl::ReadChecksum : continuation::Continuation parent->checksum_.resize(parent->packet_len_ - sizeof(int) - parent->header_.datalen()); - asio::async_read(*parent->dn_, asio::buffer(parent->checksum_), handler); + boost::asio::async_read(*parent->dn_, boost::asio::buffer(parent->checksum_), handler); } private: @@ -279,8 +283,8 @@ struct BlockReaderImpl::ReadChecksum : continuation::Continuation struct BlockReaderImpl::ReadData : continuation::Continuation { ReadData(BlockReaderImpl *parent, std::shared_ptr bytes_transferred, - const asio::mutable_buffers_1 &buf) : parent_(parent), - bytes_transferred_(bytes_transferred), buf_(buf), shared_conn_(parent->dn_) + const boost::asio::mutable_buffers_1 &buf) : parent_(parent), + bytes_transferred_(bytes_transferred), buf_(buf), shared_conn_(parent->dn_) { buf_.begin(); } @@ -293,7 +297,7 @@ struct BlockReaderImpl::ReadData : continuation::Continuation LOG_TRACE(kBlockReader, << "BlockReaderImpl::ReadData::Run(" << FMT_CONT_AND_PARENT_ADDR << ") called"); auto handler = - [next, this](const asio::error_code &ec, size_t transferred) { + [next, this](const boost::system::error_code &ec, size_t transferred) { Status status; if (ec) { status = Status(ec.value(), ec.message().c_str()); @@ -320,13 +324,13 @@ struct BlockReaderImpl::ReadData : continuation::Continuation auto data_len = parent_->header_.datalen() - parent_->packet_data_read_bytes_; - asio::async_read(*parent_->dn_, buf_, asio::transfer_exactly(data_len), handler); + boost::asio::async_read(*parent_->dn_, buf_, boost::asio::transfer_exactly(data_len), handler); } private: BlockReaderImpl *parent_; std::shared_ptr bytes_transferred_; - const asio::mutable_buffers_1 buf_; + const boost::asio::mutable_buffers_1 buf_; // Keep DNConnection alive. 
std::shared_ptr shared_conn_; @@ -337,7 +341,7 @@ struct BlockReaderImpl::ReadPadding : continuation::Continuation ReadPadding(BlockReaderImpl *parent) : parent_(parent), padding_(parent->chunk_padding_bytes_), bytes_transferred_(std::make_shared(0)), - read_data_(new ReadData(parent, bytes_transferred_, asio::buffer(padding_))), + read_data_(new ReadData(parent, bytes_transferred_, boost::asio::buffer(padding_))), shared_conn_(parent->dn_) {} virtual void Run(const Next &next) override { @@ -505,7 +509,7 @@ struct BlockReaderImpl::RequestBlockContinuation : continuation::Continuation struct BlockReaderImpl::ReadBlockContinuation : continuation::Continuation { ReadBlockContinuation(BlockReader *reader, MutableBuffer buffer, size_t *transferred) - : reader_(reader), buffer_(buffer), buffer_size_(asio::buffer_size(buffer)), transferred_(transferred) {} + : reader_(reader), buffer_(buffer), buffer_size_(boost::asio::buffer_size(buffer)), transferred_(transferred) {} virtual void Run(const Next &next) override { LOG_TRACE(kBlockReader, << "BlockReaderImpl::ReadBlockContinuation::Run(" @@ -532,7 +536,7 @@ struct BlockReaderImpl::ReadBlockContinuation : continuation::Continuation next_(status); } else { reader_->AsyncReadPacket( - asio::buffer(buffer_ + *transferred_, buffer_size_ - *transferred_), + boost::asio::buffer(buffer_ + *transferred_, buffer_size_ - *transferred_), std::bind(&ReadBlockContinuation::OnReadData, this, _1, _2)); } } @@ -551,7 +555,7 @@ void BlockReaderImpl::AsyncReadBlock( auto m = continuation::Pipeline::Create(cancel_state_); size_t * bytesTransferred = &m->state(); - size_t size = asio::buffer_size(buffer); + size_t size = boost::asio::buffer_size(buffer); m->Push(new RequestBlockContinuation(this, client_name, &block.b(), size, offset)) .Push(new ReadBlockContinuation(this, buffer, bytesTransferred)); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h index ea176532f2368..cfa94bea2baf2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h @@ -21,8 +21,10 @@ #include "common/sasl_authenticator.h" #include "common/async_stream.h" #include "connection/datanodeconnection.h" + #include +#include namespace hdfs { @@ -45,13 +47,13 @@ template class DataTransferSaslStream : public DataNodeConnection template void Handshake(const Handler &next); void async_read_some(const MutableBuffer &buf, - std::function handler) override { stream_->async_read_some(buf, handler); } void async_write_some(const ConstBuffer &buf, - std::function handler) override { stream_->async_write_some(buf, handler); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer_impl.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer_impl.h index 77e618dd7a214..d77685dd45a70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer_impl.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer_impl.h @@ -23,8 +23,8 @@ #include "common/continuation/asio.h" #include "common/continuation/protobuf.h" -#include -#include +#include +#include namespace hdfs { @@ -101,7 +101,7 @@ void 
DataTransferSaslStream::Handshake(const Handler &next) { using ::hdfs::continuation::WriteDelimitedPBMessage; static const int kMagicNumber = htonl(kDataTransferSasl); - static const asio::const_buffers_1 kMagicNumberBuffer = asio::buffer( + static const boost::asio::const_buffers_1 kMagicNumberBuffer = boost::asio::buffer( reinterpret_cast(kMagicNumber), sizeof(kMagicNumber)); struct State { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/namenode_tracker.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/namenode_tracker.cc index 242c6eadcbb3c..acb754e738abc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/namenode_tracker.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/namenode_tracker.cc @@ -24,7 +24,7 @@ namespace hdfs { -static std::string format_endpoints(const std::vector<::asio::ip::tcp::endpoint> &pts) { +static std::string format_endpoints(const std::vector &pts) { std::stringstream ss; for(unsigned int i=0; i &se HANamenodeTracker::~HANamenodeTracker() {} -bool HANamenodeTracker::GetFailoverAndUpdate(const std::vector<::asio::ip::tcp::endpoint>& current_endpoints, +bool HANamenodeTracker::GetFailoverAndUpdate(const std::vector& current_endpoints, ResolvedNamenodeInfo& out) { mutex_guard swap_lock(swap_lock_); @@ -117,7 +117,7 @@ bool HANamenodeTracker::GetFailoverAndUpdate(const std::vector<::asio::ip::tcp:: } -bool HANamenodeTracker::IsCurrentActive_locked(const ::asio::ip::tcp::endpoint &ep) const { +bool HANamenodeTracker::IsCurrentActive_locked(const boost::asio::ip::tcp::endpoint &ep) const { for(unsigned int i=0;i +#include #include #include @@ -52,13 +52,13 @@ class HANamenodeTracker { // currently being used. Swap internal state and set out to other node. // Note: This will always mutate internal state. 
Use IsCurrentActive/Standby to // get info without changing state - bool GetFailoverAndUpdate(const std::vector<::asio::ip::tcp::endpoint>& current_endpoints, + bool GetFailoverAndUpdate(const std::vector& current_endpoints, ResolvedNamenodeInfo& out); private: // See if endpoint ep is part of the list of endpoints for the active or standby NN - bool IsCurrentActive_locked(const ::asio::ip::tcp::endpoint &ep) const; - bool IsCurrentStandby_locked(const ::asio::ip::tcp::endpoint &ep) const; + bool IsCurrentActive_locked(const boost::asio::ip::tcp::endpoint &ep) const; + bool IsCurrentStandby_locked(const boost::asio::ip::tcp::endpoint &ep) const; // If HA should be enabled, according to our options and runtime info like # nodes provided bool enabled_; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.h index f19554046dcf8..0e19fff87b4d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.h @@ -29,7 +29,7 @@ #include #include -#include +#include namespace hdfs { @@ -59,7 +59,7 @@ class Request { int call_id() const { return call_id_; } std::string method_name() const { return method_name_; } - ::asio::deadline_timer &timer() { return timer_; } + boost::asio::deadline_timer &timer() { return timer_; } int IncrementRetryCount() { return retry_count_++; } int IncrementFailoverCount(); void GetPacket(std::string *res) const; @@ -75,7 +75,7 @@ class Request { const std::string method_name_; const int call_id_; - ::asio::deadline_timer timer_; + boost::asio::deadline_timer timer_; std::string payload_; const Handler handler_; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection.h index 9f7b3bbd2424a..f599d36ee5c4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection.h @@ -42,6 +42,8 @@ #include #include +#include + namespace hdfs { typedef const std::function RpcCallback; @@ -57,10 +59,10 @@ class RpcConnection : public std::enable_shared_from_this { // Note that a single server can have multiple endpoints - especially both // an ipv4 and ipv6 endpoint - virtual void Connect(const std::vector<::asio::ip::tcp::endpoint> &server, + virtual void Connect(const std::vector &server, const AuthInfo & auth_info, RpcCallback &handler) = 0; - virtual void ConnectAndFlush(const std::vector<::asio::ip::tcp::endpoint> &server) = 0; + virtual void ConnectAndFlush(const std::vector &server) = 0; virtual void Disconnect() = 0; void StartReading(); @@ -110,9 +112,9 @@ class RpcConnection : public std::enable_shared_from_this { virtual void SendContext(RpcCallback &handler) = 0; void ContextComplete(const Status &s); - virtual void OnSendCompleted(const ::asio::error_code &ec, + virtual void OnSendCompleted(const boost::system::error_code &ec, size_t transferred) = 0; - virtual void OnRecvCompleted(const ::asio::error_code &ec, + virtual void OnRecvCompleted(const boost::system::error_code &ec, size_t transferred) = 0; virtual void FlushPendingRequests()=0; // Synchronously write the next request @@ -133,10 +135,10 @@ class 
RpcConnection : public std::enable_shared_from_this { Status HandleRpcResponse(std::shared_ptr response); void HandleRpcTimeout(std::shared_ptr req, - const ::asio::error_code &ec); + const boost::system::error_code &ec); void CommsError(const Status &status); - void ClearAndDisconnect(const ::asio::error_code &ec); + void ClearAndDisconnect(const boost::system::error_code &ec); std::shared_ptr RemoveFromRunningQueue(int call_id); std::weak_ptr engine_; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc index 43111eff9499a..82fdfeb033d38 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc @@ -23,6 +23,8 @@ #include "ProtobufRpcEngine.pb.h" #include "IpcConnectionContext.pb.h" +#include + namespace hdfs { namespace pb = ::google::protobuf; @@ -89,7 +91,7 @@ void RpcConnection::StartReading() { } service->PostLambda( - [shared_this, this] () { OnRecvCompleted(::asio::error_code(), 0); } + [shared_this, this] () { OnRecvCompleted(boost::system::error_code(), 0); } ); } @@ -248,8 +250,8 @@ Status RpcConnection::HandleRpcResponse(std::shared_ptr response) { } void RpcConnection::HandleRpcTimeout(std::shared_ptr req, - const ::asio::error_code &ec) { - if (ec.value() == asio::error::operation_aborted) { + const boost::system::error_code &ec) { + if (ec.value() == boost::asio::error::operation_aborted) { return; } @@ -260,7 +262,7 @@ void RpcConnection::HandleRpcTimeout(std::shared_ptr req, return; } - Status stat = ToStatus(ec ? ec : make_error_code(::asio::error::timed_out)); + Status stat = ToStatus(ec ? 
ec : make_error_code(boost::asio::error::timed_out)); r->OnResponseArrived(nullptr, stat); } @@ -469,7 +471,7 @@ void RpcConnection::CommsError(const Status &status) { pinnedEngine->AsyncRpcCommsError(status, shared_from_this(), requestsToReturn); } -void RpcConnection::ClearAndDisconnect(const ::asio::error_code &ec) { +void RpcConnection::ClearAndDisconnect(const boost::system::error_code &ec) { Disconnect(); std::vector> requests; std::transform(sent_requests_.begin(), sent_requests_.end(), diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h index 1dd43af36b0e9..884bd64ac642c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h @@ -28,9 +28,11 @@ #include "common/libhdfs_events_impl.h" #include "hdfspp/ioservice.h" -#include -#include -#include +#include +#include +#include +#include +#include #include @@ -44,17 +46,17 @@ class RpcConnectionImpl : public RpcConnection { RpcConnectionImpl(std::shared_ptr engine); virtual ~RpcConnectionImpl() override; - virtual void Connect(const std::vector<::asio::ip::tcp::endpoint> &server, + virtual void Connect(const std::vector &server, const AuthInfo & auth_info, RpcCallback &handler) override; virtual void ConnectAndFlush( - const std::vector<::asio::ip::tcp::endpoint> &server) override; + const std::vector &server) override; virtual void SendHandshake(RpcCallback &handler) override; virtual void SendContext(RpcCallback &handler) override; virtual void Disconnect() override; - virtual void OnSendCompleted(const ::asio::error_code &ec, + virtual void OnSendCompleted(const boost::system::error_code &ec, size_t transferred) override; - virtual void OnRecvCompleted(const ::asio::error_code &ec, + virtual void OnRecvCompleted(const boost::system::error_code &ec, size_t transferred) override; virtual void FlushPendingRequests() override; @@ -65,12 +67,12 @@ class RpcConnectionImpl : public RpcConnection { private: const Options options_; - ::asio::ip::tcp::endpoint current_endpoint_; - std::vector<::asio::ip::tcp::endpoint> additional_endpoints_; + boost::asio::ip::tcp::endpoint current_endpoint_; + std::vector additional_endpoints_; Socket socket_; - ::asio::deadline_timer connect_timer_; + boost::asio::deadline_timer connect_timer_; - void ConnectComplete(const ::asio::error_code &ec, const ::asio::ip::tcp::endpoint &remote); + void ConnectComplete(const boost::system::error_code &ec, const boost::asio::ip::tcp::endpoint &remote); }; template @@ -95,7 +97,7 @@ RpcConnectionImpl::~RpcConnectionImpl() { template void RpcConnectionImpl::Connect( - const std::vector<::asio::ip::tcp::endpoint> &server, + const std::vector &server, const AuthInfo & auth_info, RpcCallback &handler) { LOG_TRACE(kRPC, << "RpcConnectionImpl::Connect called"); @@ -124,7 +126,7 @@ void RpcConnectionImpl::Connect( template void RpcConnectionImpl::ConnectAndFlush( - const std::vector<::asio::ip::tcp::endpoint> &server) { + const std::vector &server) { LOG_INFO(kRPC, << "ConnectAndFlush called"); std::lock_guard state_lock(connection_state_lock_); @@ -147,29 +149,29 @@ void RpcConnectionImpl::ConnectAndFlush( // Take the first endpoint, but remember the alternatives for later additional_endpoints_ = server; - ::asio::ip::tcp::endpoint first_endpoint = 
additional_endpoints_.front(); + boost::asio::ip::tcp::endpoint first_endpoint = additional_endpoints_.front(); additional_endpoints_.erase(additional_endpoints_.begin()); current_endpoint_ = first_endpoint; auto shared_this = shared_from_this(); - socket_.async_connect(first_endpoint, [shared_this, this, first_endpoint](const ::asio::error_code &ec) { + socket_.async_connect(first_endpoint, [shared_this, this, first_endpoint](const boost::system::error_code &ec) { ConnectComplete(ec, first_endpoint); }); // Prompt the timer to timeout auto weak_this = std::weak_ptr(shared_this); connect_timer_.expires_from_now( - std::chrono::milliseconds(options_.rpc_connect_timeout)); - connect_timer_.async_wait([shared_this, this, first_endpoint](const ::asio::error_code &ec) { + boost::posix_time::milliseconds(options_.rpc_connect_timeout)); + connect_timer_.async_wait([shared_this, this, first_endpoint](const boost::system::error_code &ec) { if (ec) ConnectComplete(ec, first_endpoint); else - ConnectComplete(make_error_code(asio::error::host_unreachable), first_endpoint); + ConnectComplete(make_error_code(boost::asio::error::host_unreachable), first_endpoint); }); } template -void RpcConnectionImpl::ConnectComplete(const ::asio::error_code &ec, const ::asio::ip::tcp::endpoint & remote) { +void RpcConnectionImpl::ConnectComplete(const boost::system::error_code &ec, const boost::asio::ip::tcp::endpoint & remote) { auto shared_this = RpcConnectionImpl::shared_from_this(); std::lock_guard state_lock(connection_state_lock_); connect_timer_.cancel(); @@ -211,20 +213,20 @@ void RpcConnectionImpl::ConnectComplete(const ::asio::error_code &ec, co if (!additional_endpoints_.empty()) { // If we have additional endpoints, keep trying until we either run out or // hit one - ::asio::ip::tcp::endpoint next_endpoint = additional_endpoints_.front(); + boost::asio::ip::tcp::endpoint next_endpoint = additional_endpoints_.front(); additional_endpoints_.erase(additional_endpoints_.begin()); current_endpoint_ = next_endpoint; - socket_.async_connect(next_endpoint, [shared_this, this, next_endpoint](const ::asio::error_code &ec) { + socket_.async_connect(next_endpoint, [shared_this, this, next_endpoint](const boost::system::error_code &ec) { ConnectComplete(ec, next_endpoint); }); connect_timer_.expires_from_now( - std::chrono::milliseconds(options_.rpc_connect_timeout)); - connect_timer_.async_wait([shared_this, this, next_endpoint](const ::asio::error_code &ec) { + boost::posix_time::milliseconds(options_.rpc_connect_timeout)); + connect_timer_.async_wait([shared_this, this, next_endpoint](const boost::system::error_code &ec) { if (ec) ConnectComplete(ec, next_endpoint); else - ConnectComplete(make_error_code(asio::error::host_unreachable), next_endpoint); + ConnectComplete(make_error_code(boost::asio::error::host_unreachable), next_endpoint); }); } else { CommsError(status); @@ -241,9 +243,9 @@ void RpcConnectionImpl::SendHandshake(RpcCallback &handler) { auto shared_this = shared_from_this(); auto handshake_packet = PrepareHandshakePacket(); - ::asio::async_write(socket_, asio::buffer(*handshake_packet), + boost::asio::async_write(socket_, boost::asio::buffer(*handshake_packet), [handshake_packet, handler, shared_this, this]( - const ::asio::error_code &ec, size_t) { + const boost::system::error_code &ec, size_t) { Status status = ToStatus(ec); handler(status); }); @@ -257,16 +259,16 @@ void RpcConnectionImpl::SendContext(RpcCallback &handler) { auto shared_this = shared_from_this(); auto context_packet = 
PrepareContextPacket(); - ::asio::async_write(socket_, asio::buffer(*context_packet), + boost::asio::async_write(socket_, boost::asio::buffer(*context_packet), [context_packet, handler, shared_this, this]( - const ::asio::error_code &ec, size_t) { + const boost::system::error_code &ec, size_t) { Status status = ToStatus(ec); handler(status); }); } template -void RpcConnectionImpl::OnSendCompleted(const ::asio::error_code &ec, +void RpcConnectionImpl::OnSendCompleted(const boost::system::error_code &ec, size_t) { using std::placeholders::_1; using std::placeholders::_2; @@ -340,16 +342,16 @@ void RpcConnectionImpl::FlushPendingRequests() { outgoing_request_ = req; req->timer().expires_from_now( - std::chrono::milliseconds(options_.rpc_timeout)); - req->timer().async_wait([weak_this, weak_req, this](const ::asio::error_code &ec) { + boost::posix_time::milliseconds(options_.rpc_timeout)); + req->timer().async_wait([weak_this, weak_req, this](const boost::system::error_code &ec) { auto timeout_this = weak_this.lock(); auto timeout_req = weak_req.lock(); if (timeout_this && timeout_req) this->HandleRpcTimeout(timeout_req, ec); }); - asio::async_write(socket_, asio::buffer(*payload), - [shared_this, this, payload](const ::asio::error_code &ec, + boost::asio::async_write(socket_, boost::asio::buffer(*payload), + [shared_this, this, payload](const boost::system::error_code &ec, size_t size) { OnSendCompleted(ec, size); }); @@ -374,13 +376,13 @@ void RpcConnectionImpl::FlushPendingRequests() { template -void RpcConnectionImpl::OnRecvCompleted(const ::asio::error_code &original_ec, +void RpcConnectionImpl::OnRecvCompleted(const boost::system::error_code &original_ec, size_t) { using std::placeholders::_1; using std::placeholders::_2; std::lock_guard state_lock(connection_state_lock_); - ::asio::error_code my_ec(original_ec); + boost::system::error_code my_ec(original_ec); LOG_TRACE(kRPC, << "RpcConnectionImpl::OnRecvCompleted called"); @@ -390,7 +392,7 @@ void RpcConnectionImpl::OnRecvCompleted(const ::asio::error_code &origin event_response event_resp = event_handlers_->call(FS_NN_READ_EVENT, cluster_name_.c_str(), 0); #ifndef LIBHDFSPP_SIMULATE_ERROR_DISABLED if (event_resp.response_type() == event_response::kTest_Error) { - my_ec = std::make_error_code(std::errc::network_down); + my_ec = boost::system::error_code(boost::system::errc::errc_t::network_down, boost::system::system_category()); } #endif } @@ -399,7 +401,7 @@ void RpcConnectionImpl::OnRecvCompleted(const ::asio::error_code &origin case 0: // No errors break; - case asio::error::operation_aborted: + case boost::asio::error::operation_aborted: // The event loop has been shut down. Ignore the error. 
return; default: @@ -414,20 +416,20 @@ void RpcConnectionImpl::OnRecvCompleted(const ::asio::error_code &origin if (current_response_state_->state_ == Response::kReadLength) { current_response_state_->state_ = Response::kReadContent; - auto buf = ::asio::buffer(reinterpret_cast(¤t_response_state_->length_), + auto buf = boost::asio::buffer(reinterpret_cast(¤t_response_state_->length_), sizeof(current_response_state_->length_)); - asio::async_read( + boost::asio::async_read( socket_, buf, - [shared_this, this](const ::asio::error_code &ec, size_t size) { + [shared_this, this](const boost::system::error_code &ec, size_t size) { OnRecvCompleted(ec, size); }); } else if (current_response_state_->state_ == Response::kReadContent) { current_response_state_->state_ = Response::kParseResponse; current_response_state_->length_ = ntohl(current_response_state_->length_); current_response_state_->data_.resize(current_response_state_->length_); - asio::async_read( - socket_, ::asio::buffer(current_response_state_->data_), - [shared_this, this](const ::asio::error_code &ec, size_t size) { + boost::asio::async_read( + socket_, boost::asio::buffer(current_response_state_->data_), + [shared_this, this](const boost::system::error_code &ec, size_t size) { OnRecvCompleted(ec, size); }); } else if (current_response_state_->state_ == Response::kParseResponse) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc index ad6c9b91364f5..06cda962cf9b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc @@ -24,6 +24,8 @@ #include +#include + namespace hdfs { template @@ -171,7 +173,7 @@ std::shared_ptr RpcEngine::NewConnection() { LOG_DEBUG(kRPC, << "RpcEngine::NewConnection called"); - return std::make_shared>(shared_from_this()); + return std::make_shared>(shared_from_this()); } std::shared_ptr RpcEngine::InitializeConnection() @@ -307,8 +309,8 @@ void RpcEngine::RpcCommsError( if (head_action->delayMillis > 0) { auto weak_conn = std::weak_ptr(conn_); retry_timer.expires_from_now( - std::chrono::milliseconds(head_action->delayMillis)); - retry_timer.async_wait([this, weak_conn](asio::error_code ec) { + boost::posix_time::milliseconds(head_action->delayMillis)); + retry_timer.async_wait([this, weak_conn](boost::system::error_code ec) { auto strong_conn = weak_conn.lock(); if ( (!ec) && (strong_conn) ) { strong_conn->ConnectAndFlush(last_endpoints_); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h index 845eaf5868789..13e56c5b92fb8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h @@ -31,8 +31,8 @@ #include -#include -#include +#include +#include #include #include @@ -160,7 +160,7 @@ class RpcEngine : public LockFreeRpcEngine, public std::enable_shared_from_this< static std::string getRandomClientId(); // Remember all of the last endpoints in case we need to reconnect and retry - std::vector<::asio::ip::tcp::endpoint> last_endpoints_; + std::vector last_endpoints_; private: mutable std::shared_ptr io_service_; @@ 
-173,7 +173,7 @@ class RpcEngine : public LockFreeRpcEngine, public std::enable_shared_from_this< AuthInfo auth_info_; std::string cluster_name_; std::atomic_int call_id_; - ::asio::deadline_timer retry_timer; + boost::asio::deadline_timer retry_timer; std::shared_ptr event_handlers_; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt index 59fdbf20a27d3..2b2f4f16f1677 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt @@ -16,6 +16,8 @@ # limitations under the License. # +find_package(Boost REQUIRED COMPONENTS date_time) + # Delegate some functionality to libhdfs, until libhdfspp is complete. set (LIBHDFS_SRC_DIR ../../libhdfs) set (LIBHDFS_TESTS_DIR ../../libhdfs-tests) @@ -81,7 +83,7 @@ add_memcheck_test(retry_policy retry_policy_test) include_directories(${CMAKE_CURRENT_BINARY_DIR}) add_executable(rpc_engine_test rpc_engine_test.cc ${PROTO_TEST_SRCS} ${PROTO_TEST_HDRS}) -target_link_libraries(rpc_engine_test test_common rpc proto common ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(rpc_engine_test test_common rpc proto common ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT} ${Boost_LIBRARIES}) add_memcheck_test(rpc_engine rpc_engine_test) add_executable(bad_datanode_test bad_datanode_test.cc) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc index 23de0154f8ab0..5417af8f4cf11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc @@ -25,6 +25,9 @@ #include +#include +#include + using hadoop::common::TokenProto; using hadoop::hdfs::DatanodeInfoProto; using hadoop::hdfs::DatanodeIDProto; @@ -42,7 +45,7 @@ class MockReader : public BlockReader { public: MOCK_METHOD2( AsyncReadPacket, - void(const asio::mutable_buffers_1 &, + void(const boost::asio::mutable_buffers_1 &, const std::function &)); MOCK_METHOD5(AsyncRequestBlock, @@ -69,17 +72,17 @@ class MockDNConnection : public DataNodeConnection, public std::enable_shared_fr } void async_read_some(const MutableBuffer &buf, - std::function handler) override { (void)buf; - handler(asio::error::fault, 0); + handler(boost::asio::error::fault, 0); } void async_write_some(const ConstBuffer &buf, - std::function handler) override { (void)buf; - handler(asio::error::fault, 0); + handler(boost::asio::error::fault, 0); } virtual void Cancel() override { @@ -141,7 +144,7 @@ TEST(BadDataNodeTest, TestNoNodes) { size_t read = 0; // Exclude the one datanode with the data - is.AsyncPreadSome(0, asio::buffer(buf, sizeof(buf)), nullptr, + is.AsyncPreadSome(0, boost::asio::buffer(buf, sizeof(buf)), nullptr, [&stat, &read](const Status &status, const std::string &, size_t transferred) { stat = status; read = transferred; @@ -202,7 +205,7 @@ TEST(BadDataNodeTest, NNEventCallback) { Status::OK(), 0)); is.AsyncPreadSome( - 0, asio::buffer(buf, sizeof(buf)), nullptr, + 0, boost::asio::buffer(buf, sizeof(buf)), nullptr, 
[&stat, &read](const Status &status, const std::string &, size_t transferred) { stat = status; @@ -248,7 +251,7 @@ TEST(BadDataNodeTest, RecoverableError) { is.AsyncPreadSome( - 0, asio::buffer(buf, sizeof(buf)), nullptr, + 0, boost::asio::buffer(buf, sizeof(buf)), nullptr, [&stat, &read](const Status &status, const std::string &, size_t transferred) { stat = status; @@ -300,7 +303,7 @@ TEST(BadDataNodeTest, InternalError) { sizeof(buf))); is.AsyncPreadSome( - 0, asio::buffer(buf, sizeof(buf)), nullptr, + 0, boost::asio::buffer(buf, sizeof(buf)), nullptr, [&stat, &read](const Status &status, const std::string &, size_t transferred) { stat = status; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.cc index 1885eea8da184..37fabf568d275 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.cc @@ -20,7 +20,7 @@ namespace hdfs { -MockConnectionBase::MockConnectionBase(::asio::io_service *io_service) +MockConnectionBase::MockConnectionBase(boost::asio::io_service *io_service) : io_service_(io_service) {} @@ -31,7 +31,7 @@ ProducerResult SharedMockConnection::Produce() { return shared_prducer->Produce(); } else { assert(false && "No producer registered"); - return std::make_pair(asio::error_code(), ""); + return std::make_pair(boost::system::error_code(), ""); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h index 82db760421314..7a7b5f076ed39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h @@ -20,21 +20,21 @@ #include "common/async_stream.h" -#include -#include -#include -#include +#include +#include +#include +#include #include namespace hdfs { -typedef std::pair ProducerResult; +typedef std::pair ProducerResult; class AsioProducer { public: /* * Return either: - * (::asio::error_code(), ) for a good result + * (::boost::system::error_code(), ) for a good result * (, ) to pass an error to the caller * (::asio::error::would_block, ) to block the next call forever */ @@ -45,53 +45,53 @@ class AsioProducer { class MockConnectionBase : public AsioProducer, public AsyncStream { public: - MockConnectionBase(::asio::io_service *io_service); + MockConnectionBase(boost::asio::io_service *io_service); virtual ~MockConnectionBase(); - typedef std::pair ProducerResult; + typedef std::pair ProducerResult; void async_read_some(const MutableBuffer &buf, - std::function handler) override { if (produced_.size() == 0) { ProducerResult r = Produce(); - if (r.first == asio::error::would_block) { + if (r.first == boost::asio::error::would_block) { return; // No more reads to do } if (r.first) { io_service_->post(std::bind(handler, r.first, 0)); return; } - asio::mutable_buffers_1 data = produced_.prepare(r.second.size()); - asio::buffer_copy(data, asio::buffer(r.second)); + boost::asio::mutable_buffers_1 data = produced_.prepare(r.second.size()); + boost::asio::buffer_copy(data, boost::asio::buffer(r.second)); produced_.commit(r.second.size()); } - size_t len = 
std::min(asio::buffer_size(buf), produced_.size()); - asio::buffer_copy(buf, produced_.data()); + size_t len = std::min(boost::asio::buffer_size(buf), produced_.size()); + boost::asio::buffer_copy(buf, produced_.data()); produced_.consume(len); - io_service_->post(std::bind(handler, asio::error_code(), len)); + io_service_->post(std::bind(handler, boost::system::error_code(), len)); } void async_write_some(const ConstBuffer &buf, - std::function handler) override { // CompletionResult res = OnWrite(buf); - io_service_->post(std::bind(handler, asio::error_code(), asio::buffer_size(buf))); + io_service_->post(std::bind(handler, boost::system::error_code(), boost::asio::buffer_size(buf))); } template void async_connect(const Endpoint &, Callback &&handler) { - io_service_->post([handler]() { handler(::asio::error_code()); }); + io_service_->post([handler]() { handler(::boost::system::error_code()); }); } virtual void cancel() {} virtual void close() {} protected: ProducerResult Produce() override = 0; - ::asio::io_service *io_service_; + boost::asio::io_service *io_service_; private: - asio::streambuf produced_; + boost::asio::streambuf produced_; }; @@ -114,10 +114,10 @@ class SharedMockConnection : public MockConnectionBase { assert(data); if (!data->checkProducerForConnect) { - io_service_->post([handler]() { handler(::asio::error_code()); }); + io_service_->post([handler]() { handler(::boost::system::error_code()); }); } else { ProducerResult result = Produce(); - if (result.first == asio::error::would_block) { + if (result.first == boost::asio::error::would_block) { return; // Connect will hang } else { io_service_->post([handler, result]() { handler( result.first); }); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc index 3997e64be5ce2..dfee686b60244 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc @@ -29,6 +29,9 @@ #include #include #include +#include +#include +#include #include @@ -44,9 +47,9 @@ using ::hadoop::hdfs::ReadOpChecksumInfoProto; using ::hadoop::hdfs::LocatedBlockProto; using ::hadoop::hdfs::LocatedBlocksProto; -using ::asio::buffer; -using ::asio::error_code; -using ::asio::mutable_buffers_1; +using boost::asio::buffer; +using boost::system::error_code; +using boost::asio::mutable_buffers_1; using ::testing::_; using ::testing::InvokeArgument; using ::testing::Return; @@ -60,7 +63,7 @@ namespace hdfs { class MockDNConnection : public MockConnectionBase, public DataNodeConnection{ public: - MockDNConnection(::asio::io_service &io_service) + MockDNConnection(boost::asio::io_service &io_service) : MockConnectionBase(&io_service), OnRead([](){}) {} MOCK_METHOD0(Produce, ProducerResult()); @@ -70,14 +73,14 @@ class MockDNConnection : public MockConnectionBase, public DataNodeConnection{ std::function OnRead; void async_read_some(const MutableBuffer &buf, - std::function handler) override { this->OnRead(); this->MockConnectionBase::async_read_some(buf, handler); } void async_write_some(const ConstBuffer &buf, - std::function handler) override { this->MockConnectionBase::async_write_some(buf, handler); } @@ -96,7 +99,7 @@ class PartialMockReader : public BlockReaderImpl { MOCK_METHOD2( AsyncReadPacket, - void(const 
asio::mutable_buffers_1 &, + void(const boost::asio::mutable_buffers_1 &, const std::function &)); MOCK_METHOD5(AsyncRequestBlock, @@ -163,7 +166,7 @@ TEST(RemoteBlockReaderTest, TestReadSingleTrunk) { .WillOnce(InvokeArgument<1>(Status::OK(), sizeof(buf))); reader.AsyncReadBlock( - GetRandomClientName(), block, 0, asio::buffer(buf, sizeof(buf)), + GetRandomClientName(), block, 0, boost::asio::buffer(buf, sizeof(buf)), [&stat, &read](const Status &status, size_t transferred) { stat = status; read = transferred; @@ -190,7 +193,7 @@ TEST(RemoteBlockReaderTest, TestReadMultipleTrunk) { .WillRepeatedly(InvokeArgument<1>(Status::OK(), sizeof(buf) / 4)); reader.AsyncReadBlock( - GetRandomClientName(), block, 0, asio::buffer(buf, sizeof(buf)), + GetRandomClientName(), block, 0, boost::asio::buffer(buf, sizeof(buf)), [&stat, &read](const Status &status, size_t transferred) { stat = status; read = transferred; @@ -218,7 +221,7 @@ TEST(RemoteBlockReaderTest, TestReadError) { .WillOnce(InvokeArgument<1>(Status::Error("error"), 0)); reader.AsyncReadBlock( - GetRandomClientName(), block, 0, asio::buffer(buf, sizeof(buf)), + GetRandomClientName(), block, 0, boost::asio::buffer(buf, sizeof(buf)), [&stat, &read](const Status &status, size_t transferred) { stat = status; read = transferred; @@ -250,7 +253,7 @@ ReadContent(std::shared_ptr conn, const ExtendedBlockProto &block, TEST(RemoteBlockReaderTest, TestReadWholeBlock) { static const size_t kChunkSize = 512; static const string kChunkData(kChunkSize, 'a'); - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; @@ -287,7 +290,7 @@ TEST(RemoteBlockReaderTest, TestCancelWhileReceiving) { static const size_t kChunkSize = 512; static const string kChunkData(kChunkSize, 'a'); - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; @@ -338,7 +341,7 @@ TEST(RemoteBlockReaderTest, TestReadWithinChunk) { static const size_t kOffset = kChunkSize / 4; static const string kChunkData = string(kOffset, 'a') + string(kLength, 'b'); - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; ReadOpChecksumInfoProto *checksum_info = @@ -378,7 +381,7 @@ TEST(RemoteBlockReaderTest, TestReadMultiplePacket) { static const size_t kChunkSize = 1024; static const string kChunkData(kChunkSize, 'a'); - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; block_op_resp.set_status(::hadoop::hdfs::Status::SUCCESS); @@ -428,7 +431,7 @@ TEST(RemoteBlockReaderTest, TestReadCancelBetweenPackets) { static const size_t kChunkSize = 1024; static const string kChunkData(kChunkSize, 'a'); - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; block_op_resp.set_status(::hadoop::hdfs::Status::SUCCESS); @@ -482,7 +485,7 @@ TEST(RemoteBlockReaderTest, TestSaslConnection) { static const string kAuthPayload = "realm=\"0\",nonce=\"+GAWc+O6yEAWpew/" "qKah8qh4QZLoOLCDcTtEKhlS\",qop=\"auth\"," "charset=utf-8,algorithm=md5-sess"; - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; block_op_resp.set_status(::hadoop::hdfs::Status::SUCCESS); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc index 6bbe7259ad853..744e7eba16d8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc @@ -26,6 +26,8 @@ #include #include +#include +#include using ::hadoop::common::RpcResponseHeaderProto; using ::hadoop::common::EmptyRequestProto; @@ -33,8 +35,6 @@ using ::hadoop::common::EmptyResponseProto; using ::hadoop::common::EchoRequestProto; using ::hadoop::common::EchoResponseProto; -using ::asio::error_code; - using ::testing::Return; using ::std::make_pair; @@ -47,20 +47,20 @@ namespace hdfs { std::vector make_endpoint() { ResolvedNamenodeInfo result; - result.endpoints.push_back(asio::ip::basic_endpoint()); + result.endpoints.push_back(boost::asio::ip::basic_endpoint()); return std::vector({result}); } class MockRPCConnection : public MockConnectionBase { public: - MockRPCConnection(::asio::io_service &io_service) + MockRPCConnection(boost::asio::io_service &io_service) : MockConnectionBase(&io_service) {} MOCK_METHOD0(Produce, ProducerResult()); }; class SharedMockRPCConnection : public SharedMockConnection { public: - SharedMockRPCConnection(::asio::io_service &io_service) + SharedMockRPCConnection(boost::asio::io_service &io_service) : SharedMockConnection(&io_service) {} }; @@ -79,9 +79,9 @@ class SharedConnectionEngine : public RpcEngine { } -static inline std::pair RpcResponse( +static inline std::pair RpcResponse( const RpcResponseHeaderProto &h, const std::string &data, - const ::asio::error_code &ec = error_code()) { + const boost::system::error_code &ec = boost::system::error_code()) { uint32_t payload_length = pbio::CodedOutputStream::VarintSize32(h.ByteSize()) + pbio::CodedOutputStream::VarintSize32(data.size()) + h.ByteSize() + @@ -157,7 +157,7 @@ TEST(RpcEngineTest, TestConnectionResetAndFail) { h.set_status(RpcResponseHeaderProto::SUCCESS); EXPECT_CALL(conn->TEST_get_mutable_socket(), Produce()) .WillOnce(Return(RpcResponse( - h, "", make_error_code(::asio::error::connection_reset)))); + h, "", make_error_code(boost::asio::error::connection_reset)))); std::shared_ptr conn_ptr(conn); engine->TEST_SetRpcConnection(conn_ptr); @@ -200,7 +200,7 @@ TEST(RpcEngineTest, TestConnectionResetAndRecover) { h.set_status(RpcResponseHeaderProto::SUCCESS); EXPECT_CALL(*producer, Produce()) .WillOnce(Return(RpcResponse( - h, "", make_error_code(::asio::error::connection_reset)))) + h, "", make_error_code(boost::asio::error::connection_reset)))) .WillOnce(Return(RpcResponse(h, server_resp.SerializeAsString()))); SharedMockConnection::SetSharedConnectionData(producer); @@ -240,7 +240,7 @@ TEST(RpcEngineTest, TestConnectionResetAndRecoverWithDelay) { h.set_status(RpcResponseHeaderProto::SUCCESS); EXPECT_CALL(*producer, Produce()) .WillOnce(Return(RpcResponse( - h, "", make_error_code(::asio::error::connection_reset)))) + h, "", make_error_code(boost::asio::error::connection_reset)))) .WillOnce(Return(RpcResponse(h, server_resp.SerializeAsString()))); SharedMockConnection::SetSharedConnectionData(producer); @@ -254,9 +254,9 @@ TEST(RpcEngineTest, TestConnectionResetAndRecoverWithDelay) { ASSERT_TRUE(stat.ok()); }); - ::asio::deadline_timer timer(io_service->GetRaw()); - timer.expires_from_now(std::chrono::hours(100)); - timer.async_wait([](const 
asio::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); + boost::asio::deadline_timer timer(io_service->GetRaw()); + timer.expires_from_now(boost::posix_time::hours(100)); + timer.async_wait([](const boost::system::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); io_service->Run(); ASSERT_TRUE(complete); @@ -279,7 +279,7 @@ TEST(RpcEngineTest, TestConnectionFailure) std::shared_ptr engine = std::make_shared(io_service, options, "foo", "", "protocol", 1); EXPECT_CALL(*producer, Produce()) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))); + .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))); engine->Connect("", make_endpoint(), [&complete, io_service](const Status &stat) { complete = true; @@ -306,9 +306,9 @@ TEST(RpcEngineTest, TestConnectionFailureRetryAndFailure) std::shared_ptr engine = std::make_shared(io_service, options, "foo", "", "protocol", 1); EXPECT_CALL(*producer, Produce()) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))); + .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) + .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) + .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))); engine->Connect("", make_endpoint(), [&complete, io_service](const Status &stat) { complete = true; @@ -335,9 +335,9 @@ TEST(RpcEngineTest, TestConnectionFailureAndRecover) std::shared_ptr engine = std::make_shared(io_service, options, "foo", "", "protocol", 1); EXPECT_CALL(*producer, Produce()) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))) - .WillOnce(Return(std::make_pair(::asio::error_code(), ""))) - .WillOnce(Return(std::make_pair(::asio::error::would_block, ""))); + .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) + .WillOnce(Return(std::make_pair(boost::system::error_code(), ""))) + .WillOnce(Return(std::make_pair(boost::asio::error::would_block, ""))); engine->Connect("", make_endpoint(), [&complete, io_service](const Status &stat) { complete = true; @@ -390,8 +390,8 @@ TEST(RpcEngineTest, TestEventCallbacks) h.set_callid(1); h.set_status(RpcResponseHeaderProto::SUCCESS); EXPECT_CALL(*producer, Produce()) - .WillOnce(Return(std::make_pair(::asio::error_code(), ""))) // subverted by callback - .WillOnce(Return(std::make_pair(::asio::error_code(), ""))) + .WillOnce(Return(std::make_pair(boost::system::error_code(), ""))) // subverted by callback + .WillOnce(Return(std::make_pair(boost::system::error_code(), ""))) .WillOnce(Return(RpcResponse(h, "b"))) // subverted by callback .WillOnce(Return(RpcResponse(h, server_resp.SerializeAsString()))); SharedMockConnection::SetSharedConnectionData(producer); @@ -444,9 +444,9 @@ TEST(RpcEngineTest, TestConnectionFailureAndAsyncRecover) std::shared_ptr engine = std::make_shared(io_service, options, "foo", "", "protocol", 1); EXPECT_CALL(*producer, Produce()) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))) - .WillOnce(Return(std::make_pair(::asio::error_code(), ""))) - .WillOnce(Return(std::make_pair(::asio::error::would_block, ""))); + 
.WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) + .WillOnce(Return(std::make_pair(boost::system::error_code(), ""))) + .WillOnce(Return(std::make_pair(boost::asio::error::would_block, ""))); engine->Connect("", make_endpoint(), [&complete, io_service](const Status &stat) { complete = true; @@ -454,9 +454,9 @@ TEST(RpcEngineTest, TestConnectionFailureAndAsyncRecover) ASSERT_TRUE(stat.ok()); }); - ::asio::deadline_timer timer(io_service->GetRaw()); - timer.expires_from_now(std::chrono::hours(100)); - timer.async_wait([](const asio::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); + boost::asio::deadline_timer timer(io_service->GetRaw()); + timer.expires_from_now(boost::posix_time::hours(100)); + timer.async_wait([](const boost::system::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); io_service->Run(); ASSERT_TRUE(complete); @@ -473,7 +473,7 @@ TEST(RpcEngineTest, TestTimeout) { conn->StartReading(); EXPECT_CALL(conn->TEST_get_mutable_socket(), Produce()) - .WillOnce(Return(std::make_pair(::asio::error::would_block, ""))); + .WillOnce(Return(std::make_pair(boost::asio::error::would_block, ""))); std::shared_ptr conn_ptr(conn); engine->TEST_SetRpcConnection(conn_ptr); @@ -489,9 +489,9 @@ TEST(RpcEngineTest, TestTimeout) { ASSERT_FALSE(stat.ok()); }); - ::asio::deadline_timer timer(io_service->GetRaw()); - timer.expires_from_now(std::chrono::hours(100)); - timer.async_wait([](const asio::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); + boost::asio::deadline_timer timer(io_service->GetRaw()); + timer.expires_from_now(boost::posix_time::hours(100)); + timer.async_wait([](const boost::system::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); io_service->Run(); ASSERT_TRUE(complete); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/COPYING b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/COPYING deleted file mode 100644 index e86a3819fc51e..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/COPYING +++ /dev/null @@ -1,4 +0,0 @@ -Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) - -Distributed under the Boost Software License, Version 1.0. (See accompanying -file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio.hpp deleted file mode 100644 index 1f478409c2aac..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio.hpp +++ /dev/null @@ -1,122 +0,0 @@ -// -// asio.hpp -// ~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. 
(See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_HPP -#define ASIO_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/async_result.hpp" -#include "asio/basic_datagram_socket.hpp" -#include "asio/basic_deadline_timer.hpp" -#include "asio/basic_io_object.hpp" -#include "asio/basic_raw_socket.hpp" -#include "asio/basic_seq_packet_socket.hpp" -#include "asio/basic_serial_port.hpp" -#include "asio/basic_signal_set.hpp" -#include "asio/basic_socket_acceptor.hpp" -#include "asio/basic_socket_iostream.hpp" -#include "asio/basic_socket_streambuf.hpp" -#include "asio/basic_stream_socket.hpp" -#include "asio/basic_streambuf.hpp" -#include "asio/basic_waitable_timer.hpp" -#include "asio/buffer.hpp" -#include "asio/buffered_read_stream_fwd.hpp" -#include "asio/buffered_read_stream.hpp" -#include "asio/buffered_stream_fwd.hpp" -#include "asio/buffered_stream.hpp" -#include "asio/buffered_write_stream_fwd.hpp" -#include "asio/buffered_write_stream.hpp" -#include "asio/buffers_iterator.hpp" -#include "asio/completion_condition.hpp" -#include "asio/connect.hpp" -#include "asio/coroutine.hpp" -#include "asio/datagram_socket_service.hpp" -#include "asio/deadline_timer_service.hpp" -#include "asio/deadline_timer.hpp" -#include "asio/error.hpp" -#include "asio/error_code.hpp" -#include "asio/generic/basic_endpoint.hpp" -#include "asio/generic/datagram_protocol.hpp" -#include "asio/generic/raw_protocol.hpp" -#include "asio/generic/seq_packet_protocol.hpp" -#include "asio/generic/stream_protocol.hpp" -#include "asio/handler_alloc_hook.hpp" -#include "asio/handler_continuation_hook.hpp" -#include "asio/handler_invoke_hook.hpp" -#include "asio/handler_type.hpp" -#include "asio/io_service.hpp" -#include "asio/ip/address.hpp" -#include "asio/ip/address_v4.hpp" -#include "asio/ip/address_v6.hpp" -#include "asio/ip/basic_endpoint.hpp" -#include "asio/ip/basic_resolver.hpp" -#include "asio/ip/basic_resolver_entry.hpp" -#include "asio/ip/basic_resolver_iterator.hpp" -#include "asio/ip/basic_resolver_query.hpp" -#include "asio/ip/host_name.hpp" -#include "asio/ip/icmp.hpp" -#include "asio/ip/multicast.hpp" -#include "asio/ip/resolver_query_base.hpp" -#include "asio/ip/resolver_service.hpp" -#include "asio/ip/tcp.hpp" -#include "asio/ip/udp.hpp" -#include "asio/ip/unicast.hpp" -#include "asio/ip/v6_only.hpp" -#include "asio/is_read_buffered.hpp" -#include "asio/is_write_buffered.hpp" -#include "asio/local/basic_endpoint.hpp" -#include "asio/local/connect_pair.hpp" -#include "asio/local/datagram_protocol.hpp" -#include "asio/local/stream_protocol.hpp" -#include "asio/placeholders.hpp" -#include "asio/posix/basic_descriptor.hpp" -#include "asio/posix/basic_stream_descriptor.hpp" -#include "asio/posix/descriptor_base.hpp" -#include "asio/posix/stream_descriptor.hpp" -#include "asio/posix/stream_descriptor_service.hpp" -#include "asio/raw_socket_service.hpp" -#include "asio/read.hpp" -#include "asio/read_at.hpp" -#include "asio/read_until.hpp" -#include "asio/seq_packet_socket_service.hpp" -#include "asio/serial_port.hpp" -#include "asio/serial_port_base.hpp" -#include "asio/serial_port_service.hpp" -#include "asio/signal_set.hpp" -#include "asio/signal_set_service.hpp" -#include "asio/socket_acceptor_service.hpp" -#include "asio/socket_base.hpp" -#include "asio/strand.hpp" -#include "asio/stream_socket_service.hpp" -#include "asio/streambuf.hpp" -#include "asio/system_error.hpp" 
-#include "asio/thread.hpp" -#include "asio/time_traits.hpp" -#include "asio/version.hpp" -#include "asio/wait_traits.hpp" -#include "asio/waitable_timer_service.hpp" -#include "asio/windows/basic_handle.hpp" -#include "asio/windows/basic_object_handle.hpp" -#include "asio/windows/basic_random_access_handle.hpp" -#include "asio/windows/basic_stream_handle.hpp" -#include "asio/windows/object_handle.hpp" -#include "asio/windows/object_handle_service.hpp" -#include "asio/windows/overlapped_ptr.hpp" -#include "asio/windows/random_access_handle.hpp" -#include "asio/windows/random_access_handle_service.hpp" -#include "asio/windows/stream_handle.hpp" -#include "asio/windows/stream_handle_service.hpp" -#include "asio/write.hpp" -#include "asio/write_at.hpp" - -#endif // ASIO_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/async_result.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/async_result.hpp deleted file mode 100644 index b98d7703385fe..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/async_result.hpp +++ /dev/null @@ -1,94 +0,0 @@ -// -// async_result.hpp -// ~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_ASYNC_RESULT_HPP -#define ASIO_ASYNC_RESULT_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" -#include "asio/handler_type.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// An interface for customising the behaviour of an initiating function. -/** - * This template may be specialised for user-defined handler types. - */ -template -class async_result -{ -public: - /// The return type of the initiating function. - typedef void type; - - /// Construct an async result from a given handler. - /** - * When using a specalised async_result, the constructor has an opportunity - * to initialise some state associated with the handler, which is then - * returned from the initiating function. - */ - explicit async_result(Handler&) - { - } - - /// Obtain the value to be returned from the initiating function. - type get() - { - } -}; - -namespace detail { - -// Helper template to deduce the true type of a handler, capture a local copy -// of the handler, and then create an async_result for the handler. 
-template -struct async_result_init -{ - explicit async_result_init(ASIO_MOVE_ARG(Handler) orig_handler) - : handler(ASIO_MOVE_CAST(Handler)(orig_handler)), - result(handler) - { - } - - typename handler_type::type handler; - async_result::type> result; -}; - -template -struct async_result_type_helper -{ - typedef typename async_result< - typename handler_type::type - >::type type; -}; - -} // namespace detail -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#if defined(GENERATING_DOCUMENTATION) -# define ASIO_INITFN_RESULT_TYPE(h, sig) \ - void_or_deduced -#elif defined(_MSC_VER) && (_MSC_VER < 1500) -# define ASIO_INITFN_RESULT_TYPE(h, sig) \ - typename ::asio::detail::async_result_type_helper::type -#else -# define ASIO_INITFN_RESULT_TYPE(h, sig) \ - typename ::asio::async_result< \ - typename ::asio::handler_type::type>::type -#endif - -#endif // ASIO_ASYNC_RESULT_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_datagram_socket.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_datagram_socket.hpp deleted file mode 100644 index a1356b90fba57..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_datagram_socket.hpp +++ /dev/null @@ -1,949 +0,0 @@ -// -// basic_datagram_socket.hpp -// ~~~~~~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_DATAGRAM_SOCKET_HPP -#define ASIO_BASIC_DATAGRAM_SOCKET_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" -#include -#include "asio/basic_socket.hpp" -#include "asio/datagram_socket_service.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/detail/type_traits.hpp" -#include "asio/error.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides datagram-oriented socket functionality. -/** - * The basic_datagram_socket class template provides asynchronous and blocking - * datagram-oriented socket functionality. - * - * @par Thread Safety - * @e Distinct @e objects: Safe.@n - * @e Shared @e objects: Unsafe. - */ -template > -class basic_datagram_socket - : public basic_socket -{ -public: - /// (Deprecated: Use native_handle_type.) The native representation of a - /// socket. - typedef typename DatagramSocketService::native_handle_type native_type; - - /// The native representation of a socket. - typedef typename DatagramSocketService::native_handle_type native_handle_type; - - /// The protocol type. - typedef Protocol protocol_type; - - /// The endpoint type. - typedef typename Protocol::endpoint endpoint_type; - - /// Construct a basic_datagram_socket without opening it. - /** - * This constructor creates a datagram socket without opening it. The open() - * function must be called before data can be sent or received on the socket. - * - * @param io_service The io_service object that the datagram socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. 
- */ - explicit basic_datagram_socket(asio::io_service& io_service) - : basic_socket(io_service) - { - } - - /// Construct and open a basic_datagram_socket. - /** - * This constructor creates and opens a datagram socket. - * - * @param io_service The io_service object that the datagram socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @throws asio::system_error Thrown on failure. - */ - basic_datagram_socket(asio::io_service& io_service, - const protocol_type& protocol) - : basic_socket(io_service, protocol) - { - } - - /// Construct a basic_datagram_socket, opening it and binding it to the given - /// local endpoint. - /** - * This constructor creates a datagram socket and automatically opens it bound - * to the specified endpoint on the local machine. The protocol used is the - * protocol associated with the given endpoint. - * - * @param io_service The io_service object that the datagram socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param endpoint An endpoint on the local machine to which the datagram - * socket will be bound. - * - * @throws asio::system_error Thrown on failure. - */ - basic_datagram_socket(asio::io_service& io_service, - const endpoint_type& endpoint) - : basic_socket(io_service, endpoint) - { - } - - /// Construct a basic_datagram_socket on an existing native socket. - /** - * This constructor creates a datagram socket object to hold an existing - * native socket. - * - * @param io_service The io_service object that the datagram socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @param native_socket The new underlying socket implementation. - * - * @throws asio::system_error Thrown on failure. - */ - basic_datagram_socket(asio::io_service& io_service, - const protocol_type& protocol, const native_handle_type& native_socket) - : basic_socket( - io_service, protocol, native_socket) - { - } - -#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_datagram_socket from another. - /** - * This constructor moves a datagram socket from one object to another. - * - * @param other The other basic_datagram_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_datagram_socket(io_service&) constructor. - */ - basic_datagram_socket(basic_datagram_socket&& other) - : basic_socket( - ASIO_MOVE_CAST(basic_datagram_socket)(other)) - { - } - - /// Move-assign a basic_datagram_socket from another. - /** - * This assignment operator moves a datagram socket from one object to - * another. - * - * @param other The other basic_datagram_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_datagram_socket(io_service&) constructor. - */ - basic_datagram_socket& operator=(basic_datagram_socket&& other) - { - basic_socket::operator=( - ASIO_MOVE_CAST(basic_datagram_socket)(other)); - return *this; - } - - /// Move-construct a basic_datagram_socket from a socket of another protocol - /// type. - /** - * This constructor moves a datagram socket from one object to another. 
- * - * @param other The other basic_datagram_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_datagram_socket(io_service&) constructor. - */ - template - basic_datagram_socket( - basic_datagram_socket&& other, - typename enable_if::value>::type* = 0) - : basic_socket( - ASIO_MOVE_CAST2(basic_datagram_socket< - Protocol1, DatagramSocketService1>)(other)) - { - } - - /// Move-assign a basic_datagram_socket from a socket of another protocol - /// type. - /** - * This assignment operator moves a datagram socket from one object to - * another. - * - * @param other The other basic_datagram_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_datagram_socket(io_service&) constructor. - */ - template - typename enable_if::value, - basic_datagram_socket>::type& operator=( - basic_datagram_socket&& other) - { - basic_socket::operator=( - ASIO_MOVE_CAST2(basic_datagram_socket< - Protocol1, DatagramSocketService1>)(other)); - return *this; - } -#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - - /// Send some data on a connected socket. - /** - * This function is used to send data on the datagram socket. The function - * call will block until the data has been sent successfully or an error - * occurs. - * - * @param buffers One ore more data buffers to be sent on the socket. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @note The send operation can only be used with a connected socket. Use - * the send_to function to send data on an unconnected datagram socket. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code socket.send(asio::buffer(data, size)); @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t send(const ConstBufferSequence& buffers) - { - asio::error_code ec; - std::size_t s = this->get_service().send( - this->get_implementation(), buffers, 0, ec); - asio::detail::throw_error(ec, "send"); - return s; - } - - /// Send some data on a connected socket. - /** - * This function is used to send data on the datagram socket. The function - * call will block until the data has been sent successfully or an error - * occurs. - * - * @param buffers One ore more data buffers to be sent on the socket. - * - * @param flags Flags specifying how the send call is to be made. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @note The send operation can only be used with a connected socket. Use - * the send_to function to send data on an unconnected datagram socket. - */ - template - std::size_t send(const ConstBufferSequence& buffers, - socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().send( - this->get_implementation(), buffers, flags, ec); - asio::detail::throw_error(ec, "send"); - return s; - } - - /// Send some data on a connected socket. - /** - * This function is used to send data on the datagram socket. The function - * call will block until the data has been sent successfully or an error - * occurs. - * - * @param buffers One or more data buffers to be sent on the socket. 
- * - * @param flags Flags specifying how the send call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes sent. - * - * @note The send operation can only be used with a connected socket. Use - * the send_to function to send data on an unconnected datagram socket. - */ - template - std::size_t send(const ConstBufferSequence& buffers, - socket_base::message_flags flags, asio::error_code& ec) - { - return this->get_service().send( - this->get_implementation(), buffers, flags, ec); - } - - /// Start an asynchronous send on a connected socket. - /** - * This function is used to asynchronously send data on the datagram socket. - * The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent on the socket. Although - * the buffers object may be copied as necessary, ownership of the underlying - * memory blocks is retained by the caller, which must guarantee that they - * remain valid until the handler is called. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_send operation can only be used with a connected socket. - * Use the async_send_to function to send data on an unconnected datagram - * socket. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * socket.async_send(asio::buffer(data, size), handler); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send(const ConstBufferSequence& buffers, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send(this->get_implementation(), - buffers, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Start an asynchronous send on a connected socket. - /** - * This function is used to asynchronously send data on the datagram socket. - * The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent on the socket. Although - * the buffers object may be copied as necessary, ownership of the underlying - * memory blocks is retained by the caller, which must guarantee that they - * remain valid until the handler is called. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. 
- * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_send operation can only be used with a connected socket. - * Use the async_send_to function to send data on an unconnected datagram - * socket. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send(const ConstBufferSequence& buffers, - socket_base::message_flags flags, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send(this->get_implementation(), - buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Send a datagram to the specified endpoint. - /** - * This function is used to send a datagram to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * asio::ip::udp::endpoint destination( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.send_to(asio::buffer(data, size), destination); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination) - { - asio::error_code ec; - std::size_t s = this->get_service().send_to( - this->get_implementation(), buffers, destination, 0, ec); - asio::detail::throw_error(ec, "send_to"); - return s; - } - - /// Send a datagram to the specified endpoint. - /** - * This function is used to send a datagram to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. - * - * @param flags Flags specifying how the send call is to be made. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().send_to( - this->get_implementation(), buffers, destination, flags, ec); - asio::detail::throw_error(ec, "send_to"); - return s; - } - - /// Send a datagram to the specified endpoint. - /** - * This function is used to send a datagram to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. 
- * - * @param flags Flags specifying how the send call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes sent. - */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags, - asio::error_code& ec) - { - return this->get_service().send_to(this->get_implementation(), - buffers, destination, flags, ec); - } - - /// Start an asynchronous send. - /** - * This function is used to asynchronously send a datagram to the specified - * remote endpoint. The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param destination The remote endpoint to which the data will be sent. - * Copies will be made of the endpoint as required. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * asio::ip::udp::endpoint destination( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.async_send_to( - * asio::buffer(data, size), destination, handler); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send_to( - this->get_implementation(), buffers, destination, 0, - ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Start an asynchronous send. - /** - * This function is used to asynchronously send a datagram to the specified - * remote endpoint. The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param destination The remote endpoint to which the data will be sent. - * Copies will be made of the endpoint as required. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. 
The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send_to( - this->get_implementation(), buffers, destination, flags, - ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the datagram socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected datagram - * socket. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code socket.receive(asio::buffer(data, size)); @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t receive(const MutableBufferSequence& buffers) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, 0, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the datagram socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected datagram - * socket. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, flags, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the datagram socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. 
- * - * @param flags Flags specifying how the receive call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes received. - * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected datagram - * socket. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags, asio::error_code& ec) - { - return this->get_service().receive( - this->get_implementation(), buffers, flags, ec); - } - - /// Start an asynchronous receive on a connected socket. - /** - * This function is used to asynchronously receive data from the datagram - * socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_receive operation can only be used with a connected socket. - * Use the async_receive_from function to receive data on an unconnected - * datagram socket. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.async_receive(asio::buffer(data, size), handler); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive(this->get_implementation(), - buffers, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Start an asynchronous receive on a connected socket. - /** - * This function is used to asynchronously receive data from the datagram - * socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. 
The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_receive operation can only be used with a connected socket. - * Use the async_receive_from function to receive data on an unconnected - * datagram socket. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive(this->get_implementation(), - buffers, flags, ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Receive a datagram with the endpoint of the sender. - /** - * This function is used to receive a datagram. The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the datagram. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * asio::ip::udp::endpoint sender_endpoint; - * socket.receive_from( - * asio::buffer(data, size), sender_endpoint); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint) - { - asio::error_code ec; - std::size_t s = this->get_service().receive_from( - this->get_implementation(), buffers, sender_endpoint, 0, ec); - asio::detail::throw_error(ec, "receive_from"); - return s; - } - - /// Receive a datagram with the endpoint of the sender. - /** - * This function is used to receive a datagram. The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the datagram. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive_from( - this->get_implementation(), buffers, sender_endpoint, flags, ec); - asio::detail::throw_error(ec, "receive_from"); - return s; - } - - /// Receive a datagram with the endpoint of the sender. - /** - * This function is used to receive a datagram. 
The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the datagram. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes received. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags, - asio::error_code& ec) - { - return this->get_service().receive_from(this->get_implementation(), - buffers, sender_endpoint, flags, ec); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive a datagram. The function - * call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the datagram. Ownership of the sender_endpoint object - * is retained by the caller, which must guarantee that it is valid until the - * handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code socket.async_receive_from( - * asio::buffer(data, size), sender_endpoint, handler); @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive_from( - this->get_implementation(), buffers, sender_endpoint, 0, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive a datagram. The function - * call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. 
- * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the datagram. Ownership of the sender_endpoint object - * is retained by the caller, which must guarantee that it is valid until the - * handler is called. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive_from( - this->get_implementation(), buffers, sender_endpoint, flags, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } -}; - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // ASIO_BASIC_DATAGRAM_SOCKET_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_deadline_timer.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_deadline_timer.hpp deleted file mode 100644 index d0fc371508075..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_deadline_timer.hpp +++ /dev/null @@ -1,520 +0,0 @@ -// -// basic_deadline_timer.hpp -// ~~~~~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_DEADLINE_TIMER_HPP -#define ASIO_BASIC_DEADLINE_TIMER_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" - -#if defined(ASIO_HAS_BOOST_DATE_TIME) \ - || defined(ASIO_CPP11_DATE_TIME) \ - || defined(GENERATING_DOCUMENTATION) - -#include -#include "asio/basic_io_object.hpp" -#include "asio/deadline_timer_service.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/error.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides waitable timer functionality. -/** - * The basic_deadline_timer class template provides the ability to perform a - * blocking or asynchronous wait for a timer to expire. - * - * A deadline timer is always in one of two states: "expired" or "not expired". - * If the wait() or async_wait() function is called on an expired timer, the - * wait operation will complete immediately. 
- * - * Most applications will use the asio::deadline_timer typedef. - * - * @par Thread Safety - * @e Distinct @e objects: Safe.@n - * @e Shared @e objects: Unsafe. - * - * @par Examples - * Performing a blocking wait: - * @code - * // Construct a timer without setting an expiry time. - * asio::deadline_timer timer(io_service); - * - * // Set an expiry time relative to now. - * timer.expires_from_now(boost::posix_time::seconds(5)); - * - * // Wait for the timer to expire. - * timer.wait(); - * @endcode - * - * @par - * Performing an asynchronous wait: - * @code - * void handler(const asio::error_code& error) - * { - * if (!error) - * { - * // Timer expired. - * } - * } - * - * ... - * - * // Construct a timer with an absolute expiry time. - * asio::deadline_timer timer(io_service, - * boost::posix_time::time_from_string("2005-12-07 23:59:59.000")); - * - * // Start an asynchronous wait. - * timer.async_wait(handler); - * @endcode - * - * @par Changing an active deadline_timer's expiry time - * - * Changing the expiry time of a timer while there are pending asynchronous - * waits causes those wait operations to be cancelled. To ensure that the action - * associated with the timer is performed only once, use something like this: - * used: - * - * @code - * void on_some_event() - * { - * if (my_timer.expires_from_now(seconds(5)) > 0) - * { - * // We managed to cancel the timer. Start new asynchronous wait. - * my_timer.async_wait(on_timeout); - * } - * else - * { - * // Too late, timer has already expired! - * } - * } - * - * void on_timeout(const asio::error_code& e) - * { - * if (e != asio::error::operation_aborted) - * { - * // Timer was not cancelled, take necessary action. - * } - * } - * @endcode - * - * @li The asio::basic_deadline_timer::expires_from_now() function - * cancels any pending asynchronous waits, and returns the number of - * asynchronous waits that were cancelled. If it returns 0 then you were too - * late and the wait handler has already been executed, or will soon be - * executed. If it returns 1 then the wait handler was successfully cancelled. - * - * @li If a wait handler is cancelled, the asio::error_code passed to - * it contains the value asio::error::operation_aborted. - */ -template , - typename TimerService = deadline_timer_service > -class basic_deadline_timer - : public basic_io_object -{ -public: - /// The time traits type. - typedef TimeTraits traits_type; - - /// The time type. - typedef typename traits_type::time_type time_type; - - /// The duration type. - typedef typename traits_type::duration_type duration_type; - - /// Constructor. - /** - * This constructor creates a timer without setting an expiry time. The - * expires_at() or expires_from_now() functions must be called to set an - * expiry time before the timer can be waited on. - * - * @param io_service The io_service object that the timer will use to dispatch - * handlers for any asynchronous operations performed on the timer. - */ - explicit basic_deadline_timer(asio::io_service& io_service) - : basic_io_object(io_service) - { - } - - /// Constructor to set a particular expiry time as an absolute time. - /** - * This constructor creates a timer and sets the expiry time. - * - * @param io_service The io_service object that the timer will use to dispatch - * handlers for any asynchronous operations performed on the timer. - * - * @param expiry_time The expiry time to be used for the timer, expressed - * as an absolute time. 
- */ - basic_deadline_timer(asio::io_service& io_service, - const time_type& expiry_time) - : basic_io_object(io_service) - { - asio::error_code ec; - this->service.expires_at(this->implementation, expiry_time, ec); - asio::detail::throw_error(ec, "expires_at"); - } - - /// Constructor to set a particular expiry time relative to now. - /** - * This constructor creates a timer and sets the expiry time. - * - * @param io_service The io_service object that the timer will use to dispatch - * handlers for any asynchronous operations performed on the timer. - * - * @param expiry_time The expiry time to be used for the timer, relative to - * now. - */ - basic_deadline_timer(asio::io_service& io_service, - const duration_type& expiry_time) - : basic_io_object(io_service) - { - asio::error_code ec; - this->service.expires_from_now(this->implementation, expiry_time, ec); - asio::detail::throw_error(ec, "expires_from_now"); - } - - /// Cancel any asynchronous operations that are waiting on the timer. - /** - * This function forces the completion of any pending asynchronous wait - * operations against the timer. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * Cancelling the timer does not change the expiry time. - * - * @return The number of asynchronous operations that were cancelled. - * - * @throws asio::system_error Thrown on failure. - * - * @note If the timer has already expired when cancel() is called, then the - * handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t cancel() - { - asio::error_code ec; - std::size_t s = this->service.cancel(this->implementation, ec); - asio::detail::throw_error(ec, "cancel"); - return s; - } - - /// Cancel any asynchronous operations that are waiting on the timer. - /** - * This function forces the completion of any pending asynchronous wait - * operations against the timer. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * Cancelling the timer does not change the expiry time. - * - * @param ec Set to indicate what error occurred, if any. - * - * @return The number of asynchronous operations that were cancelled. - * - * @note If the timer has already expired when cancel() is called, then the - * handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t cancel(asio::error_code& ec) - { - return this->service.cancel(this->implementation, ec); - } - - /// Cancels one asynchronous operation that is waiting on the timer. - /** - * This function forces the completion of one pending asynchronous wait - * operation against the timer. Handlers are cancelled in FIFO order. The - * handler for the cancelled operation will be invoked with the - * asio::error::operation_aborted error code. - * - * Cancelling the timer does not change the expiry time. - * - * @return The number of asynchronous operations that were cancelled. That is, - * either 0 or 1. 
- * - * @throws asio::system_error Thrown on failure. - * - * @note If the timer has already expired when cancel_one() is called, then - * the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t cancel_one() - { - asio::error_code ec; - std::size_t s = this->service.cancel_one(this->implementation, ec); - asio::detail::throw_error(ec, "cancel_one"); - return s; - } - - /// Cancels one asynchronous operation that is waiting on the timer. - /** - * This function forces the completion of one pending asynchronous wait - * operation against the timer. Handlers are cancelled in FIFO order. The - * handler for the cancelled operation will be invoked with the - * asio::error::operation_aborted error code. - * - * Cancelling the timer does not change the expiry time. - * - * @param ec Set to indicate what error occurred, if any. - * - * @return The number of asynchronous operations that were cancelled. That is, - * either 0 or 1. - * - * @note If the timer has already expired when cancel_one() is called, then - * the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t cancel_one(asio::error_code& ec) - { - return this->service.cancel_one(this->implementation, ec); - } - - /// Get the timer's expiry time as an absolute time. - /** - * This function may be used to obtain the timer's current expiry time. - * Whether the timer has expired or not does not affect this value. - */ - time_type expires_at() const - { - return this->service.expires_at(this->implementation); - } - - /// Set the timer's expiry time as an absolute time. - /** - * This function sets the expiry time. Any pending asynchronous wait - * operations will be cancelled. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * @param expiry_time The expiry time to be used for the timer. - * - * @return The number of asynchronous operations that were cancelled. - * - * @throws asio::system_error Thrown on failure. - * - * @note If the timer has already expired when expires_at() is called, then - * the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t expires_at(const time_type& expiry_time) - { - asio::error_code ec; - std::size_t s = this->service.expires_at( - this->implementation, expiry_time, ec); - asio::detail::throw_error(ec, "expires_at"); - return s; - } - - /// Set the timer's expiry time as an absolute time. - /** - * This function sets the expiry time. Any pending asynchronous wait - * operations will be cancelled. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * @param expiry_time The expiry time to be used for the timer. 
- * - * @param ec Set to indicate what error occurred, if any. - * - * @return The number of asynchronous operations that were cancelled. - * - * @note If the timer has already expired when expires_at() is called, then - * the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t expires_at(const time_type& expiry_time, - asio::error_code& ec) - { - return this->service.expires_at(this->implementation, expiry_time, ec); - } - - /// Get the timer's expiry time relative to now. - /** - * This function may be used to obtain the timer's current expiry time. - * Whether the timer has expired or not does not affect this value. - */ - duration_type expires_from_now() const - { - return this->service.expires_from_now(this->implementation); - } - - /// Set the timer's expiry time relative to now. - /** - * This function sets the expiry time. Any pending asynchronous wait - * operations will be cancelled. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * @param expiry_time The expiry time to be used for the timer. - * - * @return The number of asynchronous operations that were cancelled. - * - * @throws asio::system_error Thrown on failure. - * - * @note If the timer has already expired when expires_from_now() is called, - * then the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t expires_from_now(const duration_type& expiry_time) - { - asio::error_code ec; - std::size_t s = this->service.expires_from_now( - this->implementation, expiry_time, ec); - asio::detail::throw_error(ec, "expires_from_now"); - return s; - } - - /// Set the timer's expiry time relative to now. - /** - * This function sets the expiry time. Any pending asynchronous wait - * operations will be cancelled. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * @param expiry_time The expiry time to be used for the timer. - * - * @param ec Set to indicate what error occurred, if any. - * - * @return The number of asynchronous operations that were cancelled. - * - * @note If the timer has already expired when expires_from_now() is called, - * then the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t expires_from_now(const duration_type& expiry_time, - asio::error_code& ec) - { - return this->service.expires_from_now( - this->implementation, expiry_time, ec); - } - - /// Perform a blocking wait on the timer. - /** - * This function is used to wait for the timer to expire. This function - * blocks and does not return until the timer has expired. - * - * @throws asio::system_error Thrown on failure. 
- */ - void wait() - { - asio::error_code ec; - this->service.wait(this->implementation, ec); - asio::detail::throw_error(ec, "wait"); - } - - /// Perform a blocking wait on the timer. - /** - * This function is used to wait for the timer to expire. This function - * blocks and does not return until the timer has expired. - * - * @param ec Set to indicate what error occurred, if any. - */ - void wait(asio::error_code& ec) - { - this->service.wait(this->implementation, ec); - } - - /// Start an asynchronous wait on the timer. - /** - * This function may be used to initiate an asynchronous wait against the - * timer. It always returns immediately. - * - * For each call to async_wait(), the supplied handler will be called exactly - * once. The handler will be called when: - * - * @li The timer has expired. - * - * @li The timer was cancelled, in which case the handler is passed the error - * code asio::error::operation_aborted. - * - * @param handler The handler to be called when the timer expires. Copies - * will be made of the handler as required. The function signature of the - * handler must be: - * @code void handler( - * const asio::error_code& error // Result of operation. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(WaitHandler, - void (asio::error_code)) - async_wait(ASIO_MOVE_ARG(WaitHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WaitHandler. - ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check; - - return this->service.async_wait(this->implementation, - ASIO_MOVE_CAST(WaitHandler)(handler)); - } -}; - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // defined(ASIO_HAS_BOOST_DATE_TIME) - // || defined(ASIO_CPP11_DATE_TIME) - // || defined(GENERATING_DOCUMENTATION) - -#endif // ASIO_BASIC_DEADLINE_TIMER_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_io_object.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_io_object.hpp deleted file mode 100644 index 6154d92f3616d..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_io_object.hpp +++ /dev/null @@ -1,240 +0,0 @@ -// -// basic_io_object.hpp -// ~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_IO_OBJECT_HPP -#define ASIO_BASIC_IO_OBJECT_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" -#include "asio/io_service.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -#if defined(ASIO_HAS_MOVE) -namespace detail -{ - // Type trait used to determine whether a service supports move. 
- template - class service_has_move - { - private: - typedef IoObjectService service_type; - typedef typename service_type::implementation_type implementation_type; - - template - static auto eval(T* t, U* u) -> decltype(t->move_construct(*u, *u), char()); - static char (&eval(...))[2]; - - public: - static const bool value = - sizeof(service_has_move::eval( - static_cast(0), - static_cast(0))) == 1; - }; -} -#endif // defined(ASIO_HAS_MOVE) - -/// Base class for all I/O objects. -/** - * @note All I/O objects are non-copyable. However, when using C++0x, certain - * I/O objects do support move construction and move assignment. - */ -#if !defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) -template -#else -template ::value> -#endif -class basic_io_object -{ -public: - /// The type of the service that will be used to provide I/O operations. - typedef IoObjectService service_type; - - /// The underlying implementation type of I/O object. - typedef typename service_type::implementation_type implementation_type; - - /// Get the io_service associated with the object. - /** - * This function may be used to obtain the io_service object that the I/O - * object uses to dispatch handlers for asynchronous operations. - * - * @return A reference to the io_service object that the I/O object will use - * to dispatch handlers. Ownership is not transferred to the caller. - */ - asio::io_service& get_io_service() - { - return service.get_io_service(); - } - -protected: - /// Construct a basic_io_object. - /** - * Performs: - * @code get_service().construct(get_implementation()); @endcode - */ - explicit basic_io_object(asio::io_service& io_service) - : service(asio::use_service(io_service)) - { - service.construct(implementation); - } - -#if defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_io_object. - /** - * Performs: - * @code get_service().move_construct( - * get_implementation(), other.get_implementation()); @endcode - * - * @note Available only for services that support movability, - */ - basic_io_object(basic_io_object&& other); - - /// Move-assign a basic_io_object. - /** - * Performs: - * @code get_service().move_assign(get_implementation(), - * other.get_service(), other.get_implementation()); @endcode - * - * @note Available only for services that support movability, - */ - basic_io_object& operator=(basic_io_object&& other); -#endif // defined(GENERATING_DOCUMENTATION) - - /// Protected destructor to prevent deletion through this type. - /** - * Performs: - * @code get_service().destroy(get_implementation()); @endcode - */ - ~basic_io_object() - { - service.destroy(implementation); - } - - /// Get the service associated with the I/O object. - service_type& get_service() - { - return service; - } - - /// Get the service associated with the I/O object. - const service_type& get_service() const - { - return service; - } - - /// (Deprecated: Use get_service().) The service associated with the I/O - /// object. - /** - * @note Available only for services that do not support movability. - */ - service_type& service; - - /// Get the underlying implementation of the I/O object. - implementation_type& get_implementation() - { - return implementation; - } - - /// Get the underlying implementation of the I/O object. - const implementation_type& get_implementation() const - { - return implementation; - } - - /// (Deprecated: Use get_implementation().) The underlying implementation of - /// the I/O object. 
- implementation_type implementation; - -private: - basic_io_object(const basic_io_object&); - basic_io_object& operator=(const basic_io_object&); -}; - -#if defined(ASIO_HAS_MOVE) -// Specialisation for movable objects. -template -class basic_io_object -{ -public: - typedef IoObjectService service_type; - typedef typename service_type::implementation_type implementation_type; - - asio::io_service& get_io_service() - { - return service_->get_io_service(); - } - -protected: - explicit basic_io_object(asio::io_service& io_service) - : service_(&asio::use_service(io_service)) - { - service_->construct(implementation); - } - - basic_io_object(basic_io_object&& other) - : service_(&other.get_service()) - { - service_->move_construct(implementation, other.implementation); - } - - ~basic_io_object() - { - service_->destroy(implementation); - } - - basic_io_object& operator=(basic_io_object&& other) - { - service_->move_assign(implementation, - *other.service_, other.implementation); - service_ = other.service_; - return *this; - } - - service_type& get_service() - { - return *service_; - } - - const service_type& get_service() const - { - return *service_; - } - - implementation_type& get_implementation() - { - return implementation; - } - - const implementation_type& get_implementation() const - { - return implementation; - } - - implementation_type implementation; - -private: - basic_io_object(const basic_io_object&); - void operator=(const basic_io_object&); - - IoObjectService* service_; -}; -#endif // defined(ASIO_HAS_MOVE) - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // ASIO_BASIC_IO_OBJECT_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_raw_socket.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_raw_socket.hpp deleted file mode 100644 index b0f3f186b2b57..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_raw_socket.hpp +++ /dev/null @@ -1,940 +0,0 @@ -// -// basic_raw_socket.hpp -// ~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_RAW_SOCKET_HPP -#define ASIO_BASIC_RAW_SOCKET_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" -#include -#include "asio/basic_socket.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/detail/type_traits.hpp" -#include "asio/error.hpp" -#include "asio/raw_socket_service.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides raw-oriented socket functionality. -/** - * The basic_raw_socket class template provides asynchronous and blocking - * raw-oriented socket functionality. - * - * @par Thread Safety - * @e Distinct @e objects: Safe.@n - * @e Shared @e objects: Unsafe. - */ -template > -class basic_raw_socket - : public basic_socket -{ -public: - /// (Deprecated: Use native_handle_type.) The native representation of a - /// socket. - typedef typename RawSocketService::native_handle_type native_type; - - /// The native representation of a socket. 
- typedef typename RawSocketService::native_handle_type native_handle_type; - - /// The protocol type. - typedef Protocol protocol_type; - - /// The endpoint type. - typedef typename Protocol::endpoint endpoint_type; - - /// Construct a basic_raw_socket without opening it. - /** - * This constructor creates a raw socket without opening it. The open() - * function must be called before data can be sent or received on the socket. - * - * @param io_service The io_service object that the raw socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - */ - explicit basic_raw_socket(asio::io_service& io_service) - : basic_socket(io_service) - { - } - - /// Construct and open a basic_raw_socket. - /** - * This constructor creates and opens a raw socket. - * - * @param io_service The io_service object that the raw socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @throws asio::system_error Thrown on failure. - */ - basic_raw_socket(asio::io_service& io_service, - const protocol_type& protocol) - : basic_socket(io_service, protocol) - { - } - - /// Construct a basic_raw_socket, opening it and binding it to the given - /// local endpoint. - /** - * This constructor creates a raw socket and automatically opens it bound - * to the specified endpoint on the local machine. The protocol used is the - * protocol associated with the given endpoint. - * - * @param io_service The io_service object that the raw socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param endpoint An endpoint on the local machine to which the raw - * socket will be bound. - * - * @throws asio::system_error Thrown on failure. - */ - basic_raw_socket(asio::io_service& io_service, - const endpoint_type& endpoint) - : basic_socket(io_service, endpoint) - { - } - - /// Construct a basic_raw_socket on an existing native socket. - /** - * This constructor creates a raw socket object to hold an existing - * native socket. - * - * @param io_service The io_service object that the raw socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @param native_socket The new underlying socket implementation. - * - * @throws asio::system_error Thrown on failure. - */ - basic_raw_socket(asio::io_service& io_service, - const protocol_type& protocol, const native_handle_type& native_socket) - : basic_socket( - io_service, protocol, native_socket) - { - } - -#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_raw_socket from another. - /** - * This constructor moves a raw socket from one object to another. - * - * @param other The other basic_raw_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_raw_socket(io_service&) constructor. - */ - basic_raw_socket(basic_raw_socket&& other) - : basic_socket( - ASIO_MOVE_CAST(basic_raw_socket)(other)) - { - } - - /// Move-assign a basic_raw_socket from another. - /** - * This assignment operator moves a raw socket from one object to another. - * - * @param other The other basic_raw_socket object from which the move - * will occur. 
- * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_raw_socket(io_service&) constructor. - */ - basic_raw_socket& operator=(basic_raw_socket&& other) - { - basic_socket::operator=( - ASIO_MOVE_CAST(basic_raw_socket)(other)); - return *this; - } - - /// Move-construct a basic_raw_socket from a socket of another protocol type. - /** - * This constructor moves a raw socket from one object to another. - * - * @param other The other basic_raw_socket object from which the move will - * occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_raw_socket(io_service&) constructor. - */ - template - basic_raw_socket(basic_raw_socket&& other, - typename enable_if::value>::type* = 0) - : basic_socket( - ASIO_MOVE_CAST2(basic_raw_socket< - Protocol1, RawSocketService1>)(other)) - { - } - - /// Move-assign a basic_raw_socket from a socket of another protocol type. - /** - * This assignment operator moves a raw socket from one object to another. - * - * @param other The other basic_raw_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_raw_socket(io_service&) constructor. - */ - template - typename enable_if::value, - basic_raw_socket>::type& operator=( - basic_raw_socket&& other) - { - basic_socket::operator=( - ASIO_MOVE_CAST2(basic_raw_socket< - Protocol1, RawSocketService1>)(other)); - return *this; - } -#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - - /// Send some data on a connected socket. - /** - * This function is used to send data on the raw socket. The function call - * will block until the data has been sent successfully or an error occurs. - * - * @param buffers One ore more data buffers to be sent on the socket. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @note The send operation can only be used with a connected socket. Use - * the send_to function to send data on an unconnected raw socket. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code socket.send(asio::buffer(data, size)); @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t send(const ConstBufferSequence& buffers) - { - asio::error_code ec; - std::size_t s = this->get_service().send( - this->get_implementation(), buffers, 0, ec); - asio::detail::throw_error(ec, "send"); - return s; - } - - /// Send some data on a connected socket. - /** - * This function is used to send data on the raw socket. The function call - * will block until the data has been sent successfully or an error occurs. - * - * @param buffers One ore more data buffers to be sent on the socket. - * - * @param flags Flags specifying how the send call is to be made. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @note The send operation can only be used with a connected socket. Use - * the send_to function to send data on an unconnected raw socket. 
- */ - template - std::size_t send(const ConstBufferSequence& buffers, - socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().send( - this->get_implementation(), buffers, flags, ec); - asio::detail::throw_error(ec, "send"); - return s; - } - - /// Send some data on a connected socket. - /** - * This function is used to send data on the raw socket. The function call - * will block until the data has been sent successfully or an error occurs. - * - * @param buffers One or more data buffers to be sent on the socket. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes sent. - * - * @note The send operation can only be used with a connected socket. Use - * the send_to function to send data on an unconnected raw socket. - */ - template - std::size_t send(const ConstBufferSequence& buffers, - socket_base::message_flags flags, asio::error_code& ec) - { - return this->get_service().send( - this->get_implementation(), buffers, flags, ec); - } - - /// Start an asynchronous send on a connected socket. - /** - * This function is used to send data on the raw socket. The function call - * will block until the data has been sent successfully or an error occurs. - * - * @param buffers One or more data buffers to be sent on the socket. Although - * the buffers object may be copied as necessary, ownership of the underlying - * memory blocks is retained by the caller, which must guarantee that they - * remain valid until the handler is called. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_send operation can only be used with a connected socket. - * Use the async_send_to function to send data on an unconnected raw - * socket. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * socket.async_send(asio::buffer(data, size), handler); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send(const ConstBufferSequence& buffers, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send(this->get_implementation(), - buffers, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Start an asynchronous send on a connected socket. - /** - * This function is used to send data on the raw socket. The function call - * will block until the data has been sent successfully or an error occurs. - * - * @param buffers One or more data buffers to be sent on the socket. 
Although - * the buffers object may be copied as necessary, ownership of the underlying - * memory blocks is retained by the caller, which must guarantee that they - * remain valid until the handler is called. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_send operation can only be used with a connected socket. - * Use the async_send_to function to send data on an unconnected raw - * socket. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send(const ConstBufferSequence& buffers, - socket_base::message_flags flags, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send(this->get_implementation(), - buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Send raw data to the specified endpoint. - /** - * This function is used to send raw data to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * asio::ip::udp::endpoint destination( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.send_to(asio::buffer(data, size), destination); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination) - { - asio::error_code ec; - std::size_t s = this->get_service().send_to( - this->get_implementation(), buffers, destination, 0, ec); - asio::detail::throw_error(ec, "send_to"); - return s; - } - - /// Send raw data to the specified endpoint. - /** - * This function is used to send raw data to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. - * - * @param flags Flags specifying how the send call is to be made. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. 
- */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().send_to( - this->get_implementation(), buffers, destination, flags, ec); - asio::detail::throw_error(ec, "send_to"); - return s; - } - - /// Send raw data to the specified endpoint. - /** - * This function is used to send raw data to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes sent. - */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags, - asio::error_code& ec) - { - return this->get_service().send_to(this->get_implementation(), - buffers, destination, flags, ec); - } - - /// Start an asynchronous send. - /** - * This function is used to asynchronously send raw data to the specified - * remote endpoint. The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param destination The remote endpoint to which the data will be sent. - * Copies will be made of the endpoint as required. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * asio::ip::udp::endpoint destination( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.async_send_to( - * asio::buffer(data, size), destination, handler); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send_to(this->get_implementation(), - buffers, destination, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Start an asynchronous send. 
- /** - * This function is used to asynchronously send raw data to the specified - * remote endpoint. The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param destination The remote endpoint to which the data will be sent. - * Copies will be made of the endpoint as required. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send_to( - this->get_implementation(), buffers, destination, flags, - ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the raw socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected raw - * socket. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code socket.receive(asio::buffer(data, size)); @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t receive(const MutableBufferSequence& buffers) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, 0, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the raw socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. 
- * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected raw - * socket. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, flags, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the raw socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes received. - * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected raw - * socket. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags, asio::error_code& ec) - { - return this->get_service().receive( - this->get_implementation(), buffers, flags, ec); - } - - /// Start an asynchronous receive on a connected socket. - /** - * This function is used to asynchronously receive data from the raw - * socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_receive operation can only be used with a connected socket. - * Use the async_receive_from function to receive data on an unconnected - * raw socket. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.async_receive(asio::buffer(data, size), handler); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive(this->get_implementation(), - buffers, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Start an asynchronous receive on a connected socket. 
- /** - * This function is used to asynchronously receive data from the raw - * socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_receive operation can only be used with a connected socket. - * Use the async_receive_from function to receive data on an unconnected - * raw socket. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive(this->get_implementation(), - buffers, flags, ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Receive raw data with the endpoint of the sender. - /** - * This function is used to receive raw data. The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the data. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * asio::ip::udp::endpoint sender_endpoint; - * socket.receive_from( - * asio::buffer(data, size), sender_endpoint); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint) - { - asio::error_code ec; - std::size_t s = this->get_service().receive_from( - this->get_implementation(), buffers, sender_endpoint, 0, ec); - asio::detail::throw_error(ec, "receive_from"); - return s; - } - - /// Receive raw data with the endpoint of the sender. - /** - * This function is used to receive raw data. The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the data. 
- * - * @param flags Flags specifying how the receive call is to be made. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive_from( - this->get_implementation(), buffers, sender_endpoint, flags, ec); - asio::detail::throw_error(ec, "receive_from"); - return s; - } - - /// Receive raw data with the endpoint of the sender. - /** - * This function is used to receive raw data. The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the data. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes received. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags, - asio::error_code& ec) - { - return this->get_service().receive_from(this->get_implementation(), - buffers, sender_endpoint, flags, ec); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive raw data. The function - * call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the data. Ownership of the sender_endpoint object - * is retained by the caller, which must guarantee that it is valid until the - * handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code socket.async_receive_from( - * asio::buffer(data, size), 0, sender_endpoint, handler); @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. 
- ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive_from( - this->get_implementation(), buffers, sender_endpoint, 0, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive raw data. The function - * call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the data. Ownership of the sender_endpoint object - * is retained by the caller, which must guarantee that it is valid until the - * handler is called. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive_from( - this->get_implementation(), buffers, sender_endpoint, flags, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } -}; - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // ASIO_BASIC_RAW_SOCKET_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_seq_packet_socket.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_seq_packet_socket.hpp deleted file mode 100644 index a3d720ebe0b84..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_seq_packet_socket.hpp +++ /dev/null @@ -1,565 +0,0 @@ -// -// basic_seq_packet_socket.hpp -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. 
(See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_SEQ_PACKET_SOCKET_HPP -#define ASIO_BASIC_SEQ_PACKET_SOCKET_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" -#include -#include "asio/basic_socket.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/error.hpp" -#include "asio/seq_packet_socket_service.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides sequenced packet socket functionality. -/** - * The basic_seq_packet_socket class template provides asynchronous and blocking - * sequenced packet socket functionality. - * - * @par Thread Safety - * @e Distinct @e objects: Safe.@n - * @e Shared @e objects: Unsafe. - */ -template > -class basic_seq_packet_socket - : public basic_socket -{ -public: - /// (Deprecated: Use native_handle_type.) The native representation of a - /// socket. - typedef typename SeqPacketSocketService::native_handle_type native_type; - - /// The native representation of a socket. - typedef typename SeqPacketSocketService::native_handle_type - native_handle_type; - - /// The protocol type. - typedef Protocol protocol_type; - - /// The endpoint type. - typedef typename Protocol::endpoint endpoint_type; - - /// Construct a basic_seq_packet_socket without opening it. - /** - * This constructor creates a sequenced packet socket without opening it. The - * socket needs to be opened and then connected or accepted before data can - * be sent or received on it. - * - * @param io_service The io_service object that the sequenced packet socket - * will use to dispatch handlers for any asynchronous operations performed on - * the socket. - */ - explicit basic_seq_packet_socket(asio::io_service& io_service) - : basic_socket(io_service) - { - } - - /// Construct and open a basic_seq_packet_socket. - /** - * This constructor creates and opens a sequenced_packet socket. The socket - * needs to be connected or accepted before data can be sent or received on - * it. - * - * @param io_service The io_service object that the sequenced packet socket - * will use to dispatch handlers for any asynchronous operations performed on - * the socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @throws asio::system_error Thrown on failure. - */ - basic_seq_packet_socket(asio::io_service& io_service, - const protocol_type& protocol) - : basic_socket(io_service, protocol) - { - } - - /// Construct a basic_seq_packet_socket, opening it and binding it to the - /// given local endpoint. - /** - * This constructor creates a sequenced packet socket and automatically opens - * it bound to the specified endpoint on the local machine. The protocol used - * is the protocol associated with the given endpoint. - * - * @param io_service The io_service object that the sequenced packet socket - * will use to dispatch handlers for any asynchronous operations performed on - * the socket. - * - * @param endpoint An endpoint on the local machine to which the sequenced - * packet socket will be bound. - * - * @throws asio::system_error Thrown on failure. - */ - basic_seq_packet_socket(asio::io_service& io_service, - const endpoint_type& endpoint) - : basic_socket(io_service, endpoint) - { - } - - /// Construct a basic_seq_packet_socket on an existing native socket. 
- /** - * This constructor creates a sequenced packet socket object to hold an - * existing native socket. - * - * @param io_service The io_service object that the sequenced packet socket - * will use to dispatch handlers for any asynchronous operations performed on - * the socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @param native_socket The new underlying socket implementation. - * - * @throws asio::system_error Thrown on failure. - */ - basic_seq_packet_socket(asio::io_service& io_service, - const protocol_type& protocol, const native_handle_type& native_socket) - : basic_socket( - io_service, protocol, native_socket) - { - } - -#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_seq_packet_socket from another. - /** - * This constructor moves a sequenced packet socket from one object to - * another. - * - * @param other The other basic_seq_packet_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_seq_packet_socket(io_service&) constructor. - */ - basic_seq_packet_socket(basic_seq_packet_socket&& other) - : basic_socket( - ASIO_MOVE_CAST(basic_seq_packet_socket)(other)) - { - } - - /// Move-assign a basic_seq_packet_socket from another. - /** - * This assignment operator moves a sequenced packet socket from one object to - * another. - * - * @param other The other basic_seq_packet_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_seq_packet_socket(io_service&) constructor. - */ - basic_seq_packet_socket& operator=(basic_seq_packet_socket&& other) - { - basic_socket::operator=( - ASIO_MOVE_CAST(basic_seq_packet_socket)(other)); - return *this; - } - - /// Move-construct a basic_seq_packet_socket from a socket of another protocol - /// type. - /** - * This constructor moves a sequenced packet socket from one object to - * another. - * - * @param other The other basic_seq_packet_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_seq_packet_socket(io_service&) constructor. - */ - template - basic_seq_packet_socket( - basic_seq_packet_socket&& other, - typename enable_if::value>::type* = 0) - : basic_socket( - ASIO_MOVE_CAST2(basic_seq_packet_socket< - Protocol1, SeqPacketSocketService1>)(other)) - { - } - - /// Move-assign a basic_seq_packet_socket from a socket of another protocol - /// type. - /** - * This assignment operator moves a sequenced packet socket from one object to - * another. - * - * @param other The other basic_seq_packet_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_seq_packet_socket(io_service&) constructor. - */ - template - typename enable_if::value, - basic_seq_packet_socket>::type& operator=( - basic_seq_packet_socket&& other) - { - basic_socket::operator=( - ASIO_MOVE_CAST2(basic_seq_packet_socket< - Protocol1, SeqPacketSocketService1>)(other)); - return *this; - } -#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - - /// Send some data on the socket. - /** - * This function is used to send data on the sequenced packet socket. 
The - * function call will block until the data has been sent successfully, or an - * until error occurs. - * - * @param buffers One or more data buffers to be sent on the socket. - * - * @param flags Flags specifying how the send call is to be made. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * socket.send(asio::buffer(data, size), 0); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t send(const ConstBufferSequence& buffers, - socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().send( - this->get_implementation(), buffers, flags, ec); - asio::detail::throw_error(ec, "send"); - return s; - } - - /// Send some data on the socket. - /** - * This function is used to send data on the sequenced packet socket. The - * function call will block the data has been sent successfully, or an until - * error occurs. - * - * @param buffers One or more data buffers to be sent on the socket. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes sent. Returns 0 if an error occurred. - * - * @note The send operation may not transmit all of the data to the peer. - * Consider using the @ref write function if you need to ensure that all data - * is written before the blocking operation completes. - */ - template - std::size_t send(const ConstBufferSequence& buffers, - socket_base::message_flags flags, asio::error_code& ec) - { - return this->get_service().send( - this->get_implementation(), buffers, flags, ec); - } - - /// Start an asynchronous send. - /** - * This function is used to asynchronously send data on the sequenced packet - * socket. The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent on the socket. Although - * the buffers object may be copied as necessary, ownership of the underlying - * memory blocks is retained by the caller, which must guarantee that they - * remain valid until the handler is called. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * socket.async_send(asio::buffer(data, size), 0, handler); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. 
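// Editor's note: illustrative only, not part of the deleted header. A minimal sketch of the
// blocking send() and async_send() members documented above, written against the asio 1.10.2
// API. It assumes `socket` is an already-connected sequenced-packet socket, here typed as
// asio::generic::seq_packet_protocol::socket.
#include <cstring>
#include <iostream>
#include "asio.hpp"

void send_record(asio::generic::seq_packet_protocol::socket& socket)
{
  const char record[] = "one complete record";

  // Blocking send: throws asio::system_error on failure.
  std::size_t sent = socket.send(asio::buffer(record, std::strlen(record)), 0);
  std::cout << "sent " << sent << " bytes\n";

  // Asynchronous send: the handler receives the error code and the byte count.
  socket.async_send(asio::buffer(record, std::strlen(record)), 0,
      [](const asio::error_code& error, std::size_t bytes_transferred)
      {
        if (!error)
          std::cout << "async_send completed, " << bytes_transferred << " bytes\n";
      });
}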
- */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send(const ConstBufferSequence& buffers, - socket_base::message_flags flags, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send(this->get_implementation(), - buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Receive some data on the socket. - /** - * This function is used to receive data on the sequenced packet socket. The - * function call will block until data has been received successfully, or - * until an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param out_flags After the receive call completes, contains flags - * associated with the received data. For example, if the - * socket_base::message_end_of_record bit is set then the received data marks - * the end of a record. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. An error code of - * asio::error::eof indicates that the connection was closed by the - * peer. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.receive(asio::buffer(data, size), out_flags); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags& out_flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, 0, out_flags, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on the socket. - /** - * This function is used to receive data on the sequenced packet socket. The - * function call will block until data has been received successfully, or - * until an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param in_flags Flags specifying how the receive call is to be made. - * - * @param out_flags After the receive call completes, contains flags - * associated with the received data. For example, if the - * socket_base::message_end_of_record bit is set then the received data marks - * the end of a record. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. An error code of - * asio::error::eof indicates that the connection was closed by the - * peer. - * - * @note The receive operation may not receive all of the requested number of - * bytes. Consider using the @ref read function if you need to ensure that the - * requested amount of data is read before the blocking operation completes. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.receive(asio::buffer(data, size), 0, out_flags); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. 
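// Editor's note: illustrative only, not part of the deleted header. Shows the blocking
// receive() overload documented above that reports out_flags, and how the
// message_end_of_record bit can be tested afterwards. Assumes `socket` is a connected
// sequenced-packet socket; the 4096-byte buffer is arbitrary.
#include <iostream>
#include "asio.hpp"

void receive_record(asio::generic::seq_packet_protocol::socket& socket)
{
  char data[4096];
  asio::socket_base::message_flags out_flags = 0;

  // Blocks until data arrives; out_flags describes the received data.
  std::size_t n = socket.receive(asio::buffer(data), out_flags);

  bool end_of_record =
      (out_flags & asio::socket_base::message_end_of_record) != 0;
  std::cout << "received " << n << " bytes"
            << (end_of_record ? " (end of record)" : "") << "\n";
}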
- */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags in_flags, - socket_base::message_flags& out_flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, in_flags, out_flags, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the sequenced packet socket. The - * function call will block until data has been received successfully, or - * until an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param in_flags Flags specifying how the receive call is to be made. - * - * @param out_flags After the receive call completes, contains flags - * associated with the received data. For example, if the - * socket_base::message_end_of_record bit is set then the received data marks - * the end of a record. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes received. Returns 0 if an error occurred. - * - * @note The receive operation may not receive all of the requested number of - * bytes. Consider using the @ref read function if you need to ensure that the - * requested amount of data is read before the blocking operation completes. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags in_flags, - socket_base::message_flags& out_flags, asio::error_code& ec) - { - return this->get_service().receive(this->get_implementation(), - buffers, in_flags, out_flags, ec); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive data from the sequenced - * packet socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param out_flags Once the asynchronous operation completes, contains flags - * associated with the received data. For example, if the - * socket_base::message_end_of_record bit is set then the received data marks - * the end of a record. The caller must guarantee that the referenced - * variable remains valid until the handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.async_receive(asio::buffer(data, size), out_flags, handler); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. 
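// Editor's note: illustrative only, not part of the deleted header. A sketch of the
// async_receive() overload documented above. Because out_flags is taken by reference and
// must stay valid until the handler runs, buffer and flags are kept in a longer-lived
// object here instead of on the initiating function's stack.
#include <iostream>
#include "asio.hpp"

struct record_reader
{
  asio::generic::seq_packet_protocol::socket& socket;
  char data[4096];
  asio::socket_base::message_flags out_flags;

  void start()
  {
    out_flags = 0;
    socket.async_receive(asio::buffer(data), out_flags,
        [this](const asio::error_code& error, std::size_t bytes_transferred)
        {
          if (!error)
            std::cout << "got " << bytes_transferred << " bytes, flags "
                      << out_flags << "\n";
        });
  }
};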
- */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - socket_base::message_flags& out_flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive( - this->get_implementation(), buffers, 0, out_flags, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive data from the sequenced - * data socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param in_flags Flags specifying how the receive call is to be made. - * - * @param out_flags Once the asynchronous operation completes, contains flags - * associated with the received data. For example, if the - * socket_base::message_end_of_record bit is set then the received data marks - * the end of a record. The caller must guarantee that the referenced - * variable remains valid until the handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.async_receive( - * asio::buffer(data, size), - * 0, out_flags, handler); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - socket_base::message_flags in_flags, - socket_base::message_flags& out_flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. 
- ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive( - this->get_implementation(), buffers, in_flags, out_flags, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } -}; - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // ASIO_BASIC_SEQ_PACKET_SOCKET_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_serial_port.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_serial_port.hpp deleted file mode 100644 index 007d293fed1db..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_serial_port.hpp +++ /dev/null @@ -1,695 +0,0 @@ -// -// basic_serial_port.hpp -// ~~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_SERIAL_PORT_HPP -#define ASIO_BASIC_SERIAL_PORT_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" - -#if defined(ASIO_HAS_SERIAL_PORT) \ - || defined(GENERATING_DOCUMENTATION) - -#include -#include "asio/basic_io_object.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/error.hpp" -#include "asio/serial_port_base.hpp" -#include "asio/serial_port_service.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides serial port functionality. -/** - * The basic_serial_port class template provides functionality that is common - * to all serial ports. - * - * @par Thread Safety - * @e Distinct @e objects: Safe.@n - * @e Shared @e objects: Unsafe. - */ -template -class basic_serial_port - : public basic_io_object, - public serial_port_base -{ -public: - /// (Deprecated: Use native_handle_type.) The native representation of a - /// serial port. - typedef typename SerialPortService::native_handle_type native_type; - - /// The native representation of a serial port. - typedef typename SerialPortService::native_handle_type native_handle_type; - - /// A basic_serial_port is always the lowest layer. - typedef basic_serial_port lowest_layer_type; - - /// Construct a basic_serial_port without opening it. - /** - * This constructor creates a serial port without opening it. - * - * @param io_service The io_service object that the serial port will use to - * dispatch handlers for any asynchronous operations performed on the port. - */ - explicit basic_serial_port(asio::io_service& io_service) - : basic_io_object(io_service) - { - } - - /// Construct and open a basic_serial_port. - /** - * This constructor creates and opens a serial port for the specified device - * name. - * - * @param io_service The io_service object that the serial port will use to - * dispatch handlers for any asynchronous operations performed on the port. - * - * @param device The platform-specific device name for this serial - * port. 
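// Editor's note: illustrative only, not part of the deleted header. Shows the two ways of
// opening a serial port described by the constructors above (where serial-port support is
// available): open at construction time, or construct closed and open() later. The device
// name "/dev/ttyUSB0" is a placeholder.
#include <iostream>
#include "asio.hpp"

int main()
{
  asio::io_service io_service;

  // Construct and open in one step; throws asio::system_error on failure.
  asio::serial_port port(io_service, "/dev/ttyUSB0");

  // Or construct without opening and open later, reporting errors via error_code.
  asio::serial_port spare(io_service);
  asio::error_code ec;
  spare.open("/dev/ttyUSB0", ec);
  if (ec)
    std::cout << "open failed: " << ec.message() << "\n";
  return 0;
}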
- */ - explicit basic_serial_port(asio::io_service& io_service, - const char* device) - : basic_io_object(io_service) - { - asio::error_code ec; - this->get_service().open(this->get_implementation(), device, ec); - asio::detail::throw_error(ec, "open"); - } - - /// Construct and open a basic_serial_port. - /** - * This constructor creates and opens a serial port for the specified device - * name. - * - * @param io_service The io_service object that the serial port will use to - * dispatch handlers for any asynchronous operations performed on the port. - * - * @param device The platform-specific device name for this serial - * port. - */ - explicit basic_serial_port(asio::io_service& io_service, - const std::string& device) - : basic_io_object(io_service) - { - asio::error_code ec; - this->get_service().open(this->get_implementation(), device, ec); - asio::detail::throw_error(ec, "open"); - } - - /// Construct a basic_serial_port on an existing native serial port. - /** - * This constructor creates a serial port object to hold an existing native - * serial port. - * - * @param io_service The io_service object that the serial port will use to - * dispatch handlers for any asynchronous operations performed on the port. - * - * @param native_serial_port A native serial port. - * - * @throws asio::system_error Thrown on failure. - */ - basic_serial_port(asio::io_service& io_service, - const native_handle_type& native_serial_port) - : basic_io_object(io_service) - { - asio::error_code ec; - this->get_service().assign(this->get_implementation(), - native_serial_port, ec); - asio::detail::throw_error(ec, "assign"); - } - -#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_serial_port from another. - /** - * This constructor moves a serial port from one object to another. - * - * @param other The other basic_serial_port object from which the move will - * occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_serial_port(io_service&) constructor. - */ - basic_serial_port(basic_serial_port&& other) - : basic_io_object( - ASIO_MOVE_CAST(basic_serial_port)(other)) - { - } - - /// Move-assign a basic_serial_port from another. - /** - * This assignment operator moves a serial port from one object to another. - * - * @param other The other basic_serial_port object from which the move will - * occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_serial_port(io_service&) constructor. - */ - basic_serial_port& operator=(basic_serial_port&& other) - { - basic_io_object::operator=( - ASIO_MOVE_CAST(basic_serial_port)(other)); - return *this; - } -#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - - /// Get a reference to the lowest layer. - /** - * This function returns a reference to the lowest layer in a stack of - * layers. Since a basic_serial_port cannot contain any further layers, it - * simply returns a reference to itself. - * - * @return A reference to the lowest layer in the stack of layers. Ownership - * is not transferred to the caller. - */ - lowest_layer_type& lowest_layer() - { - return *this; - } - - /// Get a const reference to the lowest layer. - /** - * This function returns a const reference to the lowest layer in a stack of - * layers. Since a basic_serial_port cannot contain any further layers, it - * simply returns a reference to itself. 
- * - * @return A const reference to the lowest layer in the stack of layers. - * Ownership is not transferred to the caller. - */ - const lowest_layer_type& lowest_layer() const - { - return *this; - } - - /// Open the serial port using the specified device name. - /** - * This function opens the serial port for the specified device name. - * - * @param device The platform-specific device name. - * - * @throws asio::system_error Thrown on failure. - */ - void open(const std::string& device) - { - asio::error_code ec; - this->get_service().open(this->get_implementation(), device, ec); - asio::detail::throw_error(ec, "open"); - } - - /// Open the serial port using the specified device name. - /** - * This function opens the serial port using the given platform-specific - * device name. - * - * @param device The platform-specific device name. - * - * @param ec Set the indicate what error occurred, if any. - */ - asio::error_code open(const std::string& device, - asio::error_code& ec) - { - return this->get_service().open(this->get_implementation(), device, ec); - } - - /// Assign an existing native serial port to the serial port. - /* - * This function opens the serial port to hold an existing native serial port. - * - * @param native_serial_port A native serial port. - * - * @throws asio::system_error Thrown on failure. - */ - void assign(const native_handle_type& native_serial_port) - { - asio::error_code ec; - this->get_service().assign(this->get_implementation(), - native_serial_port, ec); - asio::detail::throw_error(ec, "assign"); - } - - /// Assign an existing native serial port to the serial port. - /* - * This function opens the serial port to hold an existing native serial port. - * - * @param native_serial_port A native serial port. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code assign(const native_handle_type& native_serial_port, - asio::error_code& ec) - { - return this->get_service().assign(this->get_implementation(), - native_serial_port, ec); - } - - /// Determine whether the serial port is open. - bool is_open() const - { - return this->get_service().is_open(this->get_implementation()); - } - - /// Close the serial port. - /** - * This function is used to close the serial port. Any asynchronous read or - * write operations will be cancelled immediately, and will complete with the - * asio::error::operation_aborted error. - * - * @throws asio::system_error Thrown on failure. - */ - void close() - { - asio::error_code ec; - this->get_service().close(this->get_implementation(), ec); - asio::detail::throw_error(ec, "close"); - } - - /// Close the serial port. - /** - * This function is used to close the serial port. Any asynchronous read or - * write operations will be cancelled immediately, and will complete with the - * asio::error::operation_aborted error. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code close(asio::error_code& ec) - { - return this->get_service().close(this->get_implementation(), ec); - } - - /// (Deprecated: Use native_handle().) Get the native serial port - /// representation. - /** - * This function may be used to obtain the underlying representation of the - * serial port. This is intended to allow access to native serial port - * functionality that is not otherwise provided. - */ - native_type native() - { - return this->get_service().native_handle(this->get_implementation()); - } - - /// Get the native serial port representation. 
- /** - * This function may be used to obtain the underlying representation of the - * serial port. This is intended to allow access to native serial port - * functionality that is not otherwise provided. - */ - native_handle_type native_handle() - { - return this->get_service().native_handle(this->get_implementation()); - } - - /// Cancel all asynchronous operations associated with the serial port. - /** - * This function causes all outstanding asynchronous read or write operations - * to finish immediately, and the handlers for cancelled operations will be - * passed the asio::error::operation_aborted error. - * - * @throws asio::system_error Thrown on failure. - */ - void cancel() - { - asio::error_code ec; - this->get_service().cancel(this->get_implementation(), ec); - asio::detail::throw_error(ec, "cancel"); - } - - /// Cancel all asynchronous operations associated with the serial port. - /** - * This function causes all outstanding asynchronous read or write operations - * to finish immediately, and the handlers for cancelled operations will be - * passed the asio::error::operation_aborted error. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code cancel(asio::error_code& ec) - { - return this->get_service().cancel(this->get_implementation(), ec); - } - - /// Send a break sequence to the serial port. - /** - * This function causes a break sequence of platform-specific duration to be - * sent out the serial port. - * - * @throws asio::system_error Thrown on failure. - */ - void send_break() - { - asio::error_code ec; - this->get_service().send_break(this->get_implementation(), ec); - asio::detail::throw_error(ec, "send_break"); - } - - /// Send a break sequence to the serial port. - /** - * This function causes a break sequence of platform-specific duration to be - * sent out the serial port. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code send_break(asio::error_code& ec) - { - return this->get_service().send_break(this->get_implementation(), ec); - } - - /// Set an option on the serial port. - /** - * This function is used to set an option on the serial port. - * - * @param option The option value to be set on the serial port. - * - * @throws asio::system_error Thrown on failure. - * - * @sa SettableSerialPortOption @n - * asio::serial_port_base::baud_rate @n - * asio::serial_port_base::flow_control @n - * asio::serial_port_base::parity @n - * asio::serial_port_base::stop_bits @n - * asio::serial_port_base::character_size - */ - template - void set_option(const SettableSerialPortOption& option) - { - asio::error_code ec; - this->get_service().set_option(this->get_implementation(), option, ec); - asio::detail::throw_error(ec, "set_option"); - } - - /// Set an option on the serial port. - /** - * This function is used to set an option on the serial port. - * - * @param option The option value to be set on the serial port. - * - * @param ec Set to indicate what error occurred, if any. - * - * @sa SettableSerialPortOption @n - * asio::serial_port_base::baud_rate @n - * asio::serial_port_base::flow_control @n - * asio::serial_port_base::parity @n - * asio::serial_port_base::stop_bits @n - * asio::serial_port_base::character_size - */ - template - asio::error_code set_option(const SettableSerialPortOption& option, - asio::error_code& ec) - { - return this->get_service().set_option( - this->get_implementation(), option, ec); - } - - /// Get an option from the serial port. 
- /** - * This function is used to get the current value of an option on the serial - * port. - * - * @param option The option value to be obtained from the serial port. - * - * @throws asio::system_error Thrown on failure. - * - * @sa GettableSerialPortOption @n - * asio::serial_port_base::baud_rate @n - * asio::serial_port_base::flow_control @n - * asio::serial_port_base::parity @n - * asio::serial_port_base::stop_bits @n - * asio::serial_port_base::character_size - */ - template - void get_option(GettableSerialPortOption& option) - { - asio::error_code ec; - this->get_service().get_option(this->get_implementation(), option, ec); - asio::detail::throw_error(ec, "get_option"); - } - - /// Get an option from the serial port. - /** - * This function is used to get the current value of an option on the serial - * port. - * - * @param option The option value to be obtained from the serial port. - * - * @param ec Set to indicate what error occured, if any. - * - * @sa GettableSerialPortOption @n - * asio::serial_port_base::baud_rate @n - * asio::serial_port_base::flow_control @n - * asio::serial_port_base::parity @n - * asio::serial_port_base::stop_bits @n - * asio::serial_port_base::character_size - */ - template - asio::error_code get_option(GettableSerialPortOption& option, - asio::error_code& ec) - { - return this->get_service().get_option( - this->get_implementation(), option, ec); - } - - /// Write some data to the serial port. - /** - * This function is used to write data to the serial port. The function call - * will block until one or more bytes of the data has been written - * successfully, or until an error occurs. - * - * @param buffers One or more data buffers to be written to the serial port. - * - * @returns The number of bytes written. - * - * @throws asio::system_error Thrown on failure. An error code of - * asio::error::eof indicates that the connection was closed by the - * peer. - * - * @note The write_some operation may not transmit all of the data to the - * peer. Consider using the @ref write function if you need to ensure that - * all data is written before the blocking operation completes. - * - * @par Example - * To write a single data buffer use the @ref buffer function as follows: - * @code - * serial_port.write_some(asio::buffer(data, size)); - * @endcode - * See the @ref buffer documentation for information on writing multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t write_some(const ConstBufferSequence& buffers) - { - asio::error_code ec; - std::size_t s = this->get_service().write_some( - this->get_implementation(), buffers, ec); - asio::detail::throw_error(ec, "write_some"); - return s; - } - - /// Write some data to the serial port. - /** - * This function is used to write data to the serial port. The function call - * will block until one or more bytes of the data has been written - * successfully, or until an error occurs. - * - * @param buffers One or more data buffers to be written to the serial port. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes written. Returns 0 if an error occurred. - * - * @note The write_some operation may not transmit all of the data to the - * peer. Consider using the @ref write function if you need to ensure that - * all data is written before the blocking operation completes. 
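// Editor's note: illustrative only, not part of the deleted header. Combines the option
// accessors and the blocking write_some() documented above: configure the port, then write
// a buffer, remembering that write_some() may transmit fewer bytes than requested.
#include <cstring>
#include <iostream>
#include "asio.hpp"

void configure_and_write(asio::serial_port& port)
{
  // Set and read back the baud rate.
  port.set_option(asio::serial_port_base::baud_rate(115200));
  asio::serial_port_base::baud_rate rate;
  port.get_option(rate);
  std::cout << "baud rate is " << rate.value() << "\n";

  // write_some() may write only part of the buffer; asio::write() would loop until done.
  const char msg[] = "AT\r\n";
  std::size_t written = port.write_some(asio::buffer(msg, std::strlen(msg)));
  std::cout << "wrote " << written << " of " << std::strlen(msg) << " bytes\n";
}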
- */ - template - std::size_t write_some(const ConstBufferSequence& buffers, - asio::error_code& ec) - { - return this->get_service().write_some( - this->get_implementation(), buffers, ec); - } - - /// Start an asynchronous write. - /** - * This function is used to asynchronously write data to the serial port. - * The function call always returns immediately. - * - * @param buffers One or more data buffers to be written to the serial port. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param handler The handler to be called when the write operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes written. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The write operation may not transmit all of the data to the peer. - * Consider using the @ref async_write function if you need to ensure that all - * data is written before the asynchronous operation completes. - * - * @par Example - * To write a single data buffer use the @ref buffer function as follows: - * @code - * serial_port.async_write_some(asio::buffer(data, size), handler); - * @endcode - * See the @ref buffer documentation for information on writing multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_write_some(const ConstBufferSequence& buffers, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_write_some(this->get_implementation(), - buffers, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Read some data from the serial port. - /** - * This function is used to read data from the serial port. The function - * call will block until one or more bytes of data has been read successfully, - * or until an error occurs. - * - * @param buffers One or more buffers into which the data will be read. - * - * @returns The number of bytes read. - * - * @throws asio::system_error Thrown on failure. An error code of - * asio::error::eof indicates that the connection was closed by the - * peer. - * - * @note The read_some operation may not read all of the requested number of - * bytes. Consider using the @ref read function if you need to ensure that - * the requested amount of data is read before the blocking operation - * completes. - * - * @par Example - * To read into a single data buffer use the @ref buffer function as follows: - * @code - * serial_port.read_some(asio::buffer(data, size)); - * @endcode - * See the @ref buffer documentation for information on reading into multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. 
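// Editor's note: illustrative only, not part of the deleted header. A sketch of
// async_write_some() as documented above; the buffer must outlive the operation, so it is
// held in a shared_ptr captured by the handler (one common way to arrange that).
#include <iostream>
#include <memory>
#include <string>
#include "asio.hpp"

void async_write_line(asio::serial_port& port, const std::string& line)
{
  auto data = std::make_shared<std::string>(line);
  port.async_write_some(asio::buffer(*data),
      [data](const asio::error_code& error, std::size_t bytes_transferred)
      {
        if (error)
          std::cout << "write failed: " << error.message() << "\n";
        else
          std::cout << "wrote " << bytes_transferred << " bytes\n";
        // async_write_some() may write fewer bytes than requested;
        // asio::async_write() would be used to write the whole buffer.
      });
}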
- */ - template - std::size_t read_some(const MutableBufferSequence& buffers) - { - asio::error_code ec; - std::size_t s = this->get_service().read_some( - this->get_implementation(), buffers, ec); - asio::detail::throw_error(ec, "read_some"); - return s; - } - - /// Read some data from the serial port. - /** - * This function is used to read data from the serial port. The function - * call will block until one or more bytes of data has been read successfully, - * or until an error occurs. - * - * @param buffers One or more buffers into which the data will be read. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes read. Returns 0 if an error occurred. - * - * @note The read_some operation may not read all of the requested number of - * bytes. Consider using the @ref read function if you need to ensure that - * the requested amount of data is read before the blocking operation - * completes. - */ - template - std::size_t read_some(const MutableBufferSequence& buffers, - asio::error_code& ec) - { - return this->get_service().read_some( - this->get_implementation(), buffers, ec); - } - - /// Start an asynchronous read. - /** - * This function is used to asynchronously read data from the serial port. - * The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be read. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param handler The handler to be called when the read operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes read. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The read operation may not read all of the requested number of bytes. - * Consider using the @ref async_read function if you need to ensure that the - * requested amount of data is read before the asynchronous operation - * completes. - * - * @par Example - * To read into a single data buffer use the @ref buffer function as follows: - * @code - * serial_port.async_read_some(asio::buffer(data, size), handler); - * @endcode - * See the @ref buffer documentation for information on reading into multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_read_some(const MutableBufferSequence& buffers, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. 
- ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_read_some(this->get_implementation(), - buffers, ASIO_MOVE_CAST(ReadHandler)(handler)); - } -}; - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // defined(ASIO_HAS_SERIAL_PORT) - // || defined(GENERATING_DOCUMENTATION) - -#endif // ASIO_BASIC_SERIAL_PORT_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_signal_set.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_signal_set.hpp deleted file mode 100644 index 2dd71ceec7011..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_signal_set.hpp +++ /dev/null @@ -1,384 +0,0 @@ -// -// basic_signal_set.hpp -// ~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_SIGNAL_SET_HPP -#define ASIO_BASIC_SIGNAL_SET_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" - -#include "asio/basic_io_object.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/error.hpp" -#include "asio/signal_set_service.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides signal functionality. -/** - * The basic_signal_set class template provides the ability to perform an - * asynchronous wait for one or more signals to occur. - * - * Most applications will use the asio::signal_set typedef. - * - * @par Thread Safety - * @e Distinct @e objects: Safe.@n - * @e Shared @e objects: Unsafe. - * - * @par Example - * Performing an asynchronous wait: - * @code - * void handler( - * const asio::error_code& error, - * int signal_number) - * { - * if (!error) - * { - * // A signal occurred. - * } - * } - * - * ... - * - * // Construct a signal set registered for process termination. - * asio::signal_set signals(io_service, SIGINT, SIGTERM); - * - * // Start an asynchronous wait for one of the signals to occur. - * signals.async_wait(handler); - * @endcode - * - * @par Queueing of signal notifications - * - * If a signal is registered with a signal_set, and the signal occurs when - * there are no waiting handlers, then the signal notification is queued. The - * next async_wait operation on that signal_set will dequeue the notification. - * If multiple notifications are queued, subsequent async_wait operations - * dequeue them one at a time. Signal notifications are dequeued in order of - * ascending signal number. - * - * If a signal number is removed from a signal_set (using the @c remove or @c - * erase member functions) then any queued notifications for that signal are - * discarded. - * - * @par Multiple registration of signals - * - * The same signal number may be registered with different signal_set objects. - * When the signal occurs, one handler is called for each signal_set object. - * - * Note that multiple registration only works for signals that are registered - * using Asio. The application must not also register a signal handler using - * functions such as @c signal() or @c sigaction(). 
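// Editor's note: illustrative only, not part of the deleted header. Expands on the class
// documentation above with a handler that re-arms the wait, so the signal_set keeps
// receiving signals until it is cancelled, cleared, or destroyed.
#include <csignal>
#include <iostream>
#include "asio.hpp"

void wait_again(asio::signal_set& signals)
{
  signals.async_wait(
      [&signals](const asio::error_code& error, int signal_number)
      {
        if (error == asio::error::operation_aborted)
          return; // cancel() or destruction; stop waiting.
        std::cout << "caught signal " << signal_number << "\n";
        wait_again(signals); // queue another wait for the next signal
      });
}

int main()
{
  asio::io_service io_service;
  asio::signal_set signals(io_service, SIGINT, SIGTERM);
  wait_again(signals);
  io_service.run();
  return 0;
}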
- * - * @par Signal masking on POSIX platforms - * - * POSIX allows signals to be blocked using functions such as @c sigprocmask() - * and @c pthread_sigmask(). For signals to be delivered, programs must ensure - * that any signals registered using signal_set objects are unblocked in at - * least one thread. - */ -template -class basic_signal_set - : public basic_io_object -{ -public: - /// Construct a signal set without adding any signals. - /** - * This constructor creates a signal set without registering for any signals. - * - * @param io_service The io_service object that the signal set will use to - * dispatch handlers for any asynchronous operations performed on the set. - */ - explicit basic_signal_set(asio::io_service& io_service) - : basic_io_object(io_service) - { - } - - /// Construct a signal set and add one signal. - /** - * This constructor creates a signal set and registers for one signal. - * - * @param io_service The io_service object that the signal set will use to - * dispatch handlers for any asynchronous operations performed on the set. - * - * @param signal_number_1 The signal number to be added. - * - * @note This constructor is equivalent to performing: - * @code asio::signal_set signals(io_service); - * signals.add(signal_number_1); @endcode - */ - basic_signal_set(asio::io_service& io_service, int signal_number_1) - : basic_io_object(io_service) - { - asio::error_code ec; - this->service.add(this->implementation, signal_number_1, ec); - asio::detail::throw_error(ec, "add"); - } - - /// Construct a signal set and add two signals. - /** - * This constructor creates a signal set and registers for two signals. - * - * @param io_service The io_service object that the signal set will use to - * dispatch handlers for any asynchronous operations performed on the set. - * - * @param signal_number_1 The first signal number to be added. - * - * @param signal_number_2 The second signal number to be added. - * - * @note This constructor is equivalent to performing: - * @code asio::signal_set signals(io_service); - * signals.add(signal_number_1); - * signals.add(signal_number_2); @endcode - */ - basic_signal_set(asio::io_service& io_service, int signal_number_1, - int signal_number_2) - : basic_io_object(io_service) - { - asio::error_code ec; - this->service.add(this->implementation, signal_number_1, ec); - asio::detail::throw_error(ec, "add"); - this->service.add(this->implementation, signal_number_2, ec); - asio::detail::throw_error(ec, "add"); - } - - /// Construct a signal set and add three signals. - /** - * This constructor creates a signal set and registers for three signals. - * - * @param io_service The io_service object that the signal set will use to - * dispatch handlers for any asynchronous operations performed on the set. - * - * @param signal_number_1 The first signal number to be added. - * - * @param signal_number_2 The second signal number to be added. - * - * @param signal_number_3 The third signal number to be added. 
- * - * @note This constructor is equivalent to performing: - * @code asio::signal_set signals(io_service); - * signals.add(signal_number_1); - * signals.add(signal_number_2); - * signals.add(signal_number_3); @endcode - */ - basic_signal_set(asio::io_service& io_service, int signal_number_1, - int signal_number_2, int signal_number_3) - : basic_io_object(io_service) - { - asio::error_code ec; - this->service.add(this->implementation, signal_number_1, ec); - asio::detail::throw_error(ec, "add"); - this->service.add(this->implementation, signal_number_2, ec); - asio::detail::throw_error(ec, "add"); - this->service.add(this->implementation, signal_number_3, ec); - asio::detail::throw_error(ec, "add"); - } - - /// Add a signal to a signal_set. - /** - * This function adds the specified signal to the set. It has no effect if the - * signal is already in the set. - * - * @param signal_number The signal to be added to the set. - * - * @throws asio::system_error Thrown on failure. - */ - void add(int signal_number) - { - asio::error_code ec; - this->service.add(this->implementation, signal_number, ec); - asio::detail::throw_error(ec, "add"); - } - - /// Add a signal to a signal_set. - /** - * This function adds the specified signal to the set. It has no effect if the - * signal is already in the set. - * - * @param signal_number The signal to be added to the set. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code add(int signal_number, - asio::error_code& ec) - { - return this->service.add(this->implementation, signal_number, ec); - } - - /// Remove a signal from a signal_set. - /** - * This function removes the specified signal from the set. It has no effect - * if the signal is not in the set. - * - * @param signal_number The signal to be removed from the set. - * - * @throws asio::system_error Thrown on failure. - * - * @note Removes any notifications that have been queued for the specified - * signal number. - */ - void remove(int signal_number) - { - asio::error_code ec; - this->service.remove(this->implementation, signal_number, ec); - asio::detail::throw_error(ec, "remove"); - } - - /// Remove a signal from a signal_set. - /** - * This function removes the specified signal from the set. It has no effect - * if the signal is not in the set. - * - * @param signal_number The signal to be removed from the set. - * - * @param ec Set to indicate what error occurred, if any. - * - * @note Removes any notifications that have been queued for the specified - * signal number. - */ - asio::error_code remove(int signal_number, - asio::error_code& ec) - { - return this->service.remove(this->implementation, signal_number, ec); - } - - /// Remove all signals from a signal_set. - /** - * This function removes all signals from the set. It has no effect if the set - * is already empty. - * - * @throws asio::system_error Thrown on failure. - * - * @note Removes all queued notifications. - */ - void clear() - { - asio::error_code ec; - this->service.clear(this->implementation, ec); - asio::detail::throw_error(ec, "clear"); - } - - /// Remove all signals from a signal_set. - /** - * This function removes all signals from the set. It has no effect if the set - * is already empty. - * - * @param ec Set to indicate what error occurred, if any. - * - * @note Removes all queued notifications. - */ - asio::error_code clear(asio::error_code& ec) - { - return this->service.clear(this->implementation, ec); - } - - /// Cancel all operations associated with the signal set. 
- /** - * This function forces the completion of any pending asynchronous wait - * operations against the signal set. The handler for each cancelled - * operation will be invoked with the asio::error::operation_aborted - * error code. - * - * Cancellation does not alter the set of registered signals. - * - * @throws asio::system_error Thrown on failure. - * - * @note If a registered signal occurred before cancel() is called, then the - * handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - void cancel() - { - asio::error_code ec; - this->service.cancel(this->implementation, ec); - asio::detail::throw_error(ec, "cancel"); - } - - /// Cancel all operations associated with the signal set. - /** - * This function forces the completion of any pending asynchronous wait - * operations against the signal set. The handler for each cancelled - * operation will be invoked with the asio::error::operation_aborted - * error code. - * - * Cancellation does not alter the set of registered signals. - * - * @param ec Set to indicate what error occurred, if any. - * - * @note If a registered signal occurred before cancel() is called, then the - * handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - asio::error_code cancel(asio::error_code& ec) - { - return this->service.cancel(this->implementation, ec); - } - - /// Start an asynchronous operation to wait for a signal to be delivered. - /** - * This function may be used to initiate an asynchronous wait against the - * signal set. It always returns immediately. - * - * For each call to async_wait(), the supplied handler will be called exactly - * once. The handler will be called when: - * - * @li One of the registered signals in the signal set occurs; or - * - * @li The signal set was cancelled, in which case the handler is passed the - * error code asio::error::operation_aborted. - * - * @param handler The handler to be called when the signal occurs. Copies - * will be made of the handler as required. The function signature of the - * handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * int signal_number // Indicates which signal occurred. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(SignalHandler, - void (asio::error_code, int)) - async_wait(ASIO_MOVE_ARG(SignalHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a SignalHandler. 
-    ASIO_SIGNAL_HANDLER_CHECK(SignalHandler, handler) type_check;
-
-    return this->service.async_wait(this->implementation,
-        ASIO_MOVE_CAST(SignalHandler)(handler));
-  }
-};
-
-} // namespace asio
-
-#include "asio/detail/pop_options.hpp"
-
-#endif // ASIO_BASIC_SIGNAL_SET_HPP
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket.hpp
deleted file mode 100644
index be4fb3c2c3af7..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket.hpp
+++ /dev/null
@@ -1,1518 +0,0 @@
-//
-// basic_socket.hpp
-// ~~~~~~~~~~~~~~~~
-//
-// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
-//
-// Distributed under the Boost Software License, Version 1.0. (See accompanying
-// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//
-
-#ifndef ASIO_BASIC_SOCKET_HPP
-#define ASIO_BASIC_SOCKET_HPP
-
-#if defined(_MSC_VER) && (_MSC_VER >= 1200)
-# pragma once
-#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
-
-#include "asio/detail/config.hpp"
-#include "asio/async_result.hpp"
-#include "asio/basic_io_object.hpp"
-#include "asio/detail/handler_type_requirements.hpp"
-#include "asio/detail/throw_error.hpp"
-#include "asio/detail/type_traits.hpp"
-#include "asio/error.hpp"
-#include "asio/socket_base.hpp"
-
-#include "asio/detail/push_options.hpp"
-
-namespace asio {
-
-/// Provides socket functionality.
-/**
- * The basic_socket class template provides functionality that is common to both
- * stream-oriented and datagram-oriented sockets.
- *
- * @par Thread Safety
- * @e Distinct @e objects: Safe.@n
- * @e Shared @e objects: Unsafe.
- */
-template <typename Protocol, typename SocketService>
-class basic_socket
-  : public basic_io_object<SocketService>,
-    public socket_base
-{
-public:
-  /// (Deprecated: Use native_handle_type.) The native representation of a
-  /// socket.
-  typedef typename SocketService::native_handle_type native_type;
-
-  /// The native representation of a socket.
-  typedef typename SocketService::native_handle_type native_handle_type;
-
-  /// The protocol type.
-  typedef Protocol protocol_type;
-
-  /// The endpoint type.
-  typedef typename Protocol::endpoint endpoint_type;
-
-  /// A basic_socket is always the lowest layer.
-  typedef basic_socket<Protocol, SocketService> lowest_layer_type;
-
-  /// Construct a basic_socket without opening it.
-  /**
-   * This constructor creates a socket without opening it.
-   *
-   * @param io_service The io_service object that the socket will use to
-   * dispatch handlers for any asynchronous operations performed on the socket.
-   */
-  explicit basic_socket(asio::io_service& io_service)
-    : basic_io_object<SocketService>(io_service)
-  {
-  }
-
-  /// Construct and open a basic_socket.
-  /**
-   * This constructor creates and opens a socket.
-   *
-   * @param io_service The io_service object that the socket will use to
-   * dispatch handlers for any asynchronous operations performed on the socket.
-   *
-   * @param protocol An object specifying protocol parameters to be used.
-   *
-   * @throws asio::system_error Thrown on failure.
- */ - basic_socket(asio::io_service& io_service, - const protocol_type& protocol) - : basic_io_object(io_service) - { - asio::error_code ec; - this->get_service().open(this->get_implementation(), protocol, ec); - asio::detail::throw_error(ec, "open"); - } - - /// Construct a basic_socket, opening it and binding it to the given local - /// endpoint. - /** - * This constructor creates a socket and automatically opens it bound to the - * specified endpoint on the local machine. The protocol used is the protocol - * associated with the given endpoint. - * - * @param io_service The io_service object that the socket will use to - * dispatch handlers for any asynchronous operations performed on the socket. - * - * @param endpoint An endpoint on the local machine to which the socket will - * be bound. - * - * @throws asio::system_error Thrown on failure. - */ - basic_socket(asio::io_service& io_service, - const endpoint_type& endpoint) - : basic_io_object(io_service) - { - asio::error_code ec; - const protocol_type protocol = endpoint.protocol(); - this->get_service().open(this->get_implementation(), protocol, ec); - asio::detail::throw_error(ec, "open"); - this->get_service().bind(this->get_implementation(), endpoint, ec); - asio::detail::throw_error(ec, "bind"); - } - - /// Construct a basic_socket on an existing native socket. - /** - * This constructor creates a socket object to hold an existing native socket. - * - * @param io_service The io_service object that the socket will use to - * dispatch handlers for any asynchronous operations performed on the socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @param native_socket A native socket. - * - * @throws asio::system_error Thrown on failure. - */ - basic_socket(asio::io_service& io_service, - const protocol_type& protocol, const native_handle_type& native_socket) - : basic_io_object(io_service) - { - asio::error_code ec; - this->get_service().assign(this->get_implementation(), - protocol, native_socket, ec); - asio::detail::throw_error(ec, "assign"); - } - -#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_socket from another. - /** - * This constructor moves a socket from one object to another. - * - * @param other The other basic_socket object from which the move will - * occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_socket(io_service&) constructor. - */ - basic_socket(basic_socket&& other) - : basic_io_object( - ASIO_MOVE_CAST(basic_socket)(other)) - { - } - - /// Move-assign a basic_socket from another. - /** - * This assignment operator moves a socket from one object to another. - * - * @param other The other basic_socket object from which the move will - * occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_socket(io_service&) constructor. - */ - basic_socket& operator=(basic_socket&& other) - { - basic_io_object::operator=( - ASIO_MOVE_CAST(basic_socket)(other)); - return *this; - } - - // All sockets have access to each other's implementations. - template - friend class basic_socket; - - /// Move-construct a basic_socket from a socket of another protocol type. - /** - * This constructor moves a socket from one object to another. - * - * @param other The other basic_socket object from which the move will - * occur. 
- * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_socket(io_service&) constructor. - */ - template - basic_socket(basic_socket&& other, - typename enable_if::value>::type* = 0) - : basic_io_object(other.get_io_service()) - { - this->get_service().template converting_move_construct( - this->get_implementation(), other.get_implementation()); - } - - /// Move-assign a basic_socket from a socket of another protocol type. - /** - * This assignment operator moves a socket from one object to another. - * - * @param other The other basic_socket object from which the move will - * occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_socket(io_service&) constructor. - */ - template - typename enable_if::value, - basic_socket>::type& operator=( - basic_socket&& other) - { - basic_socket tmp(ASIO_MOVE_CAST2(basic_socket< - Protocol1, SocketService1>)(other)); - basic_io_object::operator=( - ASIO_MOVE_CAST(basic_socket)(tmp)); - return *this; - } -#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - - /// Get a reference to the lowest layer. - /** - * This function returns a reference to the lowest layer in a stack of - * layers. Since a basic_socket cannot contain any further layers, it simply - * returns a reference to itself. - * - * @return A reference to the lowest layer in the stack of layers. Ownership - * is not transferred to the caller. - */ - lowest_layer_type& lowest_layer() - { - return *this; - } - - /// Get a const reference to the lowest layer. - /** - * This function returns a const reference to the lowest layer in a stack of - * layers. Since a basic_socket cannot contain any further layers, it simply - * returns a reference to itself. - * - * @return A const reference to the lowest layer in the stack of layers. - * Ownership is not transferred to the caller. - */ - const lowest_layer_type& lowest_layer() const - { - return *this; - } - - /// Open the socket using the specified protocol. - /** - * This function opens the socket so that it will use the specified protocol. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * socket.open(asio::ip::tcp::v4()); - * @endcode - */ - void open(const protocol_type& protocol = protocol_type()) - { - asio::error_code ec; - this->get_service().open(this->get_implementation(), protocol, ec); - asio::detail::throw_error(ec, "open"); - } - - /// Open the socket using the specified protocol. - /** - * This function opens the socket so that it will use the specified protocol. - * - * @param protocol An object specifying which protocol is to be used. - * - * @param ec Set to indicate what error occurred, if any. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * asio::error_code ec; - * socket.open(asio::ip::tcp::v4(), ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - asio::error_code open(const protocol_type& protocol, - asio::error_code& ec) - { - return this->get_service().open(this->get_implementation(), protocol, ec); - } - - /// Assign an existing native socket to the socket. - /* - * This function opens the socket to hold an existing native socket. - * - * @param protocol An object specifying which protocol is to be used. - * - * @param native_socket A native socket. 
- * - * @throws asio::system_error Thrown on failure. - */ - void assign(const protocol_type& protocol, - const native_handle_type& native_socket) - { - asio::error_code ec; - this->get_service().assign(this->get_implementation(), - protocol, native_socket, ec); - asio::detail::throw_error(ec, "assign"); - } - - /// Assign an existing native socket to the socket. - /* - * This function opens the socket to hold an existing native socket. - * - * @param protocol An object specifying which protocol is to be used. - * - * @param native_socket A native socket. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code assign(const protocol_type& protocol, - const native_handle_type& native_socket, asio::error_code& ec) - { - return this->get_service().assign(this->get_implementation(), - protocol, native_socket, ec); - } - - /// Determine whether the socket is open. - bool is_open() const - { - return this->get_service().is_open(this->get_implementation()); - } - - /// Close the socket. - /** - * This function is used to close the socket. Any asynchronous send, receive - * or connect operations will be cancelled immediately, and will complete - * with the asio::error::operation_aborted error. - * - * @throws asio::system_error Thrown on failure. Note that, even if - * the function indicates an error, the underlying descriptor is closed. - * - * @note For portable behaviour with respect to graceful closure of a - * connected socket, call shutdown() before closing the socket. - */ - void close() - { - asio::error_code ec; - this->get_service().close(this->get_implementation(), ec); - asio::detail::throw_error(ec, "close"); - } - - /// Close the socket. - /** - * This function is used to close the socket. Any asynchronous send, receive - * or connect operations will be cancelled immediately, and will complete - * with the asio::error::operation_aborted error. - * - * @param ec Set to indicate what error occurred, if any. Note that, even if - * the function indicates an error, the underlying descriptor is closed. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::error_code ec; - * socket.close(ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - * - * @note For portable behaviour with respect to graceful closure of a - * connected socket, call shutdown() before closing the socket. - */ - asio::error_code close(asio::error_code& ec) - { - return this->get_service().close(this->get_implementation(), ec); - } - - /// (Deprecated: Use native_handle().) Get the native socket representation. - /** - * This function may be used to obtain the underlying representation of the - * socket. This is intended to allow access to native socket functionality - * that is not otherwise provided. - */ - native_type native() - { - return this->get_service().native_handle(this->get_implementation()); - } - - /// Get the native socket representation. - /** - * This function may be used to obtain the underlying representation of the - * socket. This is intended to allow access to native socket functionality - * that is not otherwise provided. - */ - native_handle_type native_handle() - { - return this->get_service().native_handle(this->get_implementation()); - } - - /// Cancel all asynchronous operations associated with the socket. 
- /** - * This function causes all outstanding asynchronous connect, send and receive - * operations to finish immediately, and the handlers for cancelled operations - * will be passed the asio::error::operation_aborted error. - * - * @throws asio::system_error Thrown on failure. - * - * @note Calls to cancel() will always fail with - * asio::error::operation_not_supported when run on Windows XP, Windows - * Server 2003, and earlier versions of Windows, unless - * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has - * two issues that should be considered before enabling its use: - * - * @li It will only cancel asynchronous operations that were initiated in the - * current thread. - * - * @li It can appear to complete without error, but the request to cancel the - * unfinished operations may be silently ignored by the operating system. - * Whether it works or not seems to depend on the drivers that are installed. - * - * For portable cancellation, consider using one of the following - * alternatives: - * - * @li Disable asio's I/O completion port backend by defining - * ASIO_DISABLE_IOCP. - * - * @li Use the close() function to simultaneously cancel the outstanding - * operations and close the socket. - * - * When running on Windows Vista, Windows Server 2008, and later, the - * CancelIoEx function is always used. This function does not have the - * problems described above. - */ -#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ - && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ - && !defined(ASIO_ENABLE_CANCELIO) - __declspec(deprecated("By default, this function always fails with " - "operation_not_supported when used on Windows XP, Windows Server 2003, " - "or earlier. Consult documentation for details.")) -#endif - void cancel() - { - asio::error_code ec; - this->get_service().cancel(this->get_implementation(), ec); - asio::detail::throw_error(ec, "cancel"); - } - - /// Cancel all asynchronous operations associated with the socket. - /** - * This function causes all outstanding asynchronous connect, send and receive - * operations to finish immediately, and the handlers for cancelled operations - * will be passed the asio::error::operation_aborted error. - * - * @param ec Set to indicate what error occurred, if any. - * - * @note Calls to cancel() will always fail with - * asio::error::operation_not_supported when run on Windows XP, Windows - * Server 2003, and earlier versions of Windows, unless - * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has - * two issues that should be considered before enabling its use: - * - * @li It will only cancel asynchronous operations that were initiated in the - * current thread. - * - * @li It can appear to complete without error, but the request to cancel the - * unfinished operations may be silently ignored by the operating system. - * Whether it works or not seems to depend on the drivers that are installed. - * - * For portable cancellation, consider using one of the following - * alternatives: - * - * @li Disable asio's I/O completion port backend by defining - * ASIO_DISABLE_IOCP. - * - * @li Use the close() function to simultaneously cancel the outstanding - * operations and close the socket. - * - * When running on Windows Vista, Windows Server 2008, and later, the - * CancelIoEx function is always used. This function does not have the - * problems described above. 
- */ -#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ - && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ - && !defined(ASIO_ENABLE_CANCELIO) - __declspec(deprecated("By default, this function always fails with " - "operation_not_supported when used on Windows XP, Windows Server 2003, " - "or earlier. Consult documentation for details.")) -#endif - asio::error_code cancel(asio::error_code& ec) - { - return this->get_service().cancel(this->get_implementation(), ec); - } - - /// Determine whether the socket is at the out-of-band data mark. - /** - * This function is used to check whether the socket input is currently - * positioned at the out-of-band data mark. - * - * @return A bool indicating whether the socket is at the out-of-band data - * mark. - * - * @throws asio::system_error Thrown on failure. - */ - bool at_mark() const - { - asio::error_code ec; - bool b = this->get_service().at_mark(this->get_implementation(), ec); - asio::detail::throw_error(ec, "at_mark"); - return b; - } - - /// Determine whether the socket is at the out-of-band data mark. - /** - * This function is used to check whether the socket input is currently - * positioned at the out-of-band data mark. - * - * @param ec Set to indicate what error occurred, if any. - * - * @return A bool indicating whether the socket is at the out-of-band data - * mark. - */ - bool at_mark(asio::error_code& ec) const - { - return this->get_service().at_mark(this->get_implementation(), ec); - } - - /// Determine the number of bytes available for reading. - /** - * This function is used to determine the number of bytes that may be read - * without blocking. - * - * @return The number of bytes that may be read without blocking, or 0 if an - * error occurs. - * - * @throws asio::system_error Thrown on failure. - */ - std::size_t available() const - { - asio::error_code ec; - std::size_t s = this->get_service().available( - this->get_implementation(), ec); - asio::detail::throw_error(ec, "available"); - return s; - } - - /// Determine the number of bytes available for reading. - /** - * This function is used to determine the number of bytes that may be read - * without blocking. - * - * @param ec Set to indicate what error occurred, if any. - * - * @return The number of bytes that may be read without blocking, or 0 if an - * error occurs. - */ - std::size_t available(asio::error_code& ec) const - { - return this->get_service().available(this->get_implementation(), ec); - } - - /// Bind the socket to the given local endpoint. - /** - * This function binds the socket to the specified endpoint on the local - * machine. - * - * @param endpoint An endpoint on the local machine to which the socket will - * be bound. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * socket.open(asio::ip::tcp::v4()); - * socket.bind(asio::ip::tcp::endpoint( - * asio::ip::tcp::v4(), 12345)); - * @endcode - */ - void bind(const endpoint_type& endpoint) - { - asio::error_code ec; - this->get_service().bind(this->get_implementation(), endpoint, ec); - asio::detail::throw_error(ec, "bind"); - } - - /// Bind the socket to the given local endpoint. - /** - * This function binds the socket to the specified endpoint on the local - * machine. - * - * @param endpoint An endpoint on the local machine to which the socket will - * be bound. - * - * @param ec Set to indicate what error occurred, if any. 
- * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * socket.open(asio::ip::tcp::v4()); - * asio::error_code ec; - * socket.bind(asio::ip::tcp::endpoint( - * asio::ip::tcp::v4(), 12345), ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - asio::error_code bind(const endpoint_type& endpoint, - asio::error_code& ec) - { - return this->get_service().bind(this->get_implementation(), endpoint, ec); - } - - /// Connect the socket to the specified endpoint. - /** - * This function is used to connect a socket to the specified remote endpoint. - * The function call will block until the connection is successfully made or - * an error occurs. - * - * The socket is automatically opened if it is not already open. If the - * connect fails, and the socket was automatically opened, the socket is - * not returned to the closed state. - * - * @param peer_endpoint The remote endpoint to which the socket will be - * connected. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * asio::ip::tcp::endpoint endpoint( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.connect(endpoint); - * @endcode - */ - void connect(const endpoint_type& peer_endpoint) - { - asio::error_code ec; - if (!is_open()) - { - this->get_service().open(this->get_implementation(), - peer_endpoint.protocol(), ec); - asio::detail::throw_error(ec, "connect"); - } - this->get_service().connect(this->get_implementation(), peer_endpoint, ec); - asio::detail::throw_error(ec, "connect"); - } - - /// Connect the socket to the specified endpoint. - /** - * This function is used to connect a socket to the specified remote endpoint. - * The function call will block until the connection is successfully made or - * an error occurs. - * - * The socket is automatically opened if it is not already open. If the - * connect fails, and the socket was automatically opened, the socket is - * not returned to the closed state. - * - * @param peer_endpoint The remote endpoint to which the socket will be - * connected. - * - * @param ec Set to indicate what error occurred, if any. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * asio::ip::tcp::endpoint endpoint( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * asio::error_code ec; - * socket.connect(endpoint, ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - asio::error_code connect(const endpoint_type& peer_endpoint, - asio::error_code& ec) - { - if (!is_open()) - { - if (this->get_service().open(this->get_implementation(), - peer_endpoint.protocol(), ec)) - { - return ec; - } - } - - return this->get_service().connect( - this->get_implementation(), peer_endpoint, ec); - } - - /// Start an asynchronous connect. - /** - * This function is used to asynchronously connect a socket to the specified - * remote endpoint. The function call always returns immediately. - * - * The socket is automatically opened if it is not already open. If the - * connect fails, and the socket was automatically opened, the socket is - * not returned to the closed state. - * - * @param peer_endpoint The remote endpoint to which the socket will be - * connected. Copies will be made of the endpoint object as required. - * - * @param handler The handler to be called when the connection operation - * completes. Copies will be made of the handler as required. 
The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error // Result of operation - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * @code - * void connect_handler(const asio::error_code& error) - * { - * if (!error) - * { - * // Connect succeeded. - * } - * } - * - * ... - * - * asio::ip::tcp::socket socket(io_service); - * asio::ip::tcp::endpoint endpoint( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.async_connect(endpoint, connect_handler); - * @endcode - */ - template - ASIO_INITFN_RESULT_TYPE(ConnectHandler, - void (asio::error_code)) - async_connect(const endpoint_type& peer_endpoint, - ASIO_MOVE_ARG(ConnectHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ConnectHandler. - ASIO_CONNECT_HANDLER_CHECK(ConnectHandler, handler) type_check; - - if (!is_open()) - { - asio::error_code ec; - const protocol_type protocol = peer_endpoint.protocol(); - if (this->get_service().open(this->get_implementation(), protocol, ec)) - { - detail::async_result_init< - ConnectHandler, void (asio::error_code)> init( - ASIO_MOVE_CAST(ConnectHandler)(handler)); - - this->get_io_service().post( - asio::detail::bind_handler( - ASIO_MOVE_CAST(ASIO_HANDLER_TYPE( - ConnectHandler, void (asio::error_code)))( - init.handler), ec)); - - return init.result.get(); - } - } - - return this->get_service().async_connect(this->get_implementation(), - peer_endpoint, ASIO_MOVE_CAST(ConnectHandler)(handler)); - } - - /// Set an option on the socket. - /** - * This function is used to set an option on the socket. - * - * @param option The new option value to be set on the socket. - * - * @throws asio::system_error Thrown on failure. - * - * @sa SettableSocketOption @n - * asio::socket_base::broadcast @n - * asio::socket_base::do_not_route @n - * asio::socket_base::keep_alive @n - * asio::socket_base::linger @n - * asio::socket_base::receive_buffer_size @n - * asio::socket_base::receive_low_watermark @n - * asio::socket_base::reuse_address @n - * asio::socket_base::send_buffer_size @n - * asio::socket_base::send_low_watermark @n - * asio::ip::multicast::join_group @n - * asio::ip::multicast::leave_group @n - * asio::ip::multicast::enable_loopback @n - * asio::ip::multicast::outbound_interface @n - * asio::ip::multicast::hops @n - * asio::ip::tcp::no_delay - * - * @par Example - * Setting the IPPROTO_TCP/TCP_NODELAY option: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::no_delay option(true); - * socket.set_option(option); - * @endcode - */ - template - void set_option(const SettableSocketOption& option) - { - asio::error_code ec; - this->get_service().set_option(this->get_implementation(), option, ec); - asio::detail::throw_error(ec, "set_option"); - } - - /// Set an option on the socket. - /** - * This function is used to set an option on the socket. - * - * @param option The new option value to be set on the socket. - * - * @param ec Set to indicate what error occurred, if any. 
- * - * @sa SettableSocketOption @n - * asio::socket_base::broadcast @n - * asio::socket_base::do_not_route @n - * asio::socket_base::keep_alive @n - * asio::socket_base::linger @n - * asio::socket_base::receive_buffer_size @n - * asio::socket_base::receive_low_watermark @n - * asio::socket_base::reuse_address @n - * asio::socket_base::send_buffer_size @n - * asio::socket_base::send_low_watermark @n - * asio::ip::multicast::join_group @n - * asio::ip::multicast::leave_group @n - * asio::ip::multicast::enable_loopback @n - * asio::ip::multicast::outbound_interface @n - * asio::ip::multicast::hops @n - * asio::ip::tcp::no_delay - * - * @par Example - * Setting the IPPROTO_TCP/TCP_NODELAY option: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::no_delay option(true); - * asio::error_code ec; - * socket.set_option(option, ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - template - asio::error_code set_option(const SettableSocketOption& option, - asio::error_code& ec) - { - return this->get_service().set_option( - this->get_implementation(), option, ec); - } - - /// Get an option from the socket. - /** - * This function is used to get the current value of an option on the socket. - * - * @param option The option value to be obtained from the socket. - * - * @throws asio::system_error Thrown on failure. - * - * @sa GettableSocketOption @n - * asio::socket_base::broadcast @n - * asio::socket_base::do_not_route @n - * asio::socket_base::keep_alive @n - * asio::socket_base::linger @n - * asio::socket_base::receive_buffer_size @n - * asio::socket_base::receive_low_watermark @n - * asio::socket_base::reuse_address @n - * asio::socket_base::send_buffer_size @n - * asio::socket_base::send_low_watermark @n - * asio::ip::multicast::join_group @n - * asio::ip::multicast::leave_group @n - * asio::ip::multicast::enable_loopback @n - * asio::ip::multicast::outbound_interface @n - * asio::ip::multicast::hops @n - * asio::ip::tcp::no_delay - * - * @par Example - * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::socket::keep_alive option; - * socket.get_option(option); - * bool is_set = option.value(); - * @endcode - */ - template - void get_option(GettableSocketOption& option) const - { - asio::error_code ec; - this->get_service().get_option(this->get_implementation(), option, ec); - asio::detail::throw_error(ec, "get_option"); - } - - /// Get an option from the socket. - /** - * This function is used to get the current value of an option on the socket. - * - * @param option The option value to be obtained from the socket. - * - * @param ec Set to indicate what error occurred, if any. - * - * @sa GettableSocketOption @n - * asio::socket_base::broadcast @n - * asio::socket_base::do_not_route @n - * asio::socket_base::keep_alive @n - * asio::socket_base::linger @n - * asio::socket_base::receive_buffer_size @n - * asio::socket_base::receive_low_watermark @n - * asio::socket_base::reuse_address @n - * asio::socket_base::send_buffer_size @n - * asio::socket_base::send_low_watermark @n - * asio::ip::multicast::join_group @n - * asio::ip::multicast::leave_group @n - * asio::ip::multicast::enable_loopback @n - * asio::ip::multicast::outbound_interface @n - * asio::ip::multicast::hops @n - * asio::ip::tcp::no_delay - * - * @par Example - * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... 
- * asio::ip::tcp::socket::keep_alive option; - * asio::error_code ec; - * socket.get_option(option, ec); - * if (ec) - * { - * // An error occurred. - * } - * bool is_set = option.value(); - * @endcode - */ - template - asio::error_code get_option(GettableSocketOption& option, - asio::error_code& ec) const - { - return this->get_service().get_option( - this->get_implementation(), option, ec); - } - - /// Perform an IO control command on the socket. - /** - * This function is used to execute an IO control command on the socket. - * - * @param command The IO control command to be performed on the socket. - * - * @throws asio::system_error Thrown on failure. - * - * @sa IoControlCommand @n - * asio::socket_base::bytes_readable @n - * asio::socket_base::non_blocking_io - * - * @par Example - * Getting the number of bytes ready to read: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::socket::bytes_readable command; - * socket.io_control(command); - * std::size_t bytes_readable = command.get(); - * @endcode - */ - template - void io_control(IoControlCommand& command) - { - asio::error_code ec; - this->get_service().io_control(this->get_implementation(), command, ec); - asio::detail::throw_error(ec, "io_control"); - } - - /// Perform an IO control command on the socket. - /** - * This function is used to execute an IO control command on the socket. - * - * @param command The IO control command to be performed on the socket. - * - * @param ec Set to indicate what error occurred, if any. - * - * @sa IoControlCommand @n - * asio::socket_base::bytes_readable @n - * asio::socket_base::non_blocking_io - * - * @par Example - * Getting the number of bytes ready to read: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::socket::bytes_readable command; - * asio::error_code ec; - * socket.io_control(command, ec); - * if (ec) - * { - * // An error occurred. - * } - * std::size_t bytes_readable = command.get(); - * @endcode - */ - template - asio::error_code io_control(IoControlCommand& command, - asio::error_code& ec) - { - return this->get_service().io_control( - this->get_implementation(), command, ec); - } - - /// Gets the non-blocking mode of the socket. - /** - * @returns @c true if the socket's synchronous operations will fail with - * asio::error::would_block if they are unable to perform the requested - * operation immediately. If @c false, synchronous operations will block - * until complete. - * - * @note The non-blocking mode has no effect on the behaviour of asynchronous - * operations. Asynchronous operations will never fail with the error - * asio::error::would_block. - */ - bool non_blocking() const - { - return this->get_service().non_blocking(this->get_implementation()); - } - - /// Sets the non-blocking mode of the socket. - /** - * @param mode If @c true, the socket's synchronous operations will fail with - * asio::error::would_block if they are unable to perform the requested - * operation immediately. If @c false, synchronous operations will block - * until complete. - * - * @throws asio::system_error Thrown on failure. - * - * @note The non-blocking mode has no effect on the behaviour of asynchronous - * operations. Asynchronous operations will never fail with the error - * asio::error::would_block. 
- */ - void non_blocking(bool mode) - { - asio::error_code ec; - this->get_service().non_blocking(this->get_implementation(), mode, ec); - asio::detail::throw_error(ec, "non_blocking"); - } - - /// Sets the non-blocking mode of the socket. - /** - * @param mode If @c true, the socket's synchronous operations will fail with - * asio::error::would_block if they are unable to perform the requested - * operation immediately. If @c false, synchronous operations will block - * until complete. - * - * @param ec Set to indicate what error occurred, if any. - * - * @note The non-blocking mode has no effect on the behaviour of asynchronous - * operations. Asynchronous operations will never fail with the error - * asio::error::would_block. - */ - asio::error_code non_blocking( - bool mode, asio::error_code& ec) - { - return this->get_service().non_blocking( - this->get_implementation(), mode, ec); - } - - /// Gets the non-blocking mode of the native socket implementation. - /** - * This function is used to retrieve the non-blocking mode of the underlying - * native socket. This mode has no effect on the behaviour of the socket - * object's synchronous operations. - * - * @returns @c true if the underlying socket is in non-blocking mode and - * direct system calls may fail with asio::error::would_block (or the - * equivalent system error). - * - * @note The current non-blocking mode is cached by the socket object. - * Consequently, the return value may be incorrect if the non-blocking mode - * was set directly on the native socket. - * - * @par Example - * This function is intended to allow the encapsulation of arbitrary - * non-blocking system calls as asynchronous operations, in a way that is - * transparent to the user of the socket object. The following example - * illustrates how Linux's @c sendfile system call might be encapsulated: - * @code template - * struct sendfile_op - * { - * tcp::socket& sock_; - * int fd_; - * Handler handler_; - * off_t offset_; - * std::size_t total_bytes_transferred_; - * - * // Function call operator meeting WriteHandler requirements. - * // Used as the handler for the async_write_some operation. - * void operator()(asio::error_code ec, std::size_t) - * { - * // Put the underlying socket into non-blocking mode. - * if (!ec) - * if (!sock_.native_non_blocking()) - * sock_.native_non_blocking(true, ec); - * - * if (!ec) - * { - * for (;;) - * { - * // Try the system call. - * errno = 0; - * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); - * ec = asio::error_code(n < 0 ? errno : 0, - * asio::error::get_system_category()); - * total_bytes_transferred_ += ec ? 0 : n; - * - * // Retry operation immediately if interrupted by signal. - * if (ec == asio::error::interrupted) - * continue; - * - * // Check if we need to run the operation again. - * if (ec == asio::error::would_block - * || ec == asio::error::try_again) - * { - * // We have to wait for the socket to become ready again. - * sock_.async_write_some(asio::null_buffers(), *this); - * return; - * } - * - * if (ec || n == 0) - * { - * // An error occurred, or we have reached the end of the file. - * // Either way we must exit the loop so we can call the handler. - * break; - * } - * - * // Loop around to try calling sendfile again. - * } - * } - * - * // Pass result back to user's handler. 
- * handler_(ec, total_bytes_transferred_); - * } - * }; - * - * template - * void async_sendfile(tcp::socket& sock, int fd, Handler h) - * { - * sendfile_op op = { sock, fd, h, 0, 0 }; - * sock.async_write_some(asio::null_buffers(), op); - * } @endcode - */ - bool native_non_blocking() const - { - return this->get_service().native_non_blocking(this->get_implementation()); - } - - /// Sets the non-blocking mode of the native socket implementation. - /** - * This function is used to modify the non-blocking mode of the underlying - * native socket. It has no effect on the behaviour of the socket object's - * synchronous operations. - * - * @param mode If @c true, the underlying socket is put into non-blocking - * mode and direct system calls may fail with asio::error::would_block - * (or the equivalent system error). - * - * @throws asio::system_error Thrown on failure. If the @c mode is - * @c false, but the current value of @c non_blocking() is @c true, this - * function fails with asio::error::invalid_argument, as the - * combination does not make sense. - * - * @par Example - * This function is intended to allow the encapsulation of arbitrary - * non-blocking system calls as asynchronous operations, in a way that is - * transparent to the user of the socket object. The following example - * illustrates how Linux's @c sendfile system call might be encapsulated: - * @code template - * struct sendfile_op - * { - * tcp::socket& sock_; - * int fd_; - * Handler handler_; - * off_t offset_; - * std::size_t total_bytes_transferred_; - * - * // Function call operator meeting WriteHandler requirements. - * // Used as the handler for the async_write_some operation. - * void operator()(asio::error_code ec, std::size_t) - * { - * // Put the underlying socket into non-blocking mode. - * if (!ec) - * if (!sock_.native_non_blocking()) - * sock_.native_non_blocking(true, ec); - * - * if (!ec) - * { - * for (;;) - * { - * // Try the system call. - * errno = 0; - * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); - * ec = asio::error_code(n < 0 ? errno : 0, - * asio::error::get_system_category()); - * total_bytes_transferred_ += ec ? 0 : n; - * - * // Retry operation immediately if interrupted by signal. - * if (ec == asio::error::interrupted) - * continue; - * - * // Check if we need to run the operation again. - * if (ec == asio::error::would_block - * || ec == asio::error::try_again) - * { - * // We have to wait for the socket to become ready again. - * sock_.async_write_some(asio::null_buffers(), *this); - * return; - * } - * - * if (ec || n == 0) - * { - * // An error occurred, or we have reached the end of the file. - * // Either way we must exit the loop so we can call the handler. - * break; - * } - * - * // Loop around to try calling sendfile again. - * } - * } - * - * // Pass result back to user's handler. - * handler_(ec, total_bytes_transferred_); - * } - * }; - * - * template - * void async_sendfile(tcp::socket& sock, int fd, Handler h) - * { - * sendfile_op op = { sock, fd, h, 0, 0 }; - * sock.async_write_some(asio::null_buffers(), op); - * } @endcode - */ - void native_non_blocking(bool mode) - { - asio::error_code ec; - this->get_service().native_non_blocking( - this->get_implementation(), mode, ec); - asio::detail::throw_error(ec, "native_non_blocking"); - } - - /// Sets the non-blocking mode of the native socket implementation. - /** - * This function is used to modify the non-blocking mode of the underlying - * native socket. 
It has no effect on the behaviour of the socket object's - * synchronous operations. - * - * @param mode If @c true, the underlying socket is put into non-blocking - * mode and direct system calls may fail with asio::error::would_block - * (or the equivalent system error). - * - * @param ec Set to indicate what error occurred, if any. If the @c mode is - * @c false, but the current value of @c non_blocking() is @c true, this - * function fails with asio::error::invalid_argument, as the - * combination does not make sense. - * - * @par Example - * This function is intended to allow the encapsulation of arbitrary - * non-blocking system calls as asynchronous operations, in a way that is - * transparent to the user of the socket object. The following example - * illustrates how Linux's @c sendfile system call might be encapsulated: - * @code template - * struct sendfile_op - * { - * tcp::socket& sock_; - * int fd_; - * Handler handler_; - * off_t offset_; - * std::size_t total_bytes_transferred_; - * - * // Function call operator meeting WriteHandler requirements. - * // Used as the handler for the async_write_some operation. - * void operator()(asio::error_code ec, std::size_t) - * { - * // Put the underlying socket into non-blocking mode. - * if (!ec) - * if (!sock_.native_non_blocking()) - * sock_.native_non_blocking(true, ec); - * - * if (!ec) - * { - * for (;;) - * { - * // Try the system call. - * errno = 0; - * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); - * ec = asio::error_code(n < 0 ? errno : 0, - * asio::error::get_system_category()); - * total_bytes_transferred_ += ec ? 0 : n; - * - * // Retry operation immediately if interrupted by signal. - * if (ec == asio::error::interrupted) - * continue; - * - * // Check if we need to run the operation again. - * if (ec == asio::error::would_block - * || ec == asio::error::try_again) - * { - * // We have to wait for the socket to become ready again. - * sock_.async_write_some(asio::null_buffers(), *this); - * return; - * } - * - * if (ec || n == 0) - * { - * // An error occurred, or we have reached the end of the file. - * // Either way we must exit the loop so we can call the handler. - * break; - * } - * - * // Loop around to try calling sendfile again. - * } - * } - * - * // Pass result back to user's handler. - * handler_(ec, total_bytes_transferred_); - * } - * }; - * - * template - * void async_sendfile(tcp::socket& sock, int fd, Handler h) - * { - * sendfile_op op = { sock, fd, h, 0, 0 }; - * sock.async_write_some(asio::null_buffers(), op); - * } @endcode - */ - asio::error_code native_non_blocking( - bool mode, asio::error_code& ec) - { - return this->get_service().native_non_blocking( - this->get_implementation(), mode, ec); - } - - /// Get the local endpoint of the socket. - /** - * This function is used to obtain the locally bound endpoint of the socket. - * - * @returns An object that represents the local endpoint of the socket. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(); - * @endcode - */ - endpoint_type local_endpoint() const - { - asio::error_code ec; - endpoint_type ep = this->get_service().local_endpoint( - this->get_implementation(), ec); - asio::detail::throw_error(ec, "local_endpoint"); - return ep; - } - - /// Get the local endpoint of the socket. - /** - * This function is used to obtain the locally bound endpoint of the socket. 
- * - * @param ec Set to indicate what error occurred, if any. - * - * @returns An object that represents the local endpoint of the socket. - * Returns a default-constructed endpoint object if an error occurred. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::error_code ec; - * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - endpoint_type local_endpoint(asio::error_code& ec) const - { - return this->get_service().local_endpoint(this->get_implementation(), ec); - } - - /// Get the remote endpoint of the socket. - /** - * This function is used to obtain the remote endpoint of the socket. - * - * @returns An object that represents the remote endpoint of the socket. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(); - * @endcode - */ - endpoint_type remote_endpoint() const - { - asio::error_code ec; - endpoint_type ep = this->get_service().remote_endpoint( - this->get_implementation(), ec); - asio::detail::throw_error(ec, "remote_endpoint"); - return ep; - } - - /// Get the remote endpoint of the socket. - /** - * This function is used to obtain the remote endpoint of the socket. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns An object that represents the remote endpoint of the socket. - * Returns a default-constructed endpoint object if an error occurred. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::error_code ec; - * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - endpoint_type remote_endpoint(asio::error_code& ec) const - { - return this->get_service().remote_endpoint(this->get_implementation(), ec); - } - - /// Disable sends or receives on the socket. - /** - * This function is used to disable send operations, receive operations, or - * both. - * - * @param what Determines what types of operation will no longer be allowed. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * Shutting down the send side of the socket: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * socket.shutdown(asio::ip::tcp::socket::shutdown_send); - * @endcode - */ - void shutdown(shutdown_type what) - { - asio::error_code ec; - this->get_service().shutdown(this->get_implementation(), what, ec); - asio::detail::throw_error(ec, "shutdown"); - } - - /// Disable sends or receives on the socket. - /** - * This function is used to disable send operations, receive operations, or - * both. - * - * @param what Determines what types of operation will no longer be allowed. - * - * @param ec Set to indicate what error occurred, if any. - * - * @par Example - * Shutting down the send side of the socket: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::error_code ec; - * socket.shutdown(asio::ip::tcp::socket::shutdown_send, ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - asio::error_code shutdown(shutdown_type what, - asio::error_code& ec) - { - return this->get_service().shutdown(this->get_implementation(), what, ec); - } - -protected: - /// Protected destructor to prevent deletion through this type. 
-  ~basic_socket()
-  {
-  }
-};
-
-} // namespace asio
-
-#include "asio/detail/pop_options.hpp"
-
-#endif // ASIO_BASIC_SOCKET_HPP
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_acceptor.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_acceptor.hpp
deleted file mode 100644
index f69f483250f27..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_acceptor.hpp
+++ /dev/null
@@ -1,1136 +0,0 @@
-//
-// basic_socket_acceptor.hpp
-// ~~~~~~~~~~~~~~~~~~~~~~~~~
-//
-// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
-//
-// Distributed under the Boost Software License, Version 1.0. (See accompanying
-// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//
-
-#ifndef ASIO_BASIC_SOCKET_ACCEPTOR_HPP
-#define ASIO_BASIC_SOCKET_ACCEPTOR_HPP
-
-#if defined(_MSC_VER) && (_MSC_VER >= 1200)
-# pragma once
-#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
-
-#include "asio/detail/config.hpp"
-#include "asio/basic_io_object.hpp"
-#include "asio/basic_socket.hpp"
-#include "asio/detail/handler_type_requirements.hpp"
-#include "asio/detail/throw_error.hpp"
-#include "asio/detail/type_traits.hpp"
-#include "asio/error.hpp"
-#include "asio/socket_acceptor_service.hpp"
-#include "asio/socket_base.hpp"
-
-#include "asio/detail/push_options.hpp"
-
-namespace asio {
-
-/// Provides the ability to accept new connections.
-/**
- * The basic_socket_acceptor class template is used for accepting new socket
- * connections.
- *
- * @par Thread Safety
- * @e Distinct @e objects: Safe.@n
- * @e Shared @e objects: Unsafe.
- *
- * @par Example
- * Opening a socket acceptor with the SO_REUSEADDR option enabled:
- * @code
- * asio::ip::tcp::acceptor acceptor(io_service);
- * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), port);
- * acceptor.open(endpoint.protocol());
- * acceptor.set_option(asio::ip::tcp::acceptor::reuse_address(true));
- * acceptor.bind(endpoint);
- * acceptor.listen();
- * @endcode
- */
-template <typename Protocol,
-    typename SocketAcceptorService = socket_acceptor_service<Protocol> >
-class basic_socket_acceptor
-  : public basic_io_object<SocketAcceptorService>,
-    public socket_base
-{
-public:
-  /// (Deprecated: Use native_handle_type.) The native representation of an
-  /// acceptor.
-  typedef typename SocketAcceptorService::native_handle_type native_type;
-
-  /// The native representation of an acceptor.
-  typedef typename SocketAcceptorService::native_handle_type native_handle_type;
-
-  /// The protocol type.
-  typedef Protocol protocol_type;
-
-  /// The endpoint type.
-  typedef typename Protocol::endpoint endpoint_type;
-
-  /// Construct an acceptor without opening it.
-  /**
-   * This constructor creates an acceptor without opening it to listen for new
-   * connections. The open() function must be called before the acceptor can
-   * accept new socket connections.
-   *
-   * @param io_service The io_service object that the acceptor will use to
-   * dispatch handlers for any asynchronous operations performed on the
-   * acceptor.
-   */
-  explicit basic_socket_acceptor(asio::io_service& io_service)
-    : basic_io_object<SocketAcceptorService>(io_service)
-  {
-  }
-
-  /// Construct an open acceptor.
-  /**
-   * This constructor creates an acceptor and automatically opens it.
-   *
-   * @param io_service The io_service object that the acceptor will use to
-   * dispatch handlers for any asynchronous operations performed on the
-   * acceptor.
- * - * @param protocol An object specifying protocol parameters to be used. - * - * @throws asio::system_error Thrown on failure. - */ - basic_socket_acceptor(asio::io_service& io_service, - const protocol_type& protocol) - : basic_io_object(io_service) - { - asio::error_code ec; - this->get_service().open(this->get_implementation(), protocol, ec); - asio::detail::throw_error(ec, "open"); - } - - /// Construct an acceptor opened on the given endpoint. - /** - * This constructor creates an acceptor and automatically opens it to listen - * for new connections on the specified endpoint. - * - * @param io_service The io_service object that the acceptor will use to - * dispatch handlers for any asynchronous operations performed on the - * acceptor. - * - * @param endpoint An endpoint on the local machine on which the acceptor - * will listen for new connections. - * - * @param reuse_addr Whether the constructor should set the socket option - * socket_base::reuse_address. - * - * @throws asio::system_error Thrown on failure. - * - * @note This constructor is equivalent to the following code: - * @code - * basic_socket_acceptor acceptor(io_service); - * acceptor.open(endpoint.protocol()); - * if (reuse_addr) - * acceptor.set_option(socket_base::reuse_address(true)); - * acceptor.bind(endpoint); - * acceptor.listen(listen_backlog); - * @endcode - */ - basic_socket_acceptor(asio::io_service& io_service, - const endpoint_type& endpoint, bool reuse_addr = true) - : basic_io_object(io_service) - { - asio::error_code ec; - const protocol_type protocol = endpoint.protocol(); - this->get_service().open(this->get_implementation(), protocol, ec); - asio::detail::throw_error(ec, "open"); - if (reuse_addr) - { - this->get_service().set_option(this->get_implementation(), - socket_base::reuse_address(true), ec); - asio::detail::throw_error(ec, "set_option"); - } - this->get_service().bind(this->get_implementation(), endpoint, ec); - asio::detail::throw_error(ec, "bind"); - this->get_service().listen(this->get_implementation(), - socket_base::max_connections, ec); - asio::detail::throw_error(ec, "listen"); - } - - /// Construct a basic_socket_acceptor on an existing native acceptor. - /** - * This constructor creates an acceptor object to hold an existing native - * acceptor. - * - * @param io_service The io_service object that the acceptor will use to - * dispatch handlers for any asynchronous operations performed on the - * acceptor. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @param native_acceptor A native acceptor. - * - * @throws asio::system_error Thrown on failure. - */ - basic_socket_acceptor(asio::io_service& io_service, - const protocol_type& protocol, const native_handle_type& native_acceptor) - : basic_io_object(io_service) - { - asio::error_code ec; - this->get_service().assign(this->get_implementation(), - protocol, native_acceptor, ec); - asio::detail::throw_error(ec, "assign"); - } - -#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_socket_acceptor from another. - /** - * This constructor moves an acceptor from one object to another. - * - * @param other The other basic_socket_acceptor object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_socket_acceptor(io_service&) constructor. 
- */ - basic_socket_acceptor(basic_socket_acceptor&& other) - : basic_io_object( - ASIO_MOVE_CAST(basic_socket_acceptor)(other)) - { - } - - /// Move-assign a basic_socket_acceptor from another. - /** - * This assignment operator moves an acceptor from one object to another. - * - * @param other The other basic_socket_acceptor object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_socket_acceptor(io_service&) constructor. - */ - basic_socket_acceptor& operator=(basic_socket_acceptor&& other) - { - basic_io_object::operator=( - ASIO_MOVE_CAST(basic_socket_acceptor)(other)); - return *this; - } - - // All socket acceptors have access to each other's implementations. - template - friend class basic_socket_acceptor; - - /// Move-construct a basic_socket_acceptor from an acceptor of another - /// protocol type. - /** - * This constructor moves an acceptor from one object to another. - * - * @param other The other basic_socket_acceptor object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_socket(io_service&) constructor. - */ - template - basic_socket_acceptor( - basic_socket_acceptor&& other, - typename enable_if::value>::type* = 0) - : basic_io_object(other.get_io_service()) - { - this->get_service().template converting_move_construct( - this->get_implementation(), other.get_implementation()); - } - - /// Move-assign a basic_socket_acceptor from an acceptor of another protocol - /// type. - /** - * This assignment operator moves an acceptor from one object to another. - * - * @param other The other basic_socket_acceptor object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_socket(io_service&) constructor. - */ - template - typename enable_if::value, - basic_socket_acceptor>::type& operator=( - basic_socket_acceptor&& other) - { - basic_socket_acceptor tmp(ASIO_MOVE_CAST2(basic_socket_acceptor< - Protocol1, SocketAcceptorService1>)(other)); - basic_io_object::operator=( - ASIO_MOVE_CAST(basic_socket_acceptor)(tmp)); - return *this; - } -#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - - /// Open the acceptor using the specified protocol. - /** - * This function opens the socket acceptor so that it will use the specified - * protocol. - * - * @param protocol An object specifying which protocol is to be used. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * acceptor.open(asio::ip::tcp::v4()); - * @endcode - */ - void open(const protocol_type& protocol = protocol_type()) - { - asio::error_code ec; - this->get_service().open(this->get_implementation(), protocol, ec); - asio::detail::throw_error(ec, "open"); - } - - /// Open the acceptor using the specified protocol. - /** - * This function opens the socket acceptor so that it will use the specified - * protocol. - * - * @param protocol An object specifying which protocol is to be used. - * - * @param ec Set to indicate what error occurred, if any. - * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * asio::error_code ec; - * acceptor.open(asio::ip::tcp::v4(), ec); - * if (ec) - * { - * // An error occurred. 
- * } - * @endcode - */ - asio::error_code open(const protocol_type& protocol, - asio::error_code& ec) - { - return this->get_service().open(this->get_implementation(), protocol, ec); - } - - /// Assigns an existing native acceptor to the acceptor. - /* - * This function opens the acceptor to hold an existing native acceptor. - * - * @param protocol An object specifying which protocol is to be used. - * - * @param native_acceptor A native acceptor. - * - * @throws asio::system_error Thrown on failure. - */ - void assign(const protocol_type& protocol, - const native_handle_type& native_acceptor) - { - asio::error_code ec; - this->get_service().assign(this->get_implementation(), - protocol, native_acceptor, ec); - asio::detail::throw_error(ec, "assign"); - } - - /// Assigns an existing native acceptor to the acceptor. - /* - * This function opens the acceptor to hold an existing native acceptor. - * - * @param protocol An object specifying which protocol is to be used. - * - * @param native_acceptor A native acceptor. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code assign(const protocol_type& protocol, - const native_handle_type& native_acceptor, asio::error_code& ec) - { - return this->get_service().assign(this->get_implementation(), - protocol, native_acceptor, ec); - } - - /// Determine whether the acceptor is open. - bool is_open() const - { - return this->get_service().is_open(this->get_implementation()); - } - - /// Bind the acceptor to the given local endpoint. - /** - * This function binds the socket acceptor to the specified endpoint on the - * local machine. - * - * @param endpoint An endpoint on the local machine to which the socket - * acceptor will be bound. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), 12345); - * acceptor.open(endpoint.protocol()); - * acceptor.bind(endpoint); - * @endcode - */ - void bind(const endpoint_type& endpoint) - { - asio::error_code ec; - this->get_service().bind(this->get_implementation(), endpoint, ec); - asio::detail::throw_error(ec, "bind"); - } - - /// Bind the acceptor to the given local endpoint. - /** - * This function binds the socket acceptor to the specified endpoint on the - * local machine. - * - * @param endpoint An endpoint on the local machine to which the socket - * acceptor will be bound. - * - * @param ec Set to indicate what error occurred, if any. - * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), 12345); - * acceptor.open(endpoint.protocol()); - * asio::error_code ec; - * acceptor.bind(endpoint, ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - asio::error_code bind(const endpoint_type& endpoint, - asio::error_code& ec) - { - return this->get_service().bind(this->get_implementation(), endpoint, ec); - } - - /// Place the acceptor into the state where it will listen for new - /// connections. - /** - * This function puts the socket acceptor into the state where it may accept - * new connections. - * - * @param backlog The maximum length of the queue of pending connections. - * - * @throws asio::system_error Thrown on failure. 
- */ - void listen(int backlog = socket_base::max_connections) - { - asio::error_code ec; - this->get_service().listen(this->get_implementation(), backlog, ec); - asio::detail::throw_error(ec, "listen"); - } - - /// Place the acceptor into the state where it will listen for new - /// connections. - /** - * This function puts the socket acceptor into the state where it may accept - * new connections. - * - * @param backlog The maximum length of the queue of pending connections. - * - * @param ec Set to indicate what error occurred, if any. - * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::error_code ec; - * acceptor.listen(asio::socket_base::max_connections, ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - asio::error_code listen(int backlog, asio::error_code& ec) - { - return this->get_service().listen(this->get_implementation(), backlog, ec); - } - - /// Close the acceptor. - /** - * This function is used to close the acceptor. Any asynchronous accept - * operations will be cancelled immediately. - * - * A subsequent call to open() is required before the acceptor can again be - * used to again perform socket accept operations. - * - * @throws asio::system_error Thrown on failure. - */ - void close() - { - asio::error_code ec; - this->get_service().close(this->get_implementation(), ec); - asio::detail::throw_error(ec, "close"); - } - - /// Close the acceptor. - /** - * This function is used to close the acceptor. Any asynchronous accept - * operations will be cancelled immediately. - * - * A subsequent call to open() is required before the acceptor can again be - * used to again perform socket accept operations. - * - * @param ec Set to indicate what error occurred, if any. - * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::error_code ec; - * acceptor.close(ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - asio::error_code close(asio::error_code& ec) - { - return this->get_service().close(this->get_implementation(), ec); - } - - /// (Deprecated: Use native_handle().) Get the native acceptor representation. - /** - * This function may be used to obtain the underlying representation of the - * acceptor. This is intended to allow access to native acceptor functionality - * that is not otherwise provided. - */ - native_type native() - { - return this->get_service().native_handle(this->get_implementation()); - } - - /// Get the native acceptor representation. - /** - * This function may be used to obtain the underlying representation of the - * acceptor. This is intended to allow access to native acceptor functionality - * that is not otherwise provided. - */ - native_handle_type native_handle() - { - return this->get_service().native_handle(this->get_implementation()); - } - - /// Cancel all asynchronous operations associated with the acceptor. - /** - * This function causes all outstanding asynchronous connect, send and receive - * operations to finish immediately, and the handlers for cancelled operations - * will be passed the asio::error::operation_aborted error. - * - * @throws asio::system_error Thrown on failure. - */ - void cancel() - { - asio::error_code ec; - this->get_service().cancel(this->get_implementation(), ec); - asio::detail::throw_error(ec, "cancel"); - } - - /// Cancel all asynchronous operations associated with the acceptor. 
- /** - * This function causes all outstanding asynchronous connect, send and receive - * operations to finish immediately, and the handlers for cancelled operations - * will be passed the asio::error::operation_aborted error. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code cancel(asio::error_code& ec) - { - return this->get_service().cancel(this->get_implementation(), ec); - } - - /// Set an option on the acceptor. - /** - * This function is used to set an option on the acceptor. - * - * @param option The new option value to be set on the acceptor. - * - * @throws asio::system_error Thrown on failure. - * - * @sa SettableSocketOption @n - * asio::socket_base::reuse_address - * asio::socket_base::enable_connection_aborted - * - * @par Example - * Setting the SOL_SOCKET/SO_REUSEADDR option: - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::ip::tcp::acceptor::reuse_address option(true); - * acceptor.set_option(option); - * @endcode - */ - template - void set_option(const SettableSocketOption& option) - { - asio::error_code ec; - this->get_service().set_option(this->get_implementation(), option, ec); - asio::detail::throw_error(ec, "set_option"); - } - - /// Set an option on the acceptor. - /** - * This function is used to set an option on the acceptor. - * - * @param option The new option value to be set on the acceptor. - * - * @param ec Set to indicate what error occurred, if any. - * - * @sa SettableSocketOption @n - * asio::socket_base::reuse_address - * asio::socket_base::enable_connection_aborted - * - * @par Example - * Setting the SOL_SOCKET/SO_REUSEADDR option: - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::ip::tcp::acceptor::reuse_address option(true); - * asio::error_code ec; - * acceptor.set_option(option, ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - template - asio::error_code set_option(const SettableSocketOption& option, - asio::error_code& ec) - { - return this->get_service().set_option( - this->get_implementation(), option, ec); - } - - /// Get an option from the acceptor. - /** - * This function is used to get the current value of an option on the - * acceptor. - * - * @param option The option value to be obtained from the acceptor. - * - * @throws asio::system_error Thrown on failure. - * - * @sa GettableSocketOption @n - * asio::socket_base::reuse_address - * - * @par Example - * Getting the value of the SOL_SOCKET/SO_REUSEADDR option: - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::ip::tcp::acceptor::reuse_address option; - * acceptor.get_option(option); - * bool is_set = option.get(); - * @endcode - */ - template - void get_option(GettableSocketOption& option) - { - asio::error_code ec; - this->get_service().get_option(this->get_implementation(), option, ec); - asio::detail::throw_error(ec, "get_option"); - } - - /// Get an option from the acceptor. - /** - * This function is used to get the current value of an option on the - * acceptor. - * - * @param option The option value to be obtained from the acceptor. - * - * @param ec Set to indicate what error occurred, if any. - * - * @sa GettableSocketOption @n - * asio::socket_base::reuse_address - * - * @par Example - * Getting the value of the SOL_SOCKET/SO_REUSEADDR option: - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... 
- * asio::ip::tcp::acceptor::reuse_address option; - * asio::error_code ec; - * acceptor.get_option(option, ec); - * if (ec) - * { - * // An error occurred. - * } - * bool is_set = option.get(); - * @endcode - */ - template - asio::error_code get_option(GettableSocketOption& option, - asio::error_code& ec) - { - return this->get_service().get_option( - this->get_implementation(), option, ec); - } - - /// Perform an IO control command on the acceptor. - /** - * This function is used to execute an IO control command on the acceptor. - * - * @param command The IO control command to be performed on the acceptor. - * - * @throws asio::system_error Thrown on failure. - * - * @sa IoControlCommand @n - * asio::socket_base::non_blocking_io - * - * @par Example - * Getting the number of bytes ready to read: - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::ip::tcp::acceptor::non_blocking_io command(true); - * socket.io_control(command); - * @endcode - */ - template - void io_control(IoControlCommand& command) - { - asio::error_code ec; - this->get_service().io_control(this->get_implementation(), command, ec); - asio::detail::throw_error(ec, "io_control"); - } - - /// Perform an IO control command on the acceptor. - /** - * This function is used to execute an IO control command on the acceptor. - * - * @param command The IO control command to be performed on the acceptor. - * - * @param ec Set to indicate what error occurred, if any. - * - * @sa IoControlCommand @n - * asio::socket_base::non_blocking_io - * - * @par Example - * Getting the number of bytes ready to read: - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::ip::tcp::acceptor::non_blocking_io command(true); - * asio::error_code ec; - * socket.io_control(command, ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - template - asio::error_code io_control(IoControlCommand& command, - asio::error_code& ec) - { - return this->get_service().io_control( - this->get_implementation(), command, ec); - } - - /// Gets the non-blocking mode of the acceptor. - /** - * @returns @c true if the acceptor's synchronous operations will fail with - * asio::error::would_block if they are unable to perform the requested - * operation immediately. If @c false, synchronous operations will block - * until complete. - * - * @note The non-blocking mode has no effect on the behaviour of asynchronous - * operations. Asynchronous operations will never fail with the error - * asio::error::would_block. - */ - bool non_blocking() const - { - return this->get_service().non_blocking(this->get_implementation()); - } - - /// Sets the non-blocking mode of the acceptor. - /** - * @param mode If @c true, the acceptor's synchronous operations will fail - * with asio::error::would_block if they are unable to perform the - * requested operation immediately. If @c false, synchronous operations will - * block until complete. - * - * @throws asio::system_error Thrown on failure. - * - * @note The non-blocking mode has no effect on the behaviour of asynchronous - * operations. Asynchronous operations will never fail with the error - * asio::error::would_block. - */ - void non_blocking(bool mode) - { - asio::error_code ec; - this->get_service().non_blocking(this->get_implementation(), mode, ec); - asio::detail::throw_error(ec, "non_blocking"); - } - - /// Sets the non-blocking mode of the acceptor. 
- /** - * @param mode If @c true, the acceptor's synchronous operations will fail - * with asio::error::would_block if they are unable to perform the - * requested operation immediately. If @c false, synchronous operations will - * block until complete. - * - * @param ec Set to indicate what error occurred, if any. - * - * @note The non-blocking mode has no effect on the behaviour of asynchronous - * operations. Asynchronous operations will never fail with the error - * asio::error::would_block. - */ - asio::error_code non_blocking( - bool mode, asio::error_code& ec) - { - return this->get_service().non_blocking( - this->get_implementation(), mode, ec); - } - - /// Gets the non-blocking mode of the native acceptor implementation. - /** - * This function is used to retrieve the non-blocking mode of the underlying - * native acceptor. This mode has no effect on the behaviour of the acceptor - * object's synchronous operations. - * - * @returns @c true if the underlying acceptor is in non-blocking mode and - * direct system calls may fail with asio::error::would_block (or the - * equivalent system error). - * - * @note The current non-blocking mode is cached by the acceptor object. - * Consequently, the return value may be incorrect if the non-blocking mode - * was set directly on the native acceptor. - */ - bool native_non_blocking() const - { - return this->get_service().native_non_blocking(this->get_implementation()); - } - - /// Sets the non-blocking mode of the native acceptor implementation. - /** - * This function is used to modify the non-blocking mode of the underlying - * native acceptor. It has no effect on the behaviour of the acceptor object's - * synchronous operations. - * - * @param mode If @c true, the underlying acceptor is put into non-blocking - * mode and direct system calls may fail with asio::error::would_block - * (or the equivalent system error). - * - * @throws asio::system_error Thrown on failure. If the @c mode is - * @c false, but the current value of @c non_blocking() is @c true, this - * function fails with asio::error::invalid_argument, as the - * combination does not make sense. - */ - void native_non_blocking(bool mode) - { - asio::error_code ec; - this->get_service().native_non_blocking( - this->get_implementation(), mode, ec); - asio::detail::throw_error(ec, "native_non_blocking"); - } - - /// Sets the non-blocking mode of the native acceptor implementation. - /** - * This function is used to modify the non-blocking mode of the underlying - * native acceptor. It has no effect on the behaviour of the acceptor object's - * synchronous operations. - * - * @param mode If @c true, the underlying acceptor is put into non-blocking - * mode and direct system calls may fail with asio::error::would_block - * (or the equivalent system error). - * - * @param ec Set to indicate what error occurred, if any. If the @c mode is - * @c false, but the current value of @c non_blocking() is @c true, this - * function fails with asio::error::invalid_argument, as the - * combination does not make sense. - */ - asio::error_code native_non_blocking( - bool mode, asio::error_code& ec) - { - return this->get_service().native_non_blocking( - this->get_implementation(), mode, ec); - } - - /// Get the local endpoint of the acceptor. - /** - * This function is used to obtain the locally bound endpoint of the acceptor. - * - * @returns An object that represents the local endpoint of the acceptor. - * - * @throws asio::system_error Thrown on failure. 
- * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::ip::tcp::endpoint endpoint = acceptor.local_endpoint(); - * @endcode - */ - endpoint_type local_endpoint() const - { - asio::error_code ec; - endpoint_type ep = this->get_service().local_endpoint( - this->get_implementation(), ec); - asio::detail::throw_error(ec, "local_endpoint"); - return ep; - } - - /// Get the local endpoint of the acceptor. - /** - * This function is used to obtain the locally bound endpoint of the acceptor. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns An object that represents the local endpoint of the acceptor. - * Returns a default-constructed endpoint object if an error occurred and the - * error handler did not throw an exception. - * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::error_code ec; - * asio::ip::tcp::endpoint endpoint = acceptor.local_endpoint(ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - endpoint_type local_endpoint(asio::error_code& ec) const - { - return this->get_service().local_endpoint(this->get_implementation(), ec); - } - - /// Accept a new connection. - /** - * This function is used to accept a new connection from a peer into the - * given socket. The function call will block until a new connection has been - * accepted successfully or an error occurs. - * - * @param peer The socket into which the new connection will be accepted. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::ip::tcp::socket socket(io_service); - * acceptor.accept(socket); - * @endcode - */ - template - void accept(basic_socket& peer, - typename enable_if::value>::type* = 0) - { - asio::error_code ec; - this->get_service().accept(this->get_implementation(), - peer, static_cast(0), ec); - asio::detail::throw_error(ec, "accept"); - } - - /// Accept a new connection. - /** - * This function is used to accept a new connection from a peer into the - * given socket. The function call will block until a new connection has been - * accepted successfully or an error occurs. - * - * @param peer The socket into which the new connection will be accepted. - * - * @param ec Set to indicate what error occurred, if any. - * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::ip::tcp::soocket socket(io_service); - * asio::error_code ec; - * acceptor.accept(socket, ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - template - asio::error_code accept( - basic_socket& peer, - asio::error_code& ec, - typename enable_if::value>::type* = 0) - { - return this->get_service().accept(this->get_implementation(), - peer, static_cast(0), ec); - } - - /// Start an asynchronous accept. - /** - * This function is used to asynchronously accept a new connection into a - * socket. The function call always returns immediately. - * - * @param peer The socket into which the new connection will be accepted. - * Ownership of the peer object is retained by the caller, which must - * guarantee that it is valid until the handler is called. - * - * @param handler The handler to be called when the accept operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error // Result of operation. 
- * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * @code - * void accept_handler(const asio::error_code& error) - * { - * if (!error) - * { - * // Accept succeeded. - * } - * } - * - * ... - * - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::ip::tcp::socket socket(io_service); - * acceptor.async_accept(socket, accept_handler); - * @endcode - */ - template - ASIO_INITFN_RESULT_TYPE(AcceptHandler, - void (asio::error_code)) - async_accept(basic_socket& peer, - ASIO_MOVE_ARG(AcceptHandler) handler, - typename enable_if::value>::type* = 0) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a AcceptHandler. - ASIO_ACCEPT_HANDLER_CHECK(AcceptHandler, handler) type_check; - - return this->get_service().async_accept(this->get_implementation(), - peer, static_cast(0), - ASIO_MOVE_CAST(AcceptHandler)(handler)); - } - - /// Accept a new connection and obtain the endpoint of the peer - /** - * This function is used to accept a new connection from a peer into the - * given socket, and additionally provide the endpoint of the remote peer. - * The function call will block until a new connection has been accepted - * successfully or an error occurs. - * - * @param peer The socket into which the new connection will be accepted. - * - * @param peer_endpoint An endpoint object which will receive the endpoint of - * the remote peer. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::ip::tcp::socket socket(io_service); - * asio::ip::tcp::endpoint endpoint; - * acceptor.accept(socket, endpoint); - * @endcode - */ - template - void accept(basic_socket& peer, - endpoint_type& peer_endpoint) - { - asio::error_code ec; - this->get_service().accept(this->get_implementation(), - peer, &peer_endpoint, ec); - asio::detail::throw_error(ec, "accept"); - } - - /// Accept a new connection and obtain the endpoint of the peer - /** - * This function is used to accept a new connection from a peer into the - * given socket, and additionally provide the endpoint of the remote peer. - * The function call will block until a new connection has been accepted - * successfully or an error occurs. - * - * @param peer The socket into which the new connection will be accepted. - * - * @param peer_endpoint An endpoint object which will receive the endpoint of - * the remote peer. - * - * @param ec Set to indicate what error occurred, if any. - * - * @par Example - * @code - * asio::ip::tcp::acceptor acceptor(io_service); - * ... - * asio::ip::tcp::socket socket(io_service); - * asio::ip::tcp::endpoint endpoint; - * asio::error_code ec; - * acceptor.accept(socket, endpoint, ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - template - asio::error_code accept( - basic_socket& peer, - endpoint_type& peer_endpoint, asio::error_code& ec) - { - return this->get_service().accept( - this->get_implementation(), peer, &peer_endpoint, ec); - } - - /// Start an asynchronous accept. - /** - * This function is used to asynchronously accept a new connection into a - * socket, and additionally obtain the endpoint of the remote peer. The - * function call always returns immediately. 
- * - * @param peer The socket into which the new connection will be accepted. - * Ownership of the peer object is retained by the caller, which must - * guarantee that it is valid until the handler is called. - * - * @param peer_endpoint An endpoint object into which the endpoint of the - * remote peer will be written. Ownership of the peer_endpoint object is - * retained by the caller, which must guarantee that it is valid until the - * handler is called. - * - * @param handler The handler to be called when the accept operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error // Result of operation. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(AcceptHandler, - void (asio::error_code)) - async_accept(basic_socket& peer, - endpoint_type& peer_endpoint, ASIO_MOVE_ARG(AcceptHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a AcceptHandler. - ASIO_ACCEPT_HANDLER_CHECK(AcceptHandler, handler) type_check; - - return this->get_service().async_accept(this->get_implementation(), peer, - &peer_endpoint, ASIO_MOVE_CAST(AcceptHandler)(handler)); - } -}; - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // ASIO_BASIC_SOCKET_ACCEPTOR_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_iostream.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_iostream.hpp deleted file mode 100644 index 81754022131f6..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_iostream.hpp +++ /dev/null @@ -1,286 +0,0 @@ -// -// basic_socket_iostream.hpp -// ~~~~~~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_SOCKET_IOSTREAM_HPP -#define ASIO_BASIC_SOCKET_IOSTREAM_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" - -#if !defined(ASIO_NO_IOSTREAM) - -#include -#include -#include "asio/basic_socket_streambuf.hpp" -#include "asio/stream_socket_service.hpp" - -#if !defined(ASIO_HAS_VARIADIC_TEMPLATES) - -# include "asio/detail/variadic_templates.hpp" - -// A macro that should expand to: -// template -// explicit basic_socket_iostream(T1 x1, ..., Tn xn) -// : std::basic_iostream( -// &this->detail::socket_iostream_base< -// Protocol, StreamSocketService, Time, -// TimeTraits, TimerService>::streambuf_) -// { -// if (rdbuf()->connect(x1, ..., xn) == 0) -// this->setstate(std::ios_base::failbit); -// } -// This macro should only persist within this file. 
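The two headers being removed here (basic_socket_acceptor.hpp above, and basic_socket_iostream.hpp, whose removal continues below) belong to the standalone asio 1.10.2 bundle vendored under third_party. For readers unfamiliar with this API surface, the following is a minimal sketch of how these two class templates are typically consumed through their asio::ip::tcp typedefs. It is illustrative only and not part of the patch; the helper names echo_once and say_hello, the port number, and the host/service strings are arbitrary placeholders invented for the example.

    #include <iostream>
    #include <string>
    #include <asio.hpp>

    // Server side: asio::ip::tcp::acceptor (an instance of basic_socket_acceptor)
    // bound to a local port; accepts a single connection and echoes back what it reads.
    void echo_once(unsigned short port)
    {
      asio::io_service io_service;

      // Open, bind and listen in one step; this constructor enables
      // reuse_address by default, as documented in the header above.
      asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), port);
      asio::ip::tcp::acceptor acceptor(io_service, endpoint);

      asio::ip::tcp::socket socket(io_service);
      acceptor.accept(socket);

      asio::streambuf buffer;
      asio::read_until(socket, buffer, '\n');
      asio::write(socket, buffer);
    }

    // Client side: asio::ip::tcp::iostream (an instance of basic_socket_iostream)
    // provides blocking std::iostream-style I/O over a TCP connection.
    void say_hello(const std::string& host, const std::string& service)
    {
      asio::ip::tcp::iostream stream(host, service);
      if (!stream)
      {
        std::cerr << "connect failed: " << stream.error().message() << std::endl;
        return;
      }
      stream << "hello\n";
      std::string reply;
      std::getline(stream, reply);
      std::cout << reply << std::endl;
    }

Note that the endpoint-taking acceptor constructor used in the sketch is the one documented earlier in this diff, which performs open(), set_option(reuse_address), bind() and listen() in a single call.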
-
-# define ASIO_PRIVATE_CTR_DEF(n) \
-  template <ASIO_VARIADIC_TPARAMS(n)> \
-  explicit basic_socket_iostream(ASIO_VARIADIC_PARAMS(n)) \
-    : std::basic_iostream<char>( \
-        &this->detail::socket_iostream_base< \
-          Protocol, StreamSocketService, Time, \
-          TimeTraits, TimerService>::streambuf_) \
-  { \
-    this->setf(std::ios_base::unitbuf); \
-    if (rdbuf()->connect(ASIO_VARIADIC_ARGS(n)) == 0) \
-      this->setstate(std::ios_base::failbit); \
-  } \
-  /**/
-
-// A macro that should expand to:
-//   template <typename T1, ..., typename Tn>
-//   void connect(T1 x1, ..., Tn xn)
-//   {
-//     if (rdbuf()->connect(x1, ..., xn) == 0)
-//       this->setstate(std::ios_base::failbit);
-//   }
-// This macro should only persist within this file.
-
-# define ASIO_PRIVATE_CONNECT_DEF(n) \
-  template <ASIO_VARIADIC_TPARAMS(n)> \
-  void connect(ASIO_VARIADIC_PARAMS(n)) \
-  { \
-    if (rdbuf()->connect(ASIO_VARIADIC_ARGS(n)) == 0) \
-      this->setstate(std::ios_base::failbit); \
-  } \
-  /**/
-
-#endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES)
-
-#include "asio/detail/push_options.hpp"
-
-namespace asio {
-namespace detail {
-
-// A separate base class is used to ensure that the streambuf is initialised
-// prior to the basic_socket_iostream's basic_iostream base class.
-template <typename Protocol,
-    typename StreamSocketService, typename Time,
-    typename TimeTraits, typename TimerService>
-class socket_iostream_base
-{
-protected:
-  basic_socket_streambuf<Protocol, StreamSocketService,
-    Time, TimeTraits, TimerService> streambuf_;
-};
-
-}
-
-/// Iostream interface for a socket.
-template <typename Protocol,
-    typename StreamSocketService = stream_socket_service<Protocol>,
-#if defined(ASIO_HAS_BOOST_DATE_TIME) \
-  || defined(GENERATING_DOCUMENTATION)
-    typename Time = boost::posix_time::ptime,
-    typename TimeTraits = asio::time_traits